repo
stringlengths
1
191
file
stringlengths
23
351
code
stringlengths
0
5.32M
file_length
int64
0
5.32M
avg_line_length
float64
0
2.9k
max_line_length
int64
0
288k
extension_type
stringclasses
1 value
tsml-java
tsml-java-master/src/main/java/utilities/generic_storage/SerialisableComparablePair.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package utilities.generic_storage;

import java.io.Serializable;
import java.util.Objects;

/**
 * An immutable, serialisable pair of comparable values.
 *
 * <p>Ordering is lexicographic: pairs are compared on {@code var1} first and
 * fall back to {@code var2} only when the first components are equal.
 * Equality and hash code are defined over both components, consistent with
 * {@link #compareTo(SerialisableComparablePair)}.
 *
 * @param <T1> type of the first element; must be comparable to itself
 * @param <T2> type of the second element; must be comparable to itself
 */
public class SerialisableComparablePair<T1 extends Comparable<T1>, T2 extends Comparable<T2>>
        implements Comparable<SerialisableComparablePair<T1, T2>>, Serializable {

    public final T1 var1;
    public final T2 var2;

    protected static final long serialVersionUID = 389546738L;

    /**
     * Builds a pair from the two supplied values.
     *
     * @param t1 first element
     * @param t2 second element
     */
    public SerialisableComparablePair(T1 t1, T2 t2) {
        var1 = t1;
        var2 = t2;
    }

    /** Renders the pair as the two elements separated by a single space. */
    @Override
    public String toString() {
        return var1 + " " + var2;
    }

    /**
     * Lexicographic comparison: first component, then second as tie-break.
     */
    @Override
    public int compareTo(SerialisableComparablePair<T1, T2> other) {
        final int first = var1.compareTo(other.var1);
        return first != 0 ? first : var2.compareTo(other.var2);
    }

    /**
     * Two pairs are equal when both components are equal; any wildcard
     * instantiation of this class may compare equal, mirroring the ordering.
     */
    @Override
    public boolean equals(Object other) {
        if (!(other instanceof SerialisableComparablePair<?, ?>)) {
            return false;
        }
        final SerialisableComparablePair<?, ?> that = (SerialisableComparablePair<?, ?>) other;
        return var1.equals(that.var1) && var2.equals(that.var2);
    }

    /** Hash combining both components, consistent with {@link #equals(Object)}. */
    @Override
    public int hashCode() {
        return Objects.hash(var1, var2);
    }
}
1,975
30.365079
93
java
tsml-java
tsml-java-master/src/main/java/utilities/generic_storage/Triple.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package utilities.generic_storage;

/**
 * An immutable 3-tuple, extending {@link Pair} with a third element.
 *
 * @author raj09hxu
 */
public class Triple<T1, T2, T3> extends Pair<T1, T2> {

    public final T3 var3;

    /**
     * Builds a triple; the first two elements are delegated to {@link Pair}.
     *
     * @param t1 first element
     * @param t2 second element
     * @param t3 third element
     */
    public Triple(T1 t1, T2 t2, T3 t3) {
        super(t1, t2);
        var3 = t3;
    }

    /** Renders as the pair's string form followed by a space and the third element. */
    @Override
    public String toString() {
        return new StringBuilder(super.toString()).append(' ').append(var3).toString();
    }
}
1,101
29.611111
76
java
tsml-java
tsml-java-master/src/main/java/utilities/multivariate_tools/MultivariateInstanceTools.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package utilities.multivariate_tools;

import utilities.InstanceTools;
import utilities.class_counts.TreeSetClassCounts;
import weka.core.*;
import tsml.transformers.RowNormalizer;

import java.io.IOException;
import java.util.ArrayList;

/**
 * Static helpers for converting between "flat" (one attribute per value) and
 * "multivariate" (one relational attribute holding a channel-per-row relation,
 * plus a class attribute) weka {@link Instances} representations, and for
 * resampling such data.
 *
 * <p>Conventions used throughout (established by the builders below):
 * multivariate data has exactly two attributes — attribute 0 is relational
 * ("relationalAtt"), attribute 1 is the class — and each row of the relational
 * value is one dimension/channel of the series.
 *
 * @author raj09hxu
 */
public class MultivariateInstanceTools {

    //given some univariate datastreams, we want to merge them to be interweaved.
    //so given dataset X, Y, Z.
    //X_0,Y_0,Z_0,X_1,.....,Z_m
    //Needs more testing.
    /**
     * Interleaves several aligned univariate datasets into one flat dataset.
     * Attribute order is dimension-major per time step: X_0,Y_0,Z_0,X_1,...
     *
     * NOTE(review): assumes every element of inst has the same number of
     * attributes and instances as inst[0], and that the class attribute is
     * nominal — TODO confirm with callers.
     */
    public static Instances mergeStreams(String dataset, Instances[] inst, String[] dimChars){
        String name;
        Instances firstInst = inst[0];
        int dimensions = inst.length;
        // one merged attribute per (dimension, time-step) pair; class excluded.
        int length = (firstInst.numAttributes()-1)*dimensions;
        ArrayList<Attribute> atts = new ArrayList<>();
        for (int i = 0; i < length; i++) {
            // i%dimensions selects the dimension label, i/dimensions the time index.
            name = dataset + "_" + dimChars[i%dimensions] + "_" + (i/dimensions);
            atts.add(new Attribute(name));
        }
        //clone the class values over.
        //Could be from x,y,z doesn't matter.
        Attribute target = firstInst.attribute(firstInst.classIndex());
        ArrayList<String> vals = new ArrayList<>(target.numValues());
        for (int i = 0; i < target.numValues(); i++) {
            vals.add(target.value(i));
        }
        atts.add(new Attribute(firstInst.attribute(firstInst.classIndex()).name(), vals));
        //same number of xInstances
        Instances result = new Instances(dataset, atts, firstInst.numInstances());
        int size = result.numAttributes()-1;
        for(int i=0; i< firstInst.numInstances(); i++){
            result.add(new DenseInstance(size+1));
            // walk the merged attributes, pulling value j/dimensions from stream k.
            for(int j=0; j<size;){
                for(int k=0; k< dimensions; k++){
                    result.instance(i).setValue(j,inst[k].get(i).value(j/dimensions));
                    j++;
                }
            }
        }
        // class value copied from the first stream (all streams share labels).
        for (int j = 0; j < result.numInstances(); j++) {
            result.instance(j).setValue(size, firstInst.get(j).classValue());
        }
        return result;
    }

    //this function concatinates an array of instances by adding the attributes together. maintains same size in n.
    //assumes properly orderered for class values
    //all atts in inst1, then all atts in inst2 etc.
    /**
     * Concatenates aligned datasets attribute-wise: all attributes of data[0],
     * then data[1], etc., followed by a single shared class attribute.
     *
     * NOTE(review): the attribute-naming step divides by
     * length/(firstInst.numAttributes()-1), which assumes every dataset has the
     * same number of attributes as data[0]; dim starts at 1 because i==0 always
     * triggers the increment — confirm naming is as intended.
     */
    public static Instances concatinateInstances(Instances[] data){
        ArrayList<Attribute> atts = new ArrayList();
        String name;
        Instances firstInst = data[0];
        int length =0;
        for (Instances data1 : data) {
            length += data1.numAttributes() - 1;
        }
        int dim = 0;
        int localAtt = 0;
        for (int i = 0; i < length; i++) {
            if(i % (length/(firstInst.numAttributes()-1)) == 0){
                dim++;
                localAtt=0;
            }
            name = "attribute_dimension_" + dim + "_" + localAtt++ + data[0].attribute(0).name();
            atts.add(new Attribute(name));
        }
        //clone the class values over.
        //Could be from x,y,z doesn't matter.
        Attribute target = firstInst.attribute(firstInst.classIndex());
        ArrayList<String> vals = new ArrayList<>(target.numValues());
        for (int i = 0; i < target.numValues(); i++) {
            vals.add(target.value(i));
        }
        atts.add(new Attribute(firstInst.attribute(firstInst.classIndex()).name(), vals));
        //same number of xInstances
        Instances result = new Instances(firstInst.relationName() + "_concatinated", atts, firstInst.numInstances());
        for(int i=0; i< firstInst.numInstances(); i++){
            result.add(new DenseInstance(length+1));
            int k=0;
            //for each instance, copy every dataset's values (minus its class) in order.
            for(Instances inst : data){
                double[] values = inst.get(i).toDoubleArray();
                for(int j=0; j<values.length - 1; j++){
                    result.instance(i).setValue(k++, values[j]);
                }
            }
        }
        for (int j = 0; j < result.numInstances(); j++) {
            //we always want to write the true ClassValue here. Irrelevant of binarised or not.
            result.instance(j).setValue(length, firstInst.get(j).classValue());
        }
        //se the class index where we put it.
        result.setClassIndex(length);
        return result;
    }

    /**
     * Populates a relation (one row per channel) from a rectangular array,
     * using the supplied header for the attribute structure.
     */
    public static Instances createRelationFrom(Instances header, double[][] data){
        int numAttsInChannel = data[0].length;
        Instances output = new Instances(header, data.length);
        //each dense instance is row/ which is actually a channel.
        for(int i=0; i< data.length; i++){
            output.add(new DenseInstance(numAttsInChannel));
            for(int j=0; j<numAttsInChannel; j++)
                output.instance(i).setValue(j, data[i][j]);
        }
        return output;
    }

    /**
     * Overload of {@link #createRelationFrom(Instances, double[][])} for
     * (possibly ragged) nested lists; each inner list becomes one row.
     */
    public static Instances createRelationFrom(Instances header, ArrayList<ArrayList<Double>> data){
        Instances output = new Instances(header, data.size());
        //each dense instance is row/ which is actually a channel.
        for(int i=0; i< data.size(); i++){
            int numAttsInChannel = data.get(i).size();
            output.add(new DenseInstance(numAttsInChannel));
            for(int j=0; j<numAttsInChannel; j++)
                output.instance(i).setValue(j, data.get(i).get(j));
        }
        return output;
    }

    /**
     * Builds an empty relation header with numAttsInChannel numeric attributes
     * named att0..att(n-1) and capacity for numChannels rows.
     */
    public static Instances createRelationHeader(int numAttsInChannel, int numChannels){
        //construct relational attribute vector.
        ArrayList<Attribute> relational_atts = new ArrayList(numAttsInChannel);
        for (int i = 0; i < numAttsInChannel; i++) {
            relational_atts.add(new Attribute("att" + i));
        }
        return new Instances("", relational_atts, numChannels);
    }

    /**
     * Input a list of instances, assumed to be properly aligned, where each Instances
     * contains data relating to a single dimension
     * @param instances: array of Instances
     * @return Instances: single merged file
     */
    public static Instances mergeToMultivariateInstances(Instances[] instances){
        Instance firstInst = instances[0].firstInstance();
        int numAttsInChannel = instances[0].numAttributes()-1;
        ArrayList<Attribute> attributes = new ArrayList<>();
        //construct relational attribute.#
        Instances relationHeader = createRelationHeader(numAttsInChannel, instances.length);
        relationHeader.setRelationName("relationalAtt");
        Attribute relational_att = new Attribute("relationalAtt", relationHeader, numAttsInChannel);
        attributes.add(relational_att);
        //clone the class values over.
        //Could be from x,y,z doesn't matter.
        Attribute target = firstInst.attribute(firstInst.classIndex());
        ArrayList<String> vals = new ArrayList<>(target.numValues());
        for (int i = 0; i < target.numValues(); i++) {
            vals.add(target.value(i));
        }
        attributes.add(new Attribute(firstInst.attribute(firstInst.classIndex()).name(), vals));
        Instances output = new Instances("", attributes, instances[0].numInstances());
        for(int i=0; i < instances[0].numInstances(); i++){
            //create each row.
            //only two attribtues, relational and class.
            output.add(new DenseInstance(2));
            // gather this case's values: one row per dimension.
            double[][] data = new double[instances.length][numAttsInChannel];
            for(int j=0; j<instances.length; j++)
                for(int k=0; k<numAttsInChannel; k++)
                    data[j][k] = instances[j].get(i).value(k);
            //set relation for the dataset/
            Instances relational = createRelationFrom(relationHeader, data);
            // addRelation stores the relation and returns its index to use as the value.
            int index = output.instance(i).attribute(0).addRelation(relational);
            output.instance(i).setValue(0, index);
            //set class value.
            output.instance(i).setValue(1, instances[0].get(i).classValue());
        }
        output.setClassIndex(output.numAttributes()-1);
        //System.out.println(relational);
        return output;
    }

    //function which returns the separate channels of a multivariate problem as Instances[].
    /**
     * Inverse of {@link #mergeToMultivariateInstances(Instances[])}: unpacks a
     * multivariate dataset into one flat Instances per channel, each with the
     * channel values followed by a copy of the class attribute.
     */
    public static Instances[] splitMultivariateInstances(Instances multiInstances){
        int d=numDimensions(multiInstances);
        Instances[] output = new Instances[d];
        int length = channelLength(multiInstances);
        //all the values + a class value.
        //each channel we want to build an Instances object which contains the data, and the class attribute.
        for(int i=0; i< output.length; i++){
            //construct numeric attributes
            ArrayList<Attribute> atts = new ArrayList<>();
            for (int att = 0; att < length; att++) {
                atts.add(new Attribute("channel_"+i+"_"+att));
            }
            //construct the class values atttribute.
            Attribute target = multiInstances.attribute(multiInstances.classIndex());
            ArrayList<String> vals = new ArrayList(target.numValues());
            for (int k = 0; k < target.numValues(); k++) {
                vals.add(target.value(k));
            }
            atts.add(new Attribute(multiInstances.attribute(multiInstances.classIndex()).name(), vals));
            output[i] = new Instances(multiInstances.relationName() + "_channel_" + i, atts, multiInstances.numInstances());
            output[i].setClassIndex(length);
            //for each Instance in
            for(int j =0; j< multiInstances.numInstances(); j++){
                //add the denseinstance to write too.
                output[i].add(new DenseInstance(length+1));
                Instances inst=multiInstances.get(j).relationalValue(0);
                double [] channel = inst.get(i).toDoubleArray();
                int k=0;
                for(; k<channel.length; k++){
                    output[i].instance(j).setValue(k, channel[k]);
                }
                // k now points one past the channel values: the class slot.
                double classVal = multiInstances.get(j).classValue();
                output[i].instance(j).setValue(k, classVal);
            }
        }
        return output;
    }

    /**
     * Resamples a multivariate dataset into train/test by splitting to
     * channels, resampling each with the same seed (keeping them aligned),
     * and re-merging. Returns {train, test}.
     */
    public static Instances[] resampleMultivariateInstances(Instances dataset, long seed, double prop){
        Instances[] data_channels = splitMultivariateInstances(dataset);
        Instances[] resample_train_channels = new Instances[data_channels.length];
        Instances[] resample_test_channels = new Instances[data_channels.length];
        for (int i = 0; i < resample_train_channels.length; i++) {
            Instances[] temp = utilities.InstanceTools.resampleInstances(data_channels[i], seed, prop);
            resample_train_channels[i] = temp[0];
            resample_test_channels[i] = temp[1];
        }
        Instances[] output = new Instances[2];
        output[0] = mergeToMultivariateInstances(resample_train_channels);
        output[1] = mergeToMultivariateInstances(resample_test_channels);
        return output;
    }

    /** Ad-hoc smoke test: loads BasicMotions and prints the last resampled test case. */
    public static void main(String[] args) throws IOException {
        String localPath="src/main/java/experiments/data/mtsc/";
        String datasetName = "BasicMotions";
        Instances train = experiments.data.DatasetLoading.loadData(localPath + datasetName + java.io.File.separator + datasetName+"_TRAIN.arff");
        Instances test = experiments.data.DatasetLoading.loadData(localPath + datasetName + java.io.File.separator + datasetName+"_TEST.arff");
        Instances[] resampled = MultivariateInstanceTools.resampleMultivariateTrainAndTestInstances(train, test, 1);
        //Instances[] resampled_old = MultivariateInstanceTools.resampleMultivariateTrainAndTestInstances_old(train, test, 1);
        System.out.println(resampled[1].get(resampled[1].numInstances()-1));
        //System.out.println("------------------------------");
        //System.out.println(resampled_old[1].get(resampled_old[1].numInstances()-1));
    }

    /**
     *
     * This wraps the instancetools functionality for resampling. It is extremely fast compared with the old method.
     *
     * @param train
     * @param test
     * @param seed
     * @return
     */
    public static Instances[] resampleMultivariateTrainAndTestInstances(Instances train, Instances test, long seed){
        return InstanceTools.resampleTrainAndTestInstances(train, test, seed);
    }

    /**
     *
     * This function is miles slower. Do not use.
     * Kept only for comparison against the wrapper above.
     *
     * @param train
     * @param test
     * @param seed
     * @return
     */
    @Deprecated
    public static Instances[] resampleMultivariateTrainAndTestInstances_old(Instances train, Instances test, long seed){
        Instances[] train_channels = splitMultivariateInstances(train);
        Instances[] test_channels = splitMultivariateInstances(test);
        Instances[] resample_train_channels = new Instances[train_channels.length];
        Instances[] resample_test_channels = new Instances[test_channels.length];
        for (int i = 0; i < resample_train_channels.length; i++) {
            Instances[] temp = utilities.InstanceTools.resampleTrainAndTestInstances(train_channels[i], test_channels[i], seed);
            resample_train_channels[i] = temp[0];
            resample_test_channels[i] = temp[1];
        }
        Instances[] output = new Instances[2];
        output[0] = mergeToMultivariateInstances(resample_train_channels);
        output[0].setRelationName(train.relationName());
        output[1] = mergeToMultivariateInstances(resample_test_channels);
        output[1].setRelationName(test.relationName());
        return output;
    }

    /**
     * Splits one multivariate instance into per-channel flat instances that
     * retain the class value, by splitting the whole owning dataset and
     * indexing back in. NOTE(review): requires instance.dataset() to be set
     * and to contain this exact instance.
     */
    public static Instance[] splitMultivariateInstanceWithClassVal(Instance instance){
        Instances[] split = splitMultivariateInstances(instance.dataset());
        int index = instance.dataset().indexOf(instance);
        Instance[] output = new Instance[numDimensions(instance)];
        for(int i=0; i< output.length; i++){
            output[i] = split[i].get(index);
        }
        return output;
    }

    /**
     * Splits one multivariate instance into its channel rows directly from the
     * relational value; the returned instances carry no class attribute.
     */
    public static Instance[] splitMultivariateInstance(Instance instance){
        Instance[] output = new Instance[numDimensions(instance)];
        for(int i=0; i< output.length; i++){
            output[i] = instance.relationalValue(0).get(i);
        }
        return output;
    }

    //this won't include class value.
    /** Copies channel instances into a [channel][attribute] array. */
    public static double[][] convertMultiInstanceToArrays(Instance[] data){
        double[][] output = new double[data.length][data[0].numAttributes()];
        for(int i=0; i<output.length; i++){
            for(int j=0; j<output[i].length; j++){
                output[i][j] = data[i].value(j);
            }
        }
        return output;
    }

    //this won't include class value.
    /** As above but transposed: [attribute][channel]. */
    public static double[][] convertMultiInstanceToTransposedArrays(Instance[] data){
        double[][] output = new double[data[0].numAttributes()][data.length];
        for(int i=0; i<output.length; i++){
            for(int j=0; j<output[i].length; j++){
                output[i][j] = data[j].value(i);
            }
        }
        return output;
    }

    /**
     * Returns the first stored-relation index whose relation equals
     * findRelation, or -1 if absent.
     */
    public static int indexOfRelational(Instances inst, Instances findRelation){
        int index = -1;
        Attribute relationAtt = inst.get(0).attribute(0);
        for(int i=0; i< inst.numInstances(); i++){
            if(relationAtt.relation(i).equals(findRelation)){
                index = i;
                break;
            }
        }
        return index;
    }

    /** Number of channels = rows in the instance's relational value. */
    public static int numDimensions(Instance multiInstance){
        return multiInstance.relationalValue(0).numInstances();
    }

    public static int numDimensions(Instances multiInstances){
        //get the first attribute which we know is
        return numDimensions(multiInstances.firstInstance());
    }

    public static int channelLength(Instances multiInstances){
        return channelLength(multiInstances.firstInstance());
    }

    /** Series length per channel = attributes in the relational value. */
    public static int channelLength(Instance multiInstance){
        return multiInstance.relationalValue(0).numAttributes();
    }

    //Tony Added:
    /** Converts a standard Instances into a multivariate Instances. Assumes each dimension
     * is simply concatenated, so the first dimension is in positions 0 to length-1,
     * second in length to 2*length-1 etc.
     * First check is that the number of attributes is divisible by length */
    public static Instances convertUnivariateToMultivariate(Instances flat, int length){
        int numAtts=flat.numAttributes()-1;
        if(numAtts%length!=0){
            System.out.println("Error, wrong number of attributes "+numAtts+" for problem of length "+length);
            return null;
        }
        int d=numAtts/length;
        System.out.println("Number of atts ="+numAtts+" num dimensions ="+d);
        ArrayList<Attribute> attributes = new ArrayList<>();
        //construct relational attribute.#
        Instances relationHeader = createRelationHeader(length,d);
        // System.out.println(relationHeader);
        relationHeader.setRelationName("relationalAtt");
        Attribute relational_att = new Attribute("relationalAtt", relationHeader, length);
        attributes.add(relational_att);
        //clone the class values over.
        Attribute target = flat.attribute(flat.classIndex());
        ArrayList<String> vals = new ArrayList<>(target.numValues());
        for (int i = 0; i < target.numValues(); i++) {
            vals.add(target.value(i));
        }
        attributes.add(new Attribute(flat.attribute(flat.classIndex()).name(), vals));
        Instances output = new Instances(flat.relationName(), attributes, flat.numInstances());
        for(int i=0; i < flat.numInstances(); i++){
            //create each row.
            //only two attribtues, relational and class.
            output.add(new DenseInstance(2));
            // slice the flat attribute vector into d contiguous channels of `length`.
            double[][] data = new double[d][length];
            for(int j=0; j<d; j++){
                for(int k=0; k<length; k++){
                    data[j][k] = flat.get(i).value(j*length+k);
                }
            }
            //set relation for the dataset/
            Instances relational = createRelationFrom(relationHeader, data);
            int index = output.instance(i).attribute(0).addRelation(relational);
            output.instance(i).setValue(0, index);
            //set class value.
            output.instance(i).setValue(1, flat.get(i).classValue());
        }
        output.setClassIndex(output.numAttributes()-1);
        return output;
    }

    //Especially for phil :)
    /**
     * Transposes each case's relational value: a (d channels x m values)
     * relation becomes (m x d), so attributes become channels and vice versa.
     */
    public static Instances transposeRelationalData(Instances data){
        Instances test=data.instance(0).relationalValue(0);
        System.out.println("Number of cases ="+data.numInstances()+" Number of dimensions ="+test.numInstances()+" number of attributes ="+test.numAttributes());
        int d=test.numAttributes();
        int m=test.numInstances();
        int count=0;
        ArrayList<Attribute> attributes = new ArrayList<>();
        Instances relationHeader=MultivariateInstanceTools.createRelationHeader(m,d);
        //construct relational attribute.#
        relationHeader.setRelationName("relationalAtt");
        Attribute relational_att = new Attribute("relationalAtt", relationHeader, m);
        attributes.add(relational_att);
        //clone the class values over.
        Attribute target = data.attribute(data.classIndex());
        ArrayList<String> vals = new ArrayList<String>(target.numValues());
        for (int i = 0; i < target.numValues(); i++) {
            vals.add(target.value(i));
        }
        attributes.add(new Attribute(data.attribute(data.classIndex()).name(), vals));
        Instances output = new Instances(data.relationName(), attributes, data.numInstances());
        for(int i=0; i < data.numInstances(); i++){
            output.add(new DenseInstance(2));
            double[][] raw=new double[d][m];
            test=data.instance(i).relationalValue(0);
            for(int j=0;j<test.numInstances();j++){
                for(int k=0;k<test.instance(j).numAttributes();k++){
                    raw[k][j]=test.instance(j).value(k);//6 dimensions, need to be put in
                }
            }
            //set relation for the dataset/
            Instances relational = createRelationFrom(relationHeader, raw);
            int index = output.instance(i).attribute(0).addRelation(relational);
            output.instance(i).setValue(0, index);
            //set class value.
            output.instance(i).setValue(1, data.get(i).classValue());
        }
        output.setClassIndex(output.numAttributes()-1);
        //System.out.println(relational);
        return output;
    }

    /**
     * Normalises each dimension independently by splitting to channels,
     * applying {@link RowNormalizer}, and re-merging.
     */
    public static Instances normaliseDimensions(Instances data) throws Exception {
        Instances[] channels = splitMultivariateInstances(data);
        RowNormalizer norm = new RowNormalizer();
        for (int i = 0; i < channels.length; i++) {
            channels[i] = norm.transform(channels[i]);
        }
        return mergeToMultivariateInstances(channels);
    }

    //function that get a relational instance and return as a set of instances
    /**
     * Unpacks one multivariate instance's relational value into a standalone
     * dataset with one row per dimension plus an appended numeric "class"
     * attribute, set to 0 for every row.
     * NOTE(review): insertAttributeAt(...,0) prepends, so "attr" names end up
     * in reverse order relative to i — confirm callers don't rely on names.
     */
    public static Instances splitMultivariateInstanceOnInstances(Instance instance){
        Instances output = new Instances("instance", new FastVector(numDimensions(instance)),0);
        for(int i=0; i< instance.relationalValue(0).numAttributes(); i++){
            output.insertAttributeAt(new Attribute("attr" + i), 0);
        }
        for(int i = 0; i< numDimensions(instance); i++){
            output.add(instance.relationalValue(0).get(i));
        }
        output.insertAttributeAt(new Attribute("class"), instance.relationalValue(0).numAttributes());
        output.setClassIndex(output.numAttributes()-1);
        for(int i = 0; i< numDimensions(instance); i++){
            output.get(i).setClassValue(0);
        }
        return output;
    }
}
23,384
39.528596
161
java
tsml-java
tsml-java-master/src/main/java/utilities/numericalmethods/NelderMead.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package utilities.numericalmethods;

import java.io.*;
import java.util.*;
import java.text.DecimalFormat;
import java.util.function.Function;

/**
 * Nelder-Mead downhill-simplex minimiser for a {@link NumericalFunction}.
 *
 * <p>Usage: call one of the {@code descend} overloads, then read the best
 * point/score via {@link #getResult()} / {@link #getScore()}. Not thread-safe:
 * state is held in mutable instance fields, and {@code ncalls} is static.
 *
 * @author a.bostrom1
 */
public class NelderMead {

    int NDIMS = 2;            // problem dimensionality; overwritten by descend()
    int NPTS = 3;             // simplex vertex count = NDIMS + 1
    final int MAXITER = 200;  // hard iteration cap
    static int ncalls = 0;    // global evaluation counter (only bumped via func(), below)
    final double TOL = 1E-6;  // improvement tolerance used by the exit criterion
    NumericalFunction func;   // objective being minimised
    double best_score;

    /** Demo entry point: minimises the Rosenbrock function from a fixed start. */
    public static void main(String args[]) {
        NelderMead nm = new NelderMead();
        nm.descend(NelderMead::rosen, new double[]{1.3, 0.7, 0.8, 1.9, 1.2});
        System.out.println("\nThat's all folks");
    }

    private double[] best_result; // best vertex seen at the last checkpoint

    /**
     * Builds an initial simplex around x0 (perturbation scheme modelled on
     * matlab's fminsearch: 5% step, or 0.00025 for zero coordinates) and
     * descends from it.
     *
     * NOTE(review): the perturbation condition is {@code j==i}, so coordinate 0
     * is never perturbed and the last vertex stays equal to x0 — fminsearch
     * perturbs coordinate i-1 for vertex i; looks like an off-by-one. Confirm
     * before relying on convergence in the first coordinate.
     */
    public void descend(NumericalFunction fun, double[] x0) {
        func = fun;
        NDIMS = x0.length;
        NPTS = NDIMS+1;
        //n-dimensional simplex construction
        //based on matlab's fminsearch routine.
        double simplex[][] = new double[NPTS][NDIMS];
        simplex[0] = x0;
        for (int i = 1; i < NPTS; i++) {
            simplex[i] = new double[NDIMS];
            for (int j = 0; j < NDIMS; j++) {
                simplex[i][j] = x0[j];
                //unit vector in j-th coordinate axis multiplied by hj;
                if(j==i){
                    simplex[i][j]+= x0[j] != 0 ? 0.05 : 0.00025;
                }
            }
        }
        descend(fun, simplex);
    }

    /**
     * Runs the downhill-simplex loop from an explicit starting simplex.
     * Each iteration identifies the lowest/highest/next-highest vertices,
     * then tries reflection, expansion, outside/inside contraction, or a
     * full shrink towards the best vertex.
     *
     * NOTE(review): the exit test {@code (iter % 4 * NDIMS) == 0} parses as
     * {@code (iter % 4) * NDIMS} — i.e. a checkpoint every 4 iterations —
     * probably intended as {@code iter % (4 * NDIMS)}. Behaviour kept as-is.
     */
    public void descend(NumericalFunction fun, double[][] simplex){
        func = fun;
        NDIMS = simplex[0].length;
        NPTS = NDIMS+1;
        double score[] = new double[NPTS];
        for (int i = 0; i < NPTS; i++) {
            score [i] = 0.0;
        }
        best_score = 1E99; // sentinel "worse than anything" starting score
        //////////////// initialize the funcvals ////////////////
        for (int i = 0; i < NPTS; i++) {
            score[i] = func.FunctionToMinimise(simplex[i]);
        }
        System.out.println("ncalls = " + fwi(ncalls, 6));
        int iter = 0;
        for (iter = 1; iter < MAXITER; iter++) {
            /////////// identify lo, nhi, hi points //////////////
            double flo = score[0];
            double fhi = flo;
            int ilo = 0, ihi = 0, inhi = -1; // -1 means missing
            for (int i = 1; i < NPTS; i++) {
                if (score[i] < flo) {
                    flo = score[i];
                    ilo = i;
                }
                if (score[i] > fhi) {
                    fhi = score[i];
                    ihi = i;
                }
            }
            // next-highest: best score among vertices other than ihi.
            double fnhi = flo;
            inhi = ilo;
            for (int i = 0; i < NPTS; i++) {
                if ((i != ihi) && (score[i] > fnhi)) {
                    fnhi = score[i];
                    inhi = i;
                }
            }
            /*for (int j = 0; j < NDIMS; j++) {
                System.out.print(fwd(simplex[ilo][j], 18, 9));
            }
            System.out.print(fwd(score[ilo], 18, 9));
            System.out.println();*/
            ////////// exit criterion //////////////
            if ((iter % 4 * NDIMS) == 0) {
                // stop if the best vertex hasn't improved by at least TOL
                // since the previous checkpoint; otherwise record it.
                if (score[ilo] > best_score - TOL) {
                    break;
                }
                best_score = score[ilo];
                best_result = simplex[ilo];
            }
            ///// compute ave[] vector excluding highest vertex //////
            double ave[] = new double[NDIMS];
            for (int j = 0; j < NDIMS; j++) {
                ave[j] = 0;
            }
            for (int i = 0; i < NPTS; i++) {
                if (i != ihi) {
                    for (int j = 0; j < NDIMS; j++) {
                        ave[j] += simplex[i][j];
                    }
                }
            }
            for (int j = 0; j < NDIMS; j++) {
                ave[j] /= (NPTS - 1);
            }
            ///////// try reflect ////////////////
            double r[] = new double[NDIMS];
            for (int j = 0; j < NDIMS; j++) {
                r[j] = 2 * ave[j] - simplex[ihi][j];
            }
            double fr = func.FunctionToMinimise(r);
            if ((flo <= fr) && (fr < fnhi)) // in zone: accept
            {
                System.arraycopy(r, 0, simplex[ihi], 0, NDIMS);
                score[ihi] = fr;
                continue;
            }
            if (fr < flo) //// below zone; try expand, else accept
            {
                double e[] = new double[NDIMS];
                for (int j = 0; j < NDIMS; j++) {
                    e[j] = 3 * ave[j] - 2 * simplex[ihi][j];
                }
                double fe = func.FunctionToMinimise(e);
                if (fe < fr) {
                    System.arraycopy(e, 0, simplex[ihi], 0, NDIMS);
                    score[ihi] = fe;
                    continue;
                } else {
                    System.arraycopy(r, 0, simplex[ihi], 0, NDIMS);
                    score[ihi] = fr;
                    continue;
                }
            }
            ///////////// above midzone, try contractions:
            if (fr < fhi) /// try outside contraction
            {
                double c[] = new double[NDIMS];
                for (int j = 0; j < NDIMS; j++) {
                    c[j] = 1.5 * ave[j] - 0.5 * simplex[ihi][j];
                }
                double fc = func.FunctionToMinimise(c);
                if (fc <= fr) {
                    System.arraycopy(c, 0, simplex[ihi], 0, NDIMS);
                    score[ihi] = fc;
                    continue;
                } else /////// contract
                {
                    // shrink every vertex halfway towards the best one.
                    for (int i = 0; i < NPTS; i++) {
                        if (i != ilo) {
                            for (int j = 0; j < NDIMS; j++) {
                                simplex[i][j] = 0.5 * simplex[ilo][j] + 0.5 * simplex[i][j];
                            }
                            score[i] = func.FunctionToMinimise(simplex[i]);
                        }
                    }
                    continue;
                }
            }
            if (fr >= fhi) /// over the top; try inside contraction
            {
                double cc[] = new double[NDIMS];
                for (int j = 0; j < NDIMS; j++) {
                    cc[j] = 0.5 * ave[j] + 0.5 * simplex[ihi][j];
                }
                double fcc = func.FunctionToMinimise(cc);
                if (fcc < fhi) {
                    System.arraycopy(cc, 0, simplex[ihi], 0, NDIMS);
                    score[ihi] = fcc;
                } else ///////// contract
                {
                    for (int i = 0; i < NPTS; i++) {
                        if (i != ilo) {
                            for (int j = 0; j < NDIMS; j++) {
                                simplex[i][j] = 0.5 * simplex[ilo][j] + 0.5 * simplex[i][j];
                            }
                            score[i] = func.FunctionToMinimise(simplex[i]);
                        }
                    }
                }
            }
        }
        //System.out.println("ncalls, iters, Best =" + fwi(ncalls, 6) + fwi(iter, 6) + fwd(best_score, 16, 9));
    }

    /** Best objective value recorded at the last checkpoint. */
    public double getScore(){
        return best_score;
    }

    /** Best vertex recorded at the last checkpoint (may be null if never checkpointed). */
    public double[] getResult(){
        return best_result;
    }

    /** Counting wrapper around {@link #rosen(double[])}; bumps the global call counter. */
    static double func(double p[]) {
        ncalls++;
        return rosen(p);
    }

    static double rosen(double p[]) // Rosenbrock banana, two dimensions
    {
        // generalised form: sums the classic 2-D banana over consecutive
        // coordinate pairs (p[0],p[1]), (p[2],p[3]), ...
        double sum =0;
        for(int i=0; i< p.length/2; i++){
            double p1 = p[2*i];
            double p2 = p[2*i+1];
            double temp = ((p1*p1) - p2);
            double temp2 = temp*temp*100;
            double temp3 = p1 - 1;
            double temp4 = temp3*temp3;
            sum += temp2+ temp4;
        }
        /*double r0 = 10.0 * (p[1] - SQR(p[0]));
        double r1 = 1.0 - p[0];
        return SQR(r0) + SQR(r1);*/
        return sum;
    }

    static double parab(double p[]) // simple paraboloid
    {
        return SQR(p[0] - 2) + SQR(p[1] - 20);
    }

    /////////////////////////////////utilities ////////////////////

    static double SQR(double x) {
        return x * x;
    }

    static String fwi(int n, int w) // converts an int to a string with given width.
    {
        String s = Integer.toString(n);
        while (s.length() < w) {
            s = " " + s;
        }
        return s;
    }

    static String fwd(double x, int w, int d) // converts a double to a string with given width and decimals.
    {
        java.text.DecimalFormat df = new DecimalFormat();
        df.setMaximumFractionDigits(d);
        df.setMinimumFractionDigits(d);
        df.setGroupingUsed(false);
        String s = df.format(x);
        while (s.length() < w) {
            s = " " + s;
        }
        // too wide to fit: render as a run of dashes instead of overflowing.
        if (s.length() > w) {
            s = "";
            for (int i = 0; i < w; i++) {
                s = s + "-";
            }
        }
        return s;
    }
}
9,861
29.915361
111
java
tsml-java
tsml-java-master/src/main/java/utilities/numericalmethods/NumericalFunction.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package utilities.numericalmethods;

/**
 * Objective function contract for the numerical optimisers in this package
 * (e.g. {@code NelderMead}, which passes implementations as method references
 * such as {@code NelderMead::rosen}).
 *
 * <p>Marked {@code @FunctionalInterface} so the compiler enforces the single
 * abstract method and lambda/method-reference usage stays valid.
 *
 * @author a.bostrom1
 */
@FunctionalInterface
public interface NumericalFunction {

    /**
     * Evaluates the objective at the given point.
     *
     * @param x the point at which to evaluate; length defines the dimensionality
     * @return the objective value to be minimised
     */
    public double FunctionToMinimise(double x[]);
}
922
33.185185
76
java
tsml-java
tsml-java-master/src/main/java/utilities/rescalers/NoRescaling.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package utilities.rescalers;

/**
 * Identity {@link SeriesRescaler}: returns the series untouched.
 *
 * <p>Lets calling code hold a {@code SeriesRescaler} without branching on
 * whether any rescaling is actually wanted.
 *
 * @author a.bostrom1
 */
public class NoRescaling implements SeriesRescaler {

    /** {@inheritDoc} Delegates to the two-argument form with no class value. */
    @Override
    public double[] rescaleSeries(double[] series) {
        return rescaleSeries(series, false);
    }

    /** {@inheritDoc} No-op: the input array is returned as-is, unmodified. */
    @Override
    public double[] rescaleSeries(double[] series, boolean hasClassValue) {
        return series;
    }
}
1,313
30.285714
86
java
tsml-java
tsml-java-master/src/main/java/utilities/rescalers/SeriesRescaler.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package utilities.rescalers;

/**
 * Strategy interface for rescaling a time series held as a double array
 * (implementations in this package include {@code ZNormalisation} and the
 * identity {@code NoRescaling}).
 *
 * @author a.bostrom1
 */
public interface SeriesRescaler {

    /**
     * Rescales a series that carries no trailing class value.
     *
     * @param series the raw series values
     * @return the rescaled series
     */
    public double[] rescaleSeries(double[] series);

    /**
     * Rescales a series, optionally treating the final element as a class
     * value that must be excluded from the rescaling and passed through.
     *
     * @param series the raw series values
     * @param hasClassValue true if the last element is a class label
     * @return the rescaled series
     */
    public double[] rescaleSeries(double[] series, boolean hasClassValue);
}
998
32.3
76
java
tsml-java
tsml-java-master/src/main/java/utilities/rescalers/ZNormalisation.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package utilities.rescalers; /** * * @author a.bostrom1 */ public class ZNormalisation implements SeriesRescaler{ public static final double ROUNDING_ERROR_CORRECTION = 0.000000000000001; @Override public double[] rescaleSeries(double[] series) { return rescaleSeries(series, false); } /** * Z-Normalise a time series * * @param series the input time series to be z-normalised * @param hasClassValue specify whether the time series includes a class value * @return a z-normalised version of input */ @Override public double[] rescaleSeries(double[] series, boolean hasClassValue) { double mean; double stdv; int classValPenalty = hasClassValue ? 1 : 0; int inputLength = series.length - classValPenalty; double[] output = new double[series.length]; double seriesTotal = 0; for (int i = 0; i < inputLength; i++) { seriesTotal += series[i]; } mean = seriesTotal / (double) inputLength; stdv = 0; double temp; for (int i = 0; i < inputLength; i++) { temp = (series[i] - mean); stdv += temp * temp; } stdv /= (double) inputLength; // if the variance is less than the error correction, just set it to 0, else calc stdv. stdv = (stdv < ROUNDING_ERROR_CORRECTION) ? 
0.0 : Math.sqrt(stdv); //System.out.println("mean "+ mean); //System.out.println("stdv "+stdv); for (int i = 0; i < inputLength; i++) { //if the stdv is 0 then set to 0, else normalise. output[i] = (stdv == 0.0) ? 0.0 : ((series[i] - mean) / stdv); } if (hasClassValue) { output[output.length - 1] = series[series.length - 1]; } return output; } }
2,674
29.747126
95
java
tsml-java
tsml-java-master/src/main/java/utilities/rescalers/ZStandardisation.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package utilities.rescalers; /** * * @author a.bostrom1 */ public class ZStandardisation implements SeriesRescaler{ @Override public double[] rescaleSeries(double[] series) { return rescaleSeries(series, false); } @Override public double[] rescaleSeries(double[] series, boolean hasClassValue) { double mean; double stdv; int classValPenalty = hasClassValue ? 1 : 0; int inputLength = series.length - classValPenalty; double[] output = new double[series.length]; double seriesTotal = 0; for (int i = 0; i < inputLength; i++) { seriesTotal += series[i]; } mean = seriesTotal / (double) inputLength; for (int i = 0; i < inputLength; i++) { //if the stdv is 0 then set to 0, else normalise. output[i] = series[i] - mean; } if (hasClassValue) { output[output.length - 1] = series[series.length - 1]; } return output; } }
1,830
27.609375
76
java
tsml-java
tsml-java-master/src/main/java/utilities/samplers/RandomIndexSampler.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package utilities.samplers;

import weka.core.Instance;
import weka.core.Instances;

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

/**
 * Draws instance indices uniformly at random, without replacement.
 *
 * {@link #setInstances(Instances)} seeds the sampler with the index range
 * [0, numInstances); each {@link #next()} removes and returns one remaining
 * index chosen uniformly.
 */
public class RandomIndexSampler implements Sampler {

    /** Indices not yet drawn. */
    private List<Integer> instances;
    private Random random;

    /**
     * @param random the source of randomness (supply a seeded instance for
     *               reproducible sampling)
     */
    public RandomIndexSampler(Random random) {
        this.random = random;
    }

    /** Uses an unseeded {@link Random}. */
    public RandomIndexSampler() {
        random = new Random();
    }

    /**
     * Resets the sampler to the full index range of the given data.
     *
     * @param instances the data whose indices will be sampled
     */
    public void setInstances(Instances instances) {
        // Fix: was a raw ArrayList, producing an unchecked-conversion warning;
        // parameterise it so the list is type-safe.
        this.instances = new ArrayList<>(instances.numInstances());
        for (int i = 0; i < instances.numInstances(); i++) {
            this.instances.add(i);
        }
    }

    /** @return true while at least one index remains undrawn */
    public boolean hasNext() {
        return !instances.isEmpty();
    }

    /**
     * Removes and returns one remaining index, chosen uniformly at random.
     *
     * @return the sampled instance index
     */
    public Integer next() {
        return instances.remove(random.nextInt(instances.size()));
    }
}
1,581
30.019608
88
java
tsml-java
tsml-java-master/src/main/java/utilities/samplers/RandomRoundRobinIndexSampler.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package utilities.samplers; import weka.core.Instance; import weka.core.Instances; import java.util.ArrayList; import java.util.List; import java.util.Random; import static utilities.InstanceTools.indexByClass; import static utilities.InstanceTools.instancesByClass; public class RandomRoundRobinIndexSampler implements Sampler{ private List<List<Integer>> instancesByClass; private Random random; private final List<Integer> indicies = new ArrayList<>(); public RandomRoundRobinIndexSampler(Random random){ this.random = random; } public RandomRoundRobinIndexSampler(){ random = new Random(); } private void regenerateClassValues() { for(int i = 0; i < instancesByClass.size(); i++) { indicies.add(i); } } public void setInstances(Instances instances) { instancesByClass = indexByClass(instances); regenerateClassValues(); } public boolean hasNext() { return !indicies.isEmpty() || !instancesByClass.isEmpty(); } public Integer next() { int classValue = indicies.remove(random.nextInt(indicies.size())); List<Integer> homogeneousInstances = instancesByClass.get(classValue); int instance = homogeneousInstances.remove(random.nextInt(homogeneousInstances.size())); if(homogeneousInstances.isEmpty()) { instancesByClass.remove(classValue); for(int i = 0; i < 
indicies.size(); i++) { if (indicies.get(i) > classValue) { indicies.set(i, indicies.get(i) - 1); } } } if(indicies.isEmpty()) { regenerateClassValues(); } return instance; } }
2,501
31.493506
96
java
tsml-java
tsml-java-master/src/main/java/utilities/samplers/RandomRoundRobinSampler.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package utilities.samplers; import weka.core.Instance; import weka.core.Instances; import java.util.ArrayList; import java.util.List; import java.util.Random; import static utilities.InstanceTools.instancesByClass; public class RandomRoundRobinSampler implements Sampler{ private List<Instances> instancesByClass; private Random random; private final List<Integer> indicies = new ArrayList<>(); public RandomRoundRobinSampler(Random random){ this.random = random; } public RandomRoundRobinSampler(){ random = new Random(); } private void regenerateClassValues() { for(int i = 0; i < instancesByClass.size(); i++) { indicies.add(i); } } public void setInstances(Instances instances) { instancesByClass = instancesByClass(instances); regenerateClassValues(); } public boolean hasNext() { return !indicies.isEmpty() || !instancesByClass.isEmpty(); } public Instance next() { int classValue = indicies.remove(random.nextInt(indicies.size())); Instances homogeneousInstances = instancesByClass.get(classValue); Instance instance = homogeneousInstances.remove(random.nextInt(homogeneousInstances.size())); if(homogeneousInstances.isEmpty()) { instancesByClass.remove(classValue); for(int i = 0; i < indicies.size(); i++) { if (indicies.get(i) > classValue) { 
indicies.set(i, indicies.get(i) - 1); } } } if(indicies.isEmpty()) { regenerateClassValues(); } return instance; } }
2,436
31.065789
101
java
tsml-java
tsml-java-master/src/main/java/utilities/samplers/RandomSampler.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package utilities.samplers;

import weka.core.Instance;
import weka.core.Instances;

import java.util.List;
import java.util.Random;

/**
 * Draws instances uniformly at random, without replacement, from a private
 * copy of the supplied data (the caller's Instances are never mutated).
 */
public class RandomSampler implements Sampler {

    /** Working copy; sampled instances are removed from it. */
    private Instances instances;
    private Random random;

    /**
     * @param random the source of randomness (seed it for reproducibility)
     */
    public RandomSampler(Random random) {
        this.random = random;
    }

    /** Uses an unseeded {@link Random}. */
    public RandomSampler() {
        random = new Random();
    }

    /**
     * Resets the sampler with a defensive copy of the given data.
     *
     * @param instances the data to sample from
     */
    public void setInstances(Instances instances) {
        this.instances = new Instances(instances);
    }

    /** @return true while any instance remains undrawn */
    public boolean hasNext() {
        return !instances.isEmpty();
    }

    /**
     * Removes and returns one remaining instance, chosen uniformly at random.
     *
     * @return the sampled instance
     */
    public Instance next() {
        return instances.remove(random.nextInt(instances.size()));
    }
}
1,403
30.2
96
java
tsml-java
tsml-java-master/src/main/java/utilities/samplers/RandomStratifiedIndexSampler.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package utilities.samplers; import utilities.ArrayUtilities; import weka.core.Instances; import java.util.List; import java.util.Random; import static utilities.InstanceTools.*; import static utilities.Utilities.argMax; public class RandomStratifiedIndexSampler implements Sampler{ private List<List<Integer>> instancesByClass; private double[] classDistribution; private double[] classSamplingProbabilities; private int count; private Random random; private int maxCount; public RandomStratifiedIndexSampler(Random random){ this.random = random; } public RandomStratifiedIndexSampler(){ random = new Random(); } public void setInstances(Instances instances) { instancesByClass = indexByClass(instances); classDistribution = classDistribution(instances); classSamplingProbabilities = classDistribution(instances); count = 0; maxCount = instances.size(); } public boolean hasNext() { return count < maxCount; } public Integer next() { int sampleClass = argMax(classSamplingProbabilities, random); List<Integer> homogeneousInstances = instancesByClass.get(sampleClass); // instances of the class value int sampledInstance = homogeneousInstances.remove(random.nextInt(homogeneousInstances.size())); classSamplingProbabilities[sampleClass]--; 
ArrayUtilities.add(classSamplingProbabilities, classDistribution); return sampledInstance; } }
2,283
33.089552
111
java
tsml-java
tsml-java-master/src/main/java/utilities/samplers/RandomStratifiedSampler.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package utilities.samplers; import utilities.ArrayUtilities; import weka.core.Instance; import weka.core.Instances; import java.util.List; import java.util.Random; import static utilities.InstanceTools.classDistribution; import static utilities.InstanceTools.instancesByClass; import static utilities.Utilities.argMax; public class RandomStratifiedSampler implements Sampler{ private List<Instances> instancesByClass; private double[] classDistribution; private double[] classSamplingProbabilities; private int count; private Random random; private int maxCount; public RandomStratifiedSampler(Random random){ this.random = random; } public RandomStratifiedSampler(){ random = new Random(); } public void setInstances(Instances instances) { instancesByClass = instancesByClass(instances); classDistribution = classDistribution(instances); classSamplingProbabilities = classDistribution(instances); count = 0; maxCount = instances.size(); } public boolean hasNext() { return count < maxCount; } public Instance next() { int sampleClass = argMax(classSamplingProbabilities, random); Instances homogeneousInstances = instancesByClass.get(sampleClass); // instances of the class value Instance sampledInstance = 
homogeneousInstances.remove(random.nextInt(homogeneousInstances.numInstances())); classSamplingProbabilities[sampleClass]--; ArrayUtilities.add(classSamplingProbabilities, classDistribution); count++; return sampledInstance; } }
2,392
34.191176
116
java
tsml-java
tsml-java-master/src/main/java/utilities/samplers/Sampler.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package utilities.samplers;

import weka.core.Instances;

/**
 * Iterator-style contract for drawing items from a weka {@link Instances}
 * data set. Implementations in this package return either sampled
 * {@code Instance} objects or {@code Integer} indices from {@link #next()}.
 */
public interface Sampler {

    /**
     * Supplies the data to sample from and resets any sampling state.
     *
     * @param instances the data set
     */
    void setInstances(Instances instances);

    /** @return true while at least one more item can be drawn */
    boolean hasNext();

    /** @return the next sampled item (type depends on the implementation) */
    Object next();
}
938
31.37931
76
java
tsml-java
tsml-java-master/src/main/java/weka/associations/AbstractAssociator.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Associator.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.associations; import java.io.Serializable; import weka.core.Capabilities; import weka.core.CapabilitiesHandler; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.SerializedObject; import weka.core.Utils; /** * Abstract scheme for learning associations. All schemes for learning * associations implemement this class * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public abstract class AbstractAssociator implements Cloneable, Associator, Serializable, CapabilitiesHandler, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -3017644543382432070L; /** * Creates a new instance of a associator given it's class name and * (optional) arguments to pass to it's setOptions method. If the * associator implements OptionHandler and the options parameter is * non-null, the associator will have it's options set. * * @param associatorName the fully qualified class name of the associator * @param options an array of options suitable for passing to setOptions. May * be null. * @return the newly created associator, ready for use. 
* @exception Exception if the associator name is invalid, or the options * supplied are not acceptable to the associator */ public static Associator forName(String associatorName, String [] options) throws Exception { return (Associator)Utils.forName(Associator.class, associatorName, options); } /** * Creates a deep copy of the given associator using serialization. * * @param model the associator to copy * @return a deep copy of the associator * @exception Exception if an error occurs */ public static Associator makeCopy(Associator model) throws Exception { return (Associator) new SerializedObject(model).getObject(); } /** * Creates copies of the current associator. Note that this method * now uses Serialization to perform a deep copy, so the Associator * object must be fully Serializable. Any currently built model will * now be copied as well. * * @param model an example associator to copy * @param num the number of associators copies to create. * @return an array of associators. * @exception Exception if an error occurs */ public static Associator[] makeCopies(Associator model, int num) throws Exception { if (model == null) { throw new Exception("No model associator set"); } Associator [] associators = new Associator [num]; SerializedObject so = new SerializedObject(model); for(int i = 0; i < associators.length; i++) { associators[i] = (Associator) so.getObject(); } return associators; } /** * Returns the Capabilities of this associator. Maximally permissive * capabilities are allowed by default. Derived associators should * override this method and first disable all capabilities and then * enable just those capabilities that make sense for the scheme. * * @return the capabilities of this object * @see Capabilities */ public Capabilities getCapabilities() { Capabilities defaultC = new Capabilities(this); defaultC.enableAll(); return defaultC; } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * runs the associator with the given commandline options * * @param associator the associator to run * @param options the commandline options */ public static void runAssociator(Associator associator, String[] options) { try { System.out.println( AssociatorEvaluation.evaluate(associator, options)); } catch (Exception e) { if ( (e.getMessage() != null) && (e.getMessage().indexOf("General options") == -1) ) e.printStackTrace(); else System.err.println(e.getMessage()); } } }
4,817
31.554054
88
java
tsml-java
tsml-java-master/src/main/java/weka/associations/Apriori.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Apriori.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.associations; import java.util.ArrayList; import java.util.Enumeration; import java.util.Hashtable; import java.util.List; import weka.core.AttributeStats; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.FastVector; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Remove; /** <!-- globalinfo-start --> * Class implementing an Apriori-type algorithm. * Iteratively reduces the minimum support until it finds the required number of * rules with the given minimum confidence.<br/> * The algorithm has an option to mine class association rules. It is adapted as * explained in the second reference.<br/> * <br/> * For more information see:<br/> * <br/> * R. Agrawal, R. Srikant: Fast Algorithms for Mining Association Rules in Large * Databases. 
In: 20th International Conference on Very Large Data Bases, * 478-499, 1994.<br/> * <br/> * Bing Liu, Wynne Hsu, Yiming Ma: Integrating Classification and Association * Rule Mining. In: Fourth International Conference on Knowledge Discovery and * Data Mining, 80-86, 1998. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * * <pre> * &#64;inproceedings{Agrawal1994, * author = {R. Agrawal and R. Srikant}, * booktitle = {20th International Conference on Very Large Data Bases}, * pages = {478-499}, * publisher = {Morgan Kaufmann, Los Altos, CA}, * title = {Fast Algorithms for Mining Association Rules in Large Databases}, * year = {1994} * } * * &#64;inproceedings{Liu1998, * author = {Bing Liu and Wynne Hsu and Yiming Ma}, * booktitle = {Fourth International Conference on Knowledge Discovery and Data Mining}, * pages = {80-86}, * publisher = {AAAI Press}, * title = {Integrating Classification and Association Rule Mining}, * year = {1998} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: * <p/> * * <pre> * -N &lt;required number of rules output&gt; * The required number of rules. (default = 10) * </pre> * * <pre> * -T &lt;0=confidence | 1=lift | 2=leverage | 3=Conviction&gt; * The metric type by which to rank rules. (default = confidence) * </pre> * * <pre> * -C &lt;minimum metric score of a rule&gt; * The minimum confidence of a rule. (default = 0.9) * </pre> * * <pre> * -D &lt;delta for minimum support&gt; * The delta by which the minimum support is decreased in * each iteration. (default = 0.05) * </pre> * * <pre> * -U &lt;upper bound for minimum support&gt; * Upper bound for minimum support. (default = 1.0) * </pre> * * <pre> * -M &lt;lower bound for minimum support&gt; * The lower bound for the minimum support. (default = 0.1) * </pre> * * <pre> * -S &lt;significance level&gt; * If used, rules are tested for significance at * the given level. Slower. 
(default = no significance testing) * </pre> * * <pre> * -I * If set the itemsets found are also output. (default = no) * </pre> * * <pre> * -R * Remove columns that contain all missing values (default = no) * </pre> * * <pre> * -V * Report progress iteratively. (default = no) * </pre> * * <pre> * -A * If set class association rules are mined. (default = no) * </pre> * * <pre> * -Z * Treat zero (i.e. first value of nominal attributes) as missing * </pre> * * <pre> * -B &lt;toString delimiters&gt; * If used, two characters to use as rule delimiters * in the result of toString: the first to delimit fields, * the second to delimit items within fields. * (default = traditional toString result) * </pre> * * <pre> * -c &lt;the class index&gt; * The class index. (default = last) * </pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Mark Hall (mhall@cs.waikato.ac.nz) * @author Stefan Mutter (mutter@cs.waikato.ac.nz) * @version $Revision: 9722 $ */ public class Apriori extends AbstractAssociator implements OptionHandler, AssociationRulesProducer, CARuleMiner, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 3277498842319212687L; /** The minimum support. */ protected double m_minSupport; /** The upper bound on the support */ protected double m_upperBoundMinSupport; /** The lower bound for the minimum support. */ protected double m_lowerBoundMinSupport; /** Metric type: Confidence */ protected static final int CONFIDENCE = 0; /** Metric type: Lift */ protected static final int LIFT = 1; /** Metric type: Leverage */ protected static final int LEVERAGE = 2; /** Metric type: Conviction */ protected static final int CONVICTION = 3; /** Metric types. */ public static final Tag[] TAGS_SELECTION = { new Tag(CONFIDENCE, "Confidence"), new Tag(LIFT, "Lift"), new Tag(LEVERAGE, "Leverage"), new Tag(CONVICTION, "Conviction") }; /** The selected metric type. 
*/ protected int m_metricType = CONFIDENCE; /** The minimum metric score. */ protected double m_minMetric; /** The maximum number of rules that are output. */ protected int m_numRules; /** Delta by which m_minSupport is decreased in each iteration. */ protected double m_delta; /** Significance level for optional significance test. */ protected double m_significanceLevel; /** Number of cycles used before required number of rules was one. */ protected int m_cycles; /** The set of all sets of itemsets L. */ protected FastVector m_Ls; /** The same information stored in hash tables. */ protected FastVector m_hashtables; /** The list of all generated rules. */ protected FastVector[] m_allTheRules; /** * The instances (transactions) to be used for generating the association * rules. */ protected Instances m_instances; /** Output itemsets found? */ protected boolean m_outputItemSets; /** Remove columns with all missing values */ protected boolean m_removeMissingCols; /** Report progress iteratively */ protected boolean m_verbose; /** Only the class attribute of all Instances. */ protected Instances m_onlyClass; /** The class index. */ protected int m_classIndex; /** Flag indicating whether class association rules are mined. */ protected boolean m_car; /** * Treat zeros as missing (rather than a value in their own right) */ protected boolean m_treatZeroAsMissing = false; /** * ToString delimiters, if any */ protected String m_toStringDelimiters = null; /** * Returns a string describing this associator * * @return a description of the evaluator suitable for displaying in the * explorer/experimenter gui */ public String globalInfo() { return "Class implementing an Apriori-type algorithm. Iteratively reduces " + "the minimum support until it finds the required number of rules with " + "the given minimum confidence.\n" + "The algorithm has an option to mine class association rules. 
It is " + "adapted as explained in the second reference.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed * information about the technical background of this class, e.g., paper * reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "R. Agrawal and R. Srikant"); result.setValue(Field.TITLE, "Fast Algorithms for Mining Association Rules in Large Databases"); result.setValue(Field.BOOKTITLE, "20th International Conference on Very Large Data Bases"); result.setValue(Field.YEAR, "1994"); result.setValue(Field.PAGES, "478-499"); result.setValue(Field.PUBLISHER, "Morgan Kaufmann, Los Altos, CA"); additional = result.add(Type.INPROCEEDINGS); additional.setValue(Field.AUTHOR, "Bing Liu and Wynne Hsu and Yiming Ma"); additional.setValue(Field.TITLE, "Integrating Classification and Association Rule Mining"); additional .setValue(Field.BOOKTITLE, "Fourth International Conference on Knowledge Discovery and Data Mining"); additional.setValue(Field.YEAR, "1998"); additional.setValue(Field.PAGES, "80-86"); additional.setValue(Field.PUBLISHER, "AAAI Press"); return result; } /** * Constructor that allows to sets default values for the minimum confidence * and the maximum number of rules the minimum confidence. */ public Apriori() { resetOptions(); } /** * Resets the options to the default values. 
*/ public void resetOptions() { m_removeMissingCols = false; m_verbose = false; m_delta = 0.05; m_minMetric = 0.90; m_numRules = 10; m_lowerBoundMinSupport = 0.1; m_upperBoundMinSupport = 1.0; m_significanceLevel = -1; m_outputItemSets = false; m_car = false; m_classIndex = -1; m_treatZeroAsMissing = false; m_metricType = CONFIDENCE; } /** * Removes columns that are all missing from the data * * @param instances the instances * @return a new set of instances with all missing columns removed * @throws Exception if something goes wrong */ protected Instances removeMissingColumns(Instances instances) throws Exception { int numInstances = instances.numInstances(); StringBuffer deleteString = new StringBuffer(); int removeCount = 0; boolean first = true; int maxCount = 0; for (int i = 0; i < instances.numAttributes(); i++) { AttributeStats as = instances.attributeStats(i); if (m_upperBoundMinSupport == 1.0 && maxCount != numInstances) { // see if we can decrease this by looking for the most frequent value int[] counts = as.nominalCounts; if (counts[Utils.maxIndex(counts)] > maxCount) { maxCount = counts[Utils.maxIndex(counts)]; } } if (as.missingCount == numInstances) { if (first) { deleteString.append((i + 1)); first = false; } else { deleteString.append("," + (i + 1)); } removeCount++; } } if (m_verbose) { System.err.println("Removed : " + removeCount + " columns with all missing " + "values."); } if (m_upperBoundMinSupport == 1.0 && maxCount != numInstances) { m_upperBoundMinSupport = (double) maxCount / (double) numInstances; if (m_verbose) { System.err.println("Setting upper bound min support to : " + m_upperBoundMinSupport); } } if (deleteString.toString().length() > 0) { Remove af = new Remove(); af.setAttributeIndices(deleteString.toString()); af.setInvertSelection(false); af.setInputFormat(instances); Instances newInst = Filter.useFilter(instances, af); return newInst; } return instances; } /** * Returns default capabilities of the classifier. 
 *
 * @return the capabilities of this classifier
 */
  @Override
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes: nominal only (with missing values allowed)
    result.enable(Capability.NOMINAL_ATTRIBUTES);
    result.enable(Capability.MISSING_VALUES);

    // class (can handle a nominal class if CAR rules are selected)
    result.enable(Capability.NO_CLASS);
    result.enable(Capability.NOMINAL_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    return result;
  }

  /**
   * Method that generates all large itemsets with a minimum support, and from
   * these all association rules with a minimum confidence.
   *
   * The minimum support starts near 1.0 and is lowered by m_delta each cycle
   * until m_numRules rules are found or the lower bound is reached.
   *
   * @param instances the instances to be used for generating the associations
   * @throws Exception if rules can't be built successfully
   */
  @Override
  public void buildAssociations(Instances instances) throws Exception {

    double[] confidences, supports;
    int[] indices;
    FastVector[] sortedRuleSet;
    double necSupport = 0;

    instances = new Instances(instances);

    if (m_removeMissingCols) {
      instances = removeMissingColumns(instances);
    }
    // CAR mining only supports ranking by confidence.
    if (m_car && m_metricType != CONFIDENCE)
      throw new Exception("For CAR-Mining metric type has to be confidence!");

    // only set class index if CAR is requested
    if (m_car) {
      if (m_classIndex == -1) {
        instances.setClassIndex(instances.numAttributes() - 1);
      } else if (m_classIndex <= instances.numAttributes() && m_classIndex > 0) {
        // m_classIndex is 1-based here; converted to 0-based for Instances.
        instances.setClassIndex(m_classIndex - 1);
      } else {
        throw new Exception("Invalid class index.");
      }
    }

    // can associator handle the data?
    getCapabilities().testWithFail(instances);

    m_cycles = 0;

    // make sure that the lower bound is equal to at least one instance
    double lowerBoundMinSupportToUse =
      (m_lowerBoundMinSupport * instances.numInstances() < 1.0) ?
        1.0 / instances.numInstances() : m_lowerBoundMinSupport;

    if (m_car) {
      // m_instances does not contain the class attribute
      m_instances = LabeledItemSet.divide(instances, false);
      // m_onlyClass contains only the class attribute
      m_onlyClass = LabeledItemSet.divide(instances, true);
    } else
      m_instances = instances;

    if (m_car && m_numRules == Integer.MAX_VALUE) {
      // Set desired minimum support
      m_minSupport = lowerBoundMinSupportToUse;
    } else {
      // Decrease minimum support until desired number of rules found.
      m_minSupport = 1.0 - m_delta;
      m_minSupport = (m_minSupport < lowerBoundMinSupportToUse) ?
        lowerBoundMinSupportToUse : m_minSupport;
    }

    do {
      // Reserve space for variables. Slots 0..2: premise, consequence,
      // confidence; slots 3..5: lift, leverage, conviction (non-CAR only).
      m_Ls = new FastVector();
      m_hashtables = new FastVector();
      m_allTheRules = new FastVector[6];
      m_allTheRules[0] = new FastVector();
      m_allTheRules[1] = new FastVector();
      m_allTheRules[2] = new FastVector();
      m_allTheRules[3] = new FastVector();
      m_allTheRules[4] = new FastVector();
      m_allTheRules[5] = new FastVector();
      sortedRuleSet = new FastVector[6];
      sortedRuleSet[0] = new FastVector();
      sortedRuleSet[1] = new FastVector();
      sortedRuleSet[2] = new FastVector();
      sortedRuleSet[3] = new FastVector();
      sortedRuleSet[4] = new FastVector();
      sortedRuleSet[5] = new FastVector();

      if (!m_car) {
        // Find large itemsets and rules
        findLargeItemSets();
        if (m_significanceLevel != -1 || m_metricType != CONFIDENCE)
          findRulesBruteForce();
        else
          findRulesQuickly();
      } else {
        findLargeCarItemSets();
        findCarRulesQuickly();
      }

      // prune rules for upper bound min support
      if (m_upperBoundMinSupport < 1.0) {
        pruneRulesForUpperBoundSupport();
      }

      // Sort rules according to their support (stable sort on negated
      // support, filled back-to-front, yields descending support order).
      int j = m_allTheRules[2].size() - 1;
      supports = new double[m_allTheRules[2].size()];
      for (int i = 0; i < (j + 1); i++)
        supports[j - i] = ((double) ((ItemSet) m_allTheRules[1]
          .elementAt(j - i)).support()) * (-1);
      indices = Utils.stableSort(supports);
      for (int i = 0; i < (j + 1); i++) {
        sortedRuleSet[0].addElement(m_allTheRules[0].elementAt(indices[j - i]));
        sortedRuleSet[1].addElement(m_allTheRules[1].elementAt(indices[j - i]));
        sortedRuleSet[2].addElement(m_allTheRules[2].elementAt(indices[j - i]));
        if (!m_car) {
          // extra metrics only exist for general association rules
          sortedRuleSet[3].addElement(m_allTheRules[3]
            .elementAt(indices[j - i]));
          sortedRuleSet[4].addElement(m_allTheRules[4]
            .elementAt(indices[j - i]));
          sortedRuleSet[5].addElement(m_allTheRules[5]
            .elementAt(indices[j - i]));
        }
      }

      // Sort rules according to their confidence (or the selected metric);
      // rebuild m_allTheRules with only the best m_numRules entries.
      m_allTheRules[0].removeAllElements();
      m_allTheRules[1].removeAllElements();
      m_allTheRules[2].removeAllElements();
      m_allTheRules[3].removeAllElements();
      m_allTheRules[4].removeAllElements();
      m_allTheRules[5].removeAllElements();
      confidences = new double[sortedRuleSet[2].size()];
      // slot index of the ranking metric: 2=confidence, 3=lift, ...
      int sortType = 2 + m_metricType;

      for (int i = 0; i < sortedRuleSet[2].size(); i++)
        confidences[i] = ((Double) sortedRuleSet[sortType].elementAt(i))
          .doubleValue();
      indices = Utils.stableSort(confidences);
      for (int i = sortedRuleSet[0].size() - 1;
        (i >= (sortedRuleSet[0].size() - m_numRules)) && (i >= 0); i--) {
        m_allTheRules[0].addElement(sortedRuleSet[0].elementAt(indices[i]));
        m_allTheRules[1].addElement(sortedRuleSet[1].elementAt(indices[i]));
        m_allTheRules[2].addElement(sortedRuleSet[2].elementAt(indices[i]));
        if (!m_car) {
          m_allTheRules[3].addElement(sortedRuleSet[3].elementAt(indices[i]));
          m_allTheRules[4].addElement(sortedRuleSet[4].elementAt(indices[i]));
          m_allTheRules[5].addElement(sortedRuleSet[5].elementAt(indices[i]));
        }
      }

      if (m_verbose) {
        if (m_Ls.size() > 1) {
          System.out.println(toString());
        }
      }

      // Lower the minimum support for the next cycle, clamped at the bound.
      if (m_minSupport == lowerBoundMinSupportToUse
        || m_minSupport - m_delta > lowerBoundMinSupportToUse)
        m_minSupport -= m_delta;
      else
        m_minSupport = lowerBoundMinSupportToUse;

      necSupport = Math.rint(m_minSupport * m_instances.numInstances());

      m_cycles++;
    } while ((m_allTheRules[0].size() < m_numRules)
      && (Utils.grOrEq(m_minSupport, lowerBoundMinSupportToUse))
      && (necSupport >= 1));
    // Undo the final (unused) decrement so m_minSupport reports the support
    // actually used in the last cycle.
    m_minSupport += m_delta;
  }

  /**
   * Drops rules whose support exceeds the upper bound on minimum support.
   * For CAR rules only slots 0..2 are populated, so 3..5 end up empty.
   */
  private void pruneRulesForUpperBoundSupport() {
    int necMaxSupport = (int) (m_upperBoundMinSupport
      * m_instances.numInstances() + 0.5);
    FastVector[] prunedRules = new FastVector[6];
    for (int i = 0; i < 6; i++) {
      prunedRules[i] = new FastVector();
    }
    for (int i = 0; i < m_allTheRules[0].size(); i++) {
      if (((ItemSet) m_allTheRules[1].elementAt(i)).support() <= necMaxSupport) {
        prunedRules[0].addElement(m_allTheRules[0].elementAt(i));
        prunedRules[1].addElement(m_allTheRules[1].elementAt(i));
        prunedRules[2].addElement(m_allTheRules[2].elementAt(i));
        if (!m_car) {
          prunedRules[3].addElement(m_allTheRules[3].elementAt(i));
          prunedRules[4].addElement(m_allTheRules[4].elementAt(i));
          prunedRules[5].addElement(m_allTheRules[5].elementAt(i));
        }
      }
    }
    m_allTheRules[0] = prunedRules[0];
    m_allTheRules[1] = prunedRules[1];
    m_allTheRules[2] = prunedRules[2];
    m_allTheRules[3] = prunedRules[3];
    m_allTheRules[4] = prunedRules[4];
    m_allTheRules[5] = prunedRules[5];
  }

  /**
   * Method that mines all class association rules with minimum support and
   * with a minimum confidence.
   *
   * @return a sorted array of FastVector (confidence depended) containing the
   *         rules and metric information
   * @param data the instances for which class association rules should be
   *        mined
   * @throws Exception if rules can't be built successfully
   */
  @Override
  public FastVector[] mineCARs(Instances data) throws Exception {
    m_car = true;  // force CAR mode regardless of the current option setting
    buildAssociations(data);
    return m_allTheRules;
  }

  /**
   * Gets the instances without the class attribute.
   *
   * @return the instances without the class attribute.
   */
  @Override
  public Instances getInstancesNoClass() {
    return m_instances;
  }

  /**
   * Gets only the class attribute of the instances.
   *
   * @return the class attribute of all instances.
   */
  @Override
  public Instances getInstancesOnlyClass() {
    return m_onlyClass;
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration listOptions() {
    String string1 = "\tThe required number of rules. (default = " + m_numRules
      + ")",
      string2 = "\tThe minimum confidence of a rule. (default = " + m_minMetric
        + ")",
      string3 = "\tThe delta by which the minimum support is decreased in\n",
      string4 = "\teach iteration. (default = " + m_delta + ")",
      string5 = "\tThe lower bound for the minimum support. (default = "
        + m_lowerBoundMinSupport + ")",
      string6 = "\tIf used, rules are tested for significance at\n",
      string7 = "\tthe given level. Slower. (default = no significance testing)",
      string8 = "\tIf set the itemsets found are also output. (default = no)",
      string9 = "\tIf set class association rules are mined. (default = no)",
      string10 = "\tThe class index. (default = last)",
      stringType = "\tThe metric type by which to rank rules. (default = "
        + "confidence)",
      stringZeroAsMissing = "\tTreat zero (i.e. first value of nominal attributes) as "
        + "missing",
      stringToStringDelimiters = "\tIf used, two characters to use as rule delimiters\n"
        + "\tin the result of toString: the first to delimit fields,\n"
        + "\tthe second to delimit items within fields.\n"
        + "\t(default = traditional toString result)";

    FastVector newVector = new FastVector(14);

    newVector.addElement(new Option(string1, "N", 1,
      "-N <required number of rules output>"));
    newVector.addElement(new Option(stringType, "T", 1,
      "-T <0=confidence | 1=lift | " + "2=leverage | 3=Conviction>"));
    newVector.addElement(new Option(string2, "C", 1,
      "-C <minimum metric score of a rule>"));
    newVector.addElement(new Option(string3 + string4, "D", 1,
      "-D <delta for minimum support>"));
    newVector.addElement(new Option("\tUpper bound for minimum support. "
      + "(default = 1.0)", "U", 1, "-U <upper bound for minimum support>"));
    newVector.addElement(new Option(string5, "M", 1,
      "-M <lower bound for minimum support>"));
    newVector.addElement(new Option(string6 + string7, "S", 1,
      "-S <significance level>"));
    newVector.addElement(new Option(string8, "I", 0, "-I"));
    newVector.addElement(new Option("\tRemove columns that contain "
      + "all missing values (default = no)", "R", 0, "-R"));
    newVector.addElement(new Option("\tReport progress iteratively. (default "
      + "= no)", "V", 0, "-V"));
    newVector.addElement(new Option(string9, "A", 0, "-A"));
    newVector.addElement(new Option(stringZeroAsMissing, "Z", 0, "-Z"));
    newVector.addElement(new Option(stringToStringDelimiters, "B", 1,
      "-B <toString delimiters>"));
    newVector.addElement(new Option(string10, "c", 1, "-c <the class index>"));

    return newVector.elements();
  }

  /**
   * Parses a given list of options.
* <p/> * <!-- options-start --> * Valid options are: * <p/> * * <pre> * -N &lt;required number of rules output&gt; * The required number of rules. (default = 10) * </pre> * * <pre> * -T &lt;0=confidence | 1=lift | 2=leverage | 3=Conviction&gt; * The metric type by which to rank rules. (default = confidence) * </pre> * * <pre> * -C &lt;minimum metric score of a rule&gt; * The minimum confidence of a rule. (default = 0.9) * </pre> * * <pre> * -D &lt;delta for minimum support&gt; * The delta by which the minimum support is decreased in * each iteration. (default = 0.05) * </pre> * * <pre> * -U &lt;upper bound for minimum support&gt; * Upper bound for minimum support. (default = 1.0) * </pre> * * <pre> * -M &lt;lower bound for minimum support&gt; * The lower bound for the minimum support. (default = 0.1) * </pre> * * <pre> * -S &lt;significance level&gt; * If used, rules are tested for significance at * the given level. Slower. (default = no significance testing) * </pre> * * <pre> * -I * If set the itemsets found are also output. (default = no) * </pre> * * <pre> * -R * Remove columns that contain all missing values (default = no) * </pre> * * <pre> * -V * Report progress iteratively. (default = no) * </pre> * * <pre> * -A * If set class association rules are mined. (default = no) * </pre> * * <pre> * -Z * Treat zero (i.e. first value of nominal attributes) as missing * </pre> * * <pre> * -B &lt;toString delimiters&gt; * If used, two characters to use as rule delimiters * in the result of toString: the first to delimit fields, * the second to delimit items within fields. * (default = traditional toString result) * </pre> * * <pre> * -c &lt;the class index&gt; * The class index. 
(default = last) * </pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { resetOptions(); String numRulesString = Utils.getOption('N', options), minConfidenceString = Utils .getOption('C', options), deltaString = Utils.getOption('D', options), maxSupportString = Utils .getOption('U', options), minSupportString = Utils.getOption('M', options), significanceLevelString = Utils.getOption('S', options), classIndexString = Utils .getOption('c', options), toStringDelimitersString = Utils.getOption( 'B', options); String metricTypeString = Utils.getOption('T', options); if (metricTypeString.length() != 0) { setMetricType(new SelectedTag(Integer.parseInt(metricTypeString), TAGS_SELECTION)); } if (numRulesString.length() != 0) { m_numRules = Integer.parseInt(numRulesString); } if (classIndexString.length() != 0) { if (classIndexString.equalsIgnoreCase("last")) { m_classIndex = -1; } else if (classIndexString.equalsIgnoreCase("first")) { m_classIndex = 0; } else { m_classIndex = Integer.parseInt(classIndexString); } } if (minConfidenceString.length() != 0) { m_minMetric = (new Double(minConfidenceString)).doubleValue(); } if (deltaString.length() != 0) { m_delta = (new Double(deltaString)).doubleValue(); } if (maxSupportString.length() != 0) { setUpperBoundMinSupport((new Double(maxSupportString)).doubleValue()); } if (minSupportString.length() != 0) { m_lowerBoundMinSupport = (new Double(minSupportString)).doubleValue(); } if (significanceLevelString.length() != 0) { m_significanceLevel = (new Double(significanceLevelString)).doubleValue(); } m_outputItemSets = Utils.getFlag('I', options); m_car = Utils.getFlag('A', options); m_verbose = Utils.getFlag('V', options); m_treatZeroAsMissing = Utils.getFlag('Z', options); setRemoveAllMissingCols(Utils.getFlag('R', options)); if (toStringDelimitersString.length() == 2) { 
m_toStringDelimiters = toStringDelimitersString; } } /** * Gets the current settings of the Apriori object. * * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { String[] options = new String[23]; int current = 0; if (m_outputItemSets) { options[current++] = "-I"; } if (getRemoveAllMissingCols()) { options[current++] = "-R"; } options[current++] = "-N"; options[current++] = "" + m_numRules; options[current++] = "-T"; options[current++] = "" + m_metricType; options[current++] = "-C"; options[current++] = "" + m_minMetric; options[current++] = "-D"; options[current++] = "" + m_delta; options[current++] = "-U"; options[current++] = "" + m_upperBoundMinSupport; options[current++] = "-M"; options[current++] = "" + m_lowerBoundMinSupport; options[current++] = "-S"; options[current++] = "" + m_significanceLevel; if (m_car) options[current++] = "-A"; if (m_verbose) options[current++] = "-V"; if (m_treatZeroAsMissing) { options[current++] = "-Z"; } options[current++] = "-c"; options[current++] = "" + m_classIndex; if (m_toStringDelimiters != null) { options[current++] = "-B"; options[current++] = m_toStringDelimiters; } while (current < options.length) { options[current++] = ""; } return options; } /** * Outputs the size of all the generated sets of itemsets and the rules. 
 *
 * @return a string representation of the model
 */
  @Override
  public String toString() {
    StringBuffer text = new StringBuffer();

    if (m_Ls.size() <= 1)
      return "\nNo large itemsets and rules found!\n";
    text.append("\nApriori\n=======\n\n");
    text.append("Minimum support: " + Utils.doubleToString(m_minSupport, 2)
      + " (" + ((int) (m_minSupport * m_instances.numInstances() + 0.5))
      + " instances)" + '\n');
    text.append("Minimum metric <");
    switch (m_metricType) {
    case CONFIDENCE:
      text.append("confidence>: ");
      break;
    case LIFT:
      text.append("lift>: ");
      break;
    case LEVERAGE:
      text.append("leverage>: ");
      break;
    case CONVICTION:
      text.append("conviction>: ");
      break;
    }
    text.append(Utils.doubleToString(m_minMetric, 2) + '\n');
    if (m_significanceLevel != -1)
      text.append("Significance level: "
        + Utils.doubleToString(m_significanceLevel, 2) + '\n');
    text.append("Number of cycles performed: " + m_cycles + '\n');
    text.append("\nGenerated sets of large itemsets:\n");
    if (!m_car) {
      // General association rules: report itemset sizes (and optionally the
      // itemsets themselves), then the ranked rules with all four metrics.
      for (int i = 0; i < m_Ls.size(); i++) {
        text.append("\nSize of set of large itemsets L(" + (i + 1) + "): "
          + ((FastVector) m_Ls.elementAt(i)).size() + '\n');
        if (m_outputItemSets) {
          text.append("\nLarge Itemsets L(" + (i + 1) + "):\n");
          for (int j = 0; j < ((FastVector) m_Ls.elementAt(i)).size(); j++)
            text.append(((AprioriItemSet) ((FastVector) m_Ls.elementAt(i))
              .elementAt(j)).toString(m_instances) + "\n");
        }
      }
      text.append("\nBest rules found:\n\n");
      if (m_toStringDelimiters != null) {
        // CSV header for the machine-readable delimiter mode.
        text.append("Number,Premise,Premise Support,Consequence,Consequence Support,Confidence,Lift,Leverage,LeverageT,Conviction\n");
      }
      for (int i = 0; i < m_allTheRules[0].size(); i++) {
        // Delimiter strings: either the user-supplied pair (machine-readable
        // output) or the traditional human-readable decorations, where the
        // ranking metric's value is wrapped in angle brackets.
        String outerDelim;
        String innerDelim;
        String stop;
        String implies;
        String confOpen;
        String confClose;
        String liftOpen;
        String liftClose;
        String levOpen;
        String levInner;
        String levClose;
        String convOpen;
        String convClose;
        if (m_toStringDelimiters != null) {
          outerDelim = m_toStringDelimiters.substring(0, 1);
          innerDelim = m_toStringDelimiters.substring(1, 2);
          stop = outerDelim;
          implies = outerDelim;
          confOpen = outerDelim;
          confClose = "";
          liftOpen = outerDelim;
          liftClose = "";
          levOpen = outerDelim;
          levInner = outerDelim;
          levClose = "";
          convOpen = outerDelim;
          convClose = "";
        } else {
          outerDelim = " ";
          innerDelim = " ";
          stop = ". ";
          implies = " ==> ";
          confOpen = " " + (m_metricType == CONFIDENCE ? "<" : "") + "conf:(";
          confClose = ")" + (m_metricType == CONFIDENCE ? ">" : "");
          liftOpen = (m_metricType == LIFT ? " <" : "") + " lift:(";
          liftClose = ")" + (m_metricType == LIFT ? ">" : "");
          levOpen = (m_metricType == LEVERAGE ? " <" : "") + " lev:(";
          levInner = ")" + " [";
          levClose = "]" + (m_metricType == LEVERAGE ? ">" : "");
          convOpen = (m_metricType == CONVICTION ? " <" : "") + " conv:(";
          convClose = ")" + (m_metricType == CONVICTION ? ">" : "");
        }
        char odc = outerDelim.charAt(0);
        char idc = innerDelim.charAt(0);

        // Rule number, padded to the width needed for m_numRules.
        String n = Utils.doubleToString((double) i + 1,
          (int) (Math.log(m_numRules) / Math.log(10) + 1), 0);
        String premise = ((AprioriItemSet) m_allTheRules[0].elementAt(i))
          .toString(m_instances, odc, idc);
        String consequence = ((AprioriItemSet) m_allTheRules[1].elementAt(i))
          .toString(m_instances, odc, idc);
        String confidence = Utils.doubleToString(
          ((Double) m_allTheRules[2].elementAt(i)).doubleValue(), 2);
        String lift = Utils.doubleToString(
          ((Double) m_allTheRules[3].elementAt(i)).doubleValue(), 2);
        String leverage = Utils.doubleToString(
          ((Double) m_allTheRules[4].elementAt(i)).doubleValue(), 2);
        String conviction = Utils.doubleToString(
          ((Double) m_allTheRules[5].elementAt(i)).doubleValue(), 2);
        // Leverage expressed as an (integer) number of instances.
        int leverageT = (int) (((Double) m_allTheRules[4].elementAt(i))
          .doubleValue() * m_instances.numInstances());

        text.append(n).append(stop);
        text.append(premise).append(implies).append(consequence);
        text.append(confOpen).append(confidence).append(confClose);
        text.append(liftOpen).append(lift).append(liftClose);
        text.append(levOpen).append(leverage).append(levInner)
          .append(leverageT).append(levClose);
        text.append(convOpen).append(conviction).append(convClose);
        text.append('\n');
      }
    } else {
      // CAR mode: itemsets are LabeledItemSets and only confidence is
      // reported; consequences are rendered against m_onlyClass.
      for (int i = 0; i < m_Ls.size(); i++) {
        text.append("\nSize of set of large itemsets L(" + (i + 1) + "): "
          + ((FastVector) m_Ls.elementAt(i)).size() + '\n');
        if (m_outputItemSets) {
          text.append("\nLarge Itemsets L(" + (i + 1) + "):\n");
          for (int j = 0; j < ((FastVector) m_Ls.elementAt(i)).size(); j++) {
            text.append(((ItemSet) ((FastVector) m_Ls.elementAt(i))
              .elementAt(j)).toString(m_instances) + "\n");
            text.append(((LabeledItemSet) ((FastVector) m_Ls.elementAt(i))
              .elementAt(j)).m_classLabel + " ");
            text.append(((LabeledItemSet) ((FastVector) m_Ls.elementAt(i))
              .elementAt(j)).support() + "\n");
          }
        }
      }
      text.append("\nBest rules found:\n\n");
      if (m_toStringDelimiters != null) {
        text.append("Number,Premise,Premise Support,Consequence,Consequence Support,Confidence\n");
      }
      for (int i = 0; i < m_allTheRules[0].size(); i++) {
        String outerDelim;
        String innerDelim;
        String stop;
        String implies;
        String confOpen;
        String confClose;
        if (m_toStringDelimiters != null) {
          outerDelim = m_toStringDelimiters.substring(0, 1);
          innerDelim = m_toStringDelimiters.substring(1, 2);
          stop = outerDelim;
          implies = outerDelim;
          confOpen = outerDelim;
          confClose = "";
        } else {
          outerDelim = " ";
          innerDelim = " ";
          stop = ". ";
          implies = " ==> ";
          confOpen = " " + "conf:(";
          confClose = ")";
        }
        char odc = outerDelim.charAt(0);
        char idc = innerDelim.charAt(0);

        String n = Utils.doubleToString((double) i + 1,
          (int) (Math.log(m_numRules) / Math.log(10) + 1), 0);
        String premise = ((ItemSet) m_allTheRules[0].elementAt(i)).toString(
          m_instances, odc, idc);
        String consequence = ((ItemSet) m_allTheRules[1].elementAt(i))
          .toString(m_onlyClass, odc, idc);
        String confidence = Utils.doubleToString(
          ((Double) m_allTheRules[2].elementAt(i)).doubleValue(), 2);

        text.append(n).append(stop).append(premise).append(implies)
          .append(consequence).append(confOpen).append(confidence)
          .append(confClose);
        text.append('\n');
      }
    }
    return text.toString();
  }

  /**
   * Returns the metric string for the chosen metric type.
   *
   * @return a string describing the used metric for the interestingness of a
   *         class association rule
   */
  @Override
  public String metricString() {
    switch (m_metricType) {
    case LIFT:
      // NOTE(review): returns "lif", not "lift" — possibly an intentional
      // abbreviation consumed by downstream parsers; confirm before changing.
      return "lif";
    case LEVERAGE:
      return "leverage";
    case CONVICTION:
      return "conviction";
    default:
      return "conf";
    }
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String removeAllMissingColsTipText() {
    return "Remove columns with all missing values.";
  }

  /**
   * Remove columns containing all missing values.
   *
   * @param r true if cols are to be removed.
   */
  public void setRemoveAllMissingCols(boolean r) {
    m_removeMissingCols = r;
  }

  /**
   * Returns whether columns containing all missing values are to be removed
   *
   * @return true if columns are to be removed.
   */
  public boolean getRemoveAllMissingCols() {
    return m_removeMissingCols;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String upperBoundMinSupportTipText() {
    return "Upper bound for minimum support.
 Start iteratively decreasing "
      + "minimum support from this value.";
  }

  /**
   * Get the value of upperBoundMinSupport.
   *
   * @return Value of upperBoundMinSupport.
   */
  public double getUpperBoundMinSupport() {
    return m_upperBoundMinSupport;
  }

  /**
   * Set the value of upperBoundMinSupport.
   *
   * @param v Value to assign to upperBoundMinSupport.
   */
  public void setUpperBoundMinSupport(double v) {
    m_upperBoundMinSupport = v;
  }

  /**
   * Sets the class index (1-based; -1 means last attribute).
   *
   * @param index the class index
   */
  @Override
  public void setClassIndex(int index) {
    m_classIndex = index;
  }

  /**
   * Gets the class index
   *
   * @return the index of the class attribute
   */
  public int getClassIndex() {
    return m_classIndex;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String classIndexTipText() {
    return "Index of the class attribute. If set to -1, the last attribute is taken as class attribute.";
  }

  /**
   * Sets class association rule mining
   *
   * @param flag if class association rules are mined, false otherwise
   */
  public void setCar(boolean flag) {
    m_car = flag;
  }

  /**
   * Gets whether class association rules are mined
   *
   * @return true if class association rules are mined, false otherwise
   */
  public boolean getCar() {
    return m_car;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String carTipText() {
    return "If enabled class association rules are mined instead of (general) association rules.";
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String lowerBoundMinSupportTipText() {
    return "Lower bound for minimum support.";
  }

  /**
   * Get the value of lowerBoundMinSupport.
   *
   * @return Value of lowerBoundMinSupport.
   */
  public double getLowerBoundMinSupport() {
    return m_lowerBoundMinSupport;
  }

  /**
   * Set the value of lowerBoundMinSupport.
   *
   * @param v Value to assign to lowerBoundMinSupport.
   */
  public void setLowerBoundMinSupport(double v) {
    m_lowerBoundMinSupport = v;
  }

  /**
   * Get the metric type
   *
   * @return the type of metric to use for ranking rules
   */
  public SelectedTag getMetricType() {
    return new SelectedTag(m_metricType, TAGS_SELECTION);
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String metricTypeTipText() {
    return "Set the type of metric by which to rank rules. Confidence is "
      + "the proportion of the examples covered by the premise that are also "
      + "covered by the consequence (Class association rules can only be mined using confidence). Lift is confidence divided by the "
      + "proportion of all examples that are covered by the consequence. This "
      + "is a measure of the importance of the association that is independent "
      + "of support. Leverage is the proportion of additional examples covered "
      + "by both the premise and consequence above those expected if the "
      + "premise and consequence were independent of each other. The total "
      + "number of examples that this represents is presented in brackets "
      + "following the leverage. Conviction is "
      + "another measure of departure from independence. Conviction is given "
      + "by P(premise)P(!consequence) / P(premise, !consequence).";
  }

  /**
   * Set the metric type for ranking rules.
   *
   * Note: this also resets m_minMetric to a metric-specific default, so any
   * previously configured minimum metric score is overwritten.
   *
   * @param d the type of metric
   */
  public void setMetricType(SelectedTag d) {
    if (d.getTags() == TAGS_SELECTION) {
      m_metricType = d.getSelectedTag().getID();
    }

    if (m_metricType == CONFIDENCE) {
      setMinMetric(0.9);
    }

    if (m_metricType == LIFT || m_metricType == CONVICTION) {
      setMinMetric(1.1);
    }

    if (m_metricType == LEVERAGE) {
      setMinMetric(0.1);
    }
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String minMetricTipText() {
    return "Minimum metric score. Consider only rules with scores higher than "
      + "this value.";
  }

  /**
   * Get the value of minConfidence.
   *
   * @return Value of minConfidence.
   */
  public double getMinMetric() {
    return m_minMetric;
  }

  /**
   * Set the value of minConfidence.
   *
   * @param v Value to assign to minConfidence.
   */
  public void setMinMetric(double v) {
    m_minMetric = v;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String numRulesTipText() {
    return "Number of rules to find.";
  }

  /**
   * Get the value of numRules.
   *
   * @return Value of numRules.
   */
  public int getNumRules() {
    return m_numRules;
  }

  /**
   * Set the value of numRules.
   *
   * @param v Value to assign to numRules.
   */
  public void setNumRules(int v) {
    m_numRules = v;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String deltaTipText() {
    return "Iteratively decrease support by this factor. Reduces support "
      + "until min support is reached or required number of rules has been "
      + "generated.";
  }

  /**
   * Get the value of delta.
   *
   * @return Value of delta.
   */
  public double getDelta() {
    return m_delta;
  }

  /**
   * Set the value of delta.
   *
   * @param v Value to assign to delta.
   */
  public void setDelta(double v) {
    m_delta = v;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String significanceLevelTipText() {
    return "Significance level. Significance test (confidence metric only).";
  }

  /**
   * Get the value of significanceLevel.
   *
   * @return Value of significanceLevel.
   */
  public double getSignificanceLevel() {
    return m_significanceLevel;
  }

  /**
   * Set the value of significanceLevel.
   *
   * @param v Value to assign to significanceLevel.
   */
  public void setSignificanceLevel(double v) {
    m_significanceLevel = v;
  }

  /**
   * Sets whether itemsets are output as well
   *
   * @param flag true if itemsets are to be output as well
   */
  public void setOutputItemSets(boolean flag) {
    m_outputItemSets = flag;
  }

  /**
   * Gets whether itemsets are output as well
   *
   * @return true if itemsets are output as well
   */
  public boolean getOutputItemSets() {
    return m_outputItemSets;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String outputItemSetsTipText() {
    return "If enabled the itemsets are output as well.";
  }

  /**
   * Sets verbose mode
   *
   * @param flag true if algorithm should be run in verbose mode
   */
  public void setVerbose(boolean flag) {
    m_verbose = flag;
  }

  /**
   * Gets whether algorithm is run in verbose mode
   *
   * @return true if algorithm is run in verbose mode
   */
  public boolean getVerbose() {
    return m_verbose;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String verboseTipText() {
    return "If enabled the algorithm will be run in verbose mode.";
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String
treatZeroAsMissingTipText() { return "If enabled, zero (that is, the first value of a nominal) is " + "treated in the same way as a missing value."; } /** * Sets whether zeros (i.e. the first value of a nominal attribute) should be * treated as missing values. * * @param z true if zeros should be treated as missing values. */ public void setTreatZeroAsMissing(boolean z) { m_treatZeroAsMissing = z; } /** * Gets whether zeros (i.e. the first value of a nominal attribute) is to be * treated int he same way as missing values. * * @return true if zeros are to be treated like missing values. */ public boolean getTreatZeroAsMissing() { return m_treatZeroAsMissing; } /** * Method that finds all large itemsets for the given set of instances. * * @throws Exception if an attribute is numeric */ private void findLargeItemSets() throws Exception { FastVector kMinusOneSets, kSets; Hashtable hashtable; int necSupport, necMaxSupport, i = 0; // Find large itemsets // minimum support necSupport = (int) (m_minSupport * m_instances.numInstances() + 0.5); necMaxSupport = (int) (m_upperBoundMinSupport * m_instances.numInstances() + 0.5); kSets = AprioriItemSet.singletons(m_instances, m_treatZeroAsMissing); if (m_treatZeroAsMissing) { AprioriItemSet.upDateCountersTreatZeroAsMissing(kSets, m_instances); } else { AprioriItemSet.upDateCounters(kSets, m_instances); } kSets = AprioriItemSet.deleteItemSets(kSets, necSupport, m_instances.numInstances()); if (kSets.size() == 0) return; do { m_Ls.addElement(kSets); kMinusOneSets = kSets; kSets = AprioriItemSet.mergeAllItemSets(kMinusOneSets, i, m_instances.numInstances()); hashtable = AprioriItemSet.getHashtable(kMinusOneSets, kMinusOneSets.size()); m_hashtables.addElement(hashtable); kSets = AprioriItemSet.pruneItemSets(kSets, hashtable); if (m_treatZeroAsMissing) { AprioriItemSet.upDateCountersTreatZeroAsMissing(kSets, m_instances); } else { AprioriItemSet.upDateCounters(kSets, m_instances); } kSets = AprioriItemSet.deleteItemSets(kSets, 
necSupport, m_instances.numInstances()); i++; } while (kSets.size() > 0); } /** * Method that finds all association rules and performs significance test. * * @throws Exception if an attribute is numeric */ private void findRulesBruteForce() throws Exception { FastVector[] rules; // Build rules for (int j = 1; j < m_Ls.size(); j++) { FastVector currentItemSets = (FastVector) m_Ls.elementAt(j); Enumeration enumItemSets = currentItemSets.elements(); while (enumItemSets.hasMoreElements()) { AprioriItemSet currentItemSet = (AprioriItemSet) enumItemSets .nextElement(); // AprioriItemSet currentItemSet = new // AprioriItemSet((ItemSet)enumItemSets.nextElement()); rules = currentItemSet.generateRulesBruteForce(m_minMetric, m_metricType, m_hashtables, j + 1, m_instances.numInstances(), m_significanceLevel); for (int k = 0; k < rules[0].size(); k++) { m_allTheRules[0].addElement(rules[0].elementAt(k)); m_allTheRules[1].addElement(rules[1].elementAt(k)); m_allTheRules[2].addElement(rules[2].elementAt(k)); m_allTheRules[3].addElement(rules[3].elementAt(k)); m_allTheRules[4].addElement(rules[4].elementAt(k)); m_allTheRules[5].addElement(rules[5].elementAt(k)); } } } } /** * Method that finds all association rules. 
* * @throws Exception if an attribute is numeric */ private void findRulesQuickly() throws Exception { FastVector[] rules; // Build rules for (int j = 1; j < m_Ls.size(); j++) { FastVector currentItemSets = (FastVector) m_Ls.elementAt(j); Enumeration enumItemSets = currentItemSets.elements(); while (enumItemSets.hasMoreElements()) { AprioriItemSet currentItemSet = (AprioriItemSet) enumItemSets .nextElement(); // AprioriItemSet currentItemSet = new // AprioriItemSet((ItemSet)enumItemSets.nextElement()); rules = currentItemSet.generateRules(m_minMetric, m_hashtables, j + 1); for (int k = 0; k < rules[0].size(); k++) { m_allTheRules[0].addElement(rules[0].elementAt(k)); m_allTheRules[1].addElement(rules[1].elementAt(k)); m_allTheRules[2].addElement(rules[2].elementAt(k)); if (rules.length > 3) { m_allTheRules[3].addElement(rules[3].elementAt(k)); m_allTheRules[4].addElement(rules[4].elementAt(k)); m_allTheRules[5].addElement(rules[5].elementAt(k)); } } } } } /** * * Method that finds all large itemsets for class association rules for the * given set of instances. 
* * @throws Exception if an attribute is numeric */ private void findLargeCarItemSets() throws Exception { FastVector kMinusOneSets, kSets; Hashtable hashtable; int necSupport, necMaxSupport, i = 0; // Find large itemsets // minimum support double nextMinSupport = m_minSupport * m_instances.numInstances(); double nextMaxSupport = m_upperBoundMinSupport * m_instances.numInstances(); if (Math.rint(nextMinSupport) == nextMinSupport) { necSupport = (int) nextMinSupport; } else { necSupport = Math.round((float) (nextMinSupport + 0.5)); } if (Math.rint(nextMaxSupport) == nextMaxSupport) { necMaxSupport = (int) nextMaxSupport; } else { necMaxSupport = Math.round((float) (nextMaxSupport + 0.5)); } // find item sets of length one kSets = LabeledItemSet.singletons(m_instances, m_onlyClass); LabeledItemSet.upDateCounters(kSets, m_instances, m_onlyClass); // check if a item set of lentgh one is frequent, if not delete it kSets = LabeledItemSet.deleteItemSets(kSets, necSupport, m_instances.numInstances()); if (kSets.size() == 0) return; do { m_Ls.addElement(kSets); kMinusOneSets = kSets; kSets = LabeledItemSet.mergeAllItemSets(kMinusOneSets, i, m_instances.numInstances()); hashtable = LabeledItemSet.getHashtable(kMinusOneSets, kMinusOneSets.size()); kSets = LabeledItemSet.pruneItemSets(kSets, hashtable); LabeledItemSet.upDateCounters(kSets, m_instances, m_onlyClass); kSets = LabeledItemSet.deleteItemSets(kSets, necSupport, m_instances.numInstances()); i++; } while (kSets.size() > 0); } /** * Method that finds all class association rules. 
* * @throws Exception if an attribute is numeric */ private void findCarRulesQuickly() throws Exception { FastVector[] rules; // Build rules for (int j = 0; j < m_Ls.size(); j++) { FastVector currentLabeledItemSets = (FastVector) m_Ls.elementAt(j); Enumeration enumLabeledItemSets = currentLabeledItemSets.elements(); while (enumLabeledItemSets.hasMoreElements()) { LabeledItemSet currentLabeledItemSet = (LabeledItemSet) enumLabeledItemSets .nextElement(); rules = currentLabeledItemSet.generateRules(m_minMetric, false); for (int k = 0; k < rules[0].size(); k++) { m_allTheRules[0].addElement(rules[0].elementAt(k)); m_allTheRules[1].addElement(rules[1].elementAt(k)); m_allTheRules[2].addElement(rules[2].elementAt(k)); } } } } /** * returns all the rules * * @return all the rules * @see #m_allTheRules */ public FastVector[] getAllTheRules() { return m_allTheRules; } @Override public AssociationRules getAssociationRules() { List<AssociationRule> rules = new ArrayList<AssociationRule>(); if (m_allTheRules != null && m_allTheRules.length > 3) { for (int i = 0; i < m_allTheRules[0].size(); i++) { // Construct the Lists for the premise and consequence List<Item> premise = new ArrayList<Item>(); List<Item> consequence = new ArrayList<Item>(); AprioriItemSet premiseSet = (AprioriItemSet) m_allTheRules[0].get(i); AprioriItemSet consequenceSet = (AprioriItemSet) m_allTheRules[1] .get(i); for (int j = 0; j < m_instances.numAttributes(); j++) { if (premiseSet.m_items[j] != -1) { try { Item newItem = new NominalItem(m_instances.attribute(j), premiseSet.m_items[j]); premise.add(newItem); } catch (Exception ex) { ex.printStackTrace(); } } if (consequenceSet.m_items[j] != -1) { try { Item newItem = new NominalItem(m_instances.attribute(j), consequenceSet.m_items[j]); consequence.add(newItem); } catch (Exception ex) { ex.printStackTrace(); } } } // get the constituents of the metrics int totalTrans = premiseSet.m_totalTransactions; int totalSupport = consequenceSet.m_counter; int 
premiseSupport = premiseSet.m_counter; // reconstruct consequenceSupport using Lift: double lift = ((Double) m_allTheRules[3].get(i)).doubleValue(); double conf = ((Double) m_allTheRules[2].get(i)).doubleValue(); int consequenceSupport = (int) ((totalTrans * conf) / lift); // map the primary metric DefaultAssociationRule.METRIC_TYPE metric = null; switch (m_metricType) { case CONFIDENCE: metric = DefaultAssociationRule.METRIC_TYPE.CONFIDENCE; break; case LIFT: metric = DefaultAssociationRule.METRIC_TYPE.LIFT; break; case LEVERAGE: metric = DefaultAssociationRule.METRIC_TYPE.LEVERAGE; break; case CONVICTION: metric = DefaultAssociationRule.METRIC_TYPE.CONVICTION; break; } DefaultAssociationRule newRule = new DefaultAssociationRule(premise, consequence, metric, premiseSupport, consequenceSupport, totalSupport, totalTrans); rules.add(newRule); } } return new AssociationRules(rules, this); } /** * Gets a list of the names of the metrics output for each rule. This list * should be the same (in terms of the names and order thereof) as that * produced by AssociationRule.getMetricNamesForRule(). * * @return an array of the names of the metrics available for each rule * learned by this producer. */ @Override public String[] getRuleMetricNames() { String[] metricNames = new String[DefaultAssociationRule.TAGS_SELECTION.length]; for (int i = 0; i < DefaultAssociationRule.TAGS_SELECTION.length; i++) { metricNames[i] = DefaultAssociationRule.TAGS_SELECTION[i].getReadable(); } return metricNames; } /** * Returns true if this AssociationRulesProducer can actually produce rules. * Most implementing classes will always return true from this method * (obviously :-)). However, an implementing class that actually acts as a * wrapper around things that may or may not implement * AssociationRulesProducer will want to return false if the thing they wrap * can't produce rules. 
* * @return true if this producer can produce rules in its current * configuration */ @Override public boolean canProduceRules() { return true; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9722 $"); } /** * Main method. * * @param args the commandline options */ public static void main(String[] args) { runAssociator(new Apriori(), args); } }
62,894
30.894016
442
java
tsml-java
tsml-java-master/src/main/java/weka/associations/AprioriItemSet.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * AprioriItemSet.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.associations;

import java.io.Serializable;
import java.util.Enumeration;
import java.util.Hashtable;

import weka.core.ContingencyTables;
import weka.core.FastVector;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;

/**
 * Class for storing a set of items. Item sets are stored in a lexicographic
 * order, which is determined by the header information of the set of instances
 * used for generating the set of items. All methods in this class assume that
 * item sets are stored in lexicographic order. The class provides methods that
 * are used in the Apriori algorithm to construct association rules.
 *
 * <p>Throughout this class the inherited {@code m_items} array holds one slot
 * per attribute, with the value index of the attribute when the item is in the
 * set and {@code -1} as the "not in this set" sentinel (see e.g.
 * {@link #subtract(AprioriItemSet)} and {@link #singletons}).
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @author Stefan Mutter (mutter@cs.waikato.ac.nz)
 * @version $Revision: 9722 $
 */
public class AprioriItemSet extends ItemSet implements Serializable,
  RevisionHandler {

  /** for serialization */
  static final long serialVersionUID = 7684467755712672058L;

  /**
   * Constructor
   *
   * @param totalTrans the total number of transactions in the data
   */
  public AprioriItemSet(int totalTrans) {
    super(totalTrans);
  }

  /**
   * Outputs the confidence for a rule.
   *
   * @param premise the premise of the rule
   * @param consequence the consequence of the rule
   * @return the confidence on the training data
   */
  public static double confidenceForRule(AprioriItemSet premise,
    AprioriItemSet consequence) {

    // consequence.m_counter holds the joint count premise&consequence here;
    // premise.m_counter holds the unconditioned premise count.
    return (double) consequence.m_counter / (double) premise.m_counter;
  }

  /**
   * Outputs the lift for a rule. Lift is defined as:<br>
   * confidence / prob(consequence)
   *
   * @param premise the premise of the rule
   * @param consequence the consequence of the rule
   * @param consequenceCount how many times the consequence occurs independent
   *          of the premise
   * @return the lift on the training data
   */
  public double liftForRule(AprioriItemSet premise,
    AprioriItemSet consequence, int consequenceCount) {
    double confidence = confidenceForRule(premise, consequence);

    return confidence
      / ((double) consequenceCount / (double) m_totalTransactions);
  }

  /**
   * Outputs the leverage for a rule. Leverage is defined as: <br>
   * prob(premise & consequence) - (prob(premise) * prob(consequence))
   *
   * @param premise the premise of the rule
   * @param consequence the consequence of the rule
   * @param premiseCount how many times the premise occurs independent of the
   *          consequent
   * @param consequenceCount how many times the consequence occurs independent
   *          of the premise
   * @return the leverage on the training data
   */
  public double leverageForRule(AprioriItemSet premise,
    AprioriItemSet consequence, int premiseCount, int consequenceCount) {
    double coverageForItemSet = (double) consequence.m_counter
      / (double) m_totalTransactions;
    double expectedCoverageIfIndependent =
      ((double) premiseCount / (double) m_totalTransactions)
        * ((double) consequenceCount / (double) m_totalTransactions);
    double lev = coverageForItemSet - expectedCoverageIfIndependent;
    return lev;
  }

  /**
   * Outputs the conviction for a rule. Conviction is defined as: <br>
   * prob(premise) * prob(!consequence) / prob(premise & !consequence)
   *
   * @param premise the premise of the rule
   * @param consequence the consequence of the rule
   * @param premiseCount how many times the premise occurs independent of the
   *          consequent
   * @param consequenceCount how many times the consequence occurs independent
   *          of the premise
   * @return the conviction on the training data
   */
  public double convictionForRule(AprioriItemSet premise,
    AprioriItemSet consequence, int premiseCount, int consequenceCount) {
    double num = (double) premiseCount
      * (double) (m_totalTransactions - consequenceCount)
      / m_totalTransactions;
    // +1 keeps the denominator non-zero when the premise always implies the
    // consequence (premiseCount == consequence.m_counter).
    double denom = ((premiseCount - consequence.m_counter) + 1);

    // Diagnostic only: neither factor should ever be negative.
    if (num < 0 || denom < 0) {
      System.err.println("*** " + num + " " + denom);
      System.err.println("premis count: " + premiseCount
        + " consequence count " + consequenceCount + " total trans "
        + m_totalTransactions);
    }
    return num / denom;
  }

  /**
   * Generates all rules for an item set.
   *
   * <p>The returned array is indexed as: 0 = premises, 1 = consequences,
   * 2 = confidence values, 3 = lift, 4 = leverage, 5 = conviction.
   *
   * @param minConfidence the minimum confidence the rules have to have
   * @param hashtables containing all(!) previously generated item sets
   * @param numItemsInSet the size of the item set for which the rules are to
   *          be generated
   * @return all the rules with minimum confidence for the given item set
   */
  public FastVector[] generateRules(double minConfidence,
    FastVector hashtables, int numItemsInSet) {

    FastVector premises = new FastVector(), consequences = new FastVector(),
      conf = new FastVector();
    // TODO
    FastVector lift = new FastVector(), lev = new FastVector(),
      conv = new FastVector();
    // TODO
    FastVector[] rules = new FastVector[6], moreResults;
    AprioriItemSet premise, consequence;
    // hashtable of the (numItemsInSet-1)-item sets, used to look up premise
    // counts.
    Hashtable hashtable = (Hashtable) hashtables.elementAt(numItemsInSet - 2);

    // Generate all rules with one item in the consequence.
    for (int i = 0; i < m_items.length; i++)
      if (m_items[i] != -1) {
        premise = new AprioriItemSet(m_totalTransactions);
        consequence = new AprioriItemSet(m_totalTransactions);
        premise.m_items = new int[m_items.length];
        consequence.m_items = new int[m_items.length];
        consequence.m_counter = m_counter;

        for (int j = 0; j < m_items.length; j++)
          consequence.m_items[j] = -1;
        System.arraycopy(m_items, 0, premise.m_items, 0, m_items.length);
        // Move item i from the premise into the consequence.
        premise.m_items[i] = -1;

        consequence.m_items[i] = m_items[i];
        premise.m_counter = ((Integer) hashtable.get(premise)).intValue();

        // Singleton table (index 0) gives the unconditioned consequence count.
        Hashtable hashtableForConsequence =
          (Hashtable) hashtables.elementAt(0);
        int consequenceUnconditionedCounter =
          ((Integer) hashtableForConsequence.get(consequence)).intValue();

        premises.addElement(premise);
        consequences.addElement(consequence);
        conf.addElement(new Double(confidenceForRule(premise, consequence)));

        double tempLift = liftForRule(premise, consequence,
          consequenceUnconditionedCounter);
        double tempLev = leverageForRule(premise, consequence,
          premise.m_counter, consequenceUnconditionedCounter);
        double tempConv = convictionForRule(premise, consequence,
          premise.m_counter, consequenceUnconditionedCounter);
        lift.addElement(new Double(tempLift));
        lev.addElement(new Double(tempLev));
        conv.addElement(new Double(tempConv));
      }
    rules[0] = premises;
    rules[1] = consequences;
    rules[2] = conf;
    rules[3] = lift;
    rules[4] = lev;
    rules[5] = conv;
    pruneRules(rules, minConfidence);

    // Generate all the other rules
    moreResults = moreComplexRules(rules, numItemsInSet, 1, minConfidence,
      hashtables);
    if (moreResults != null)
      for (int i = 0; i < moreResults[0].size(); i++) {
        rules[0].addElement(moreResults[0].elementAt(i));
        rules[1].addElement(moreResults[1].elementAt(i));
        rules[2].addElement(moreResults[2].elementAt(i));
        // TODO
        rules[3].addElement(moreResults[3].elementAt(i));
        rules[4].addElement(moreResults[4].elementAt(i));
        rules[5].addElement(moreResults[5].elementAt(i));
      }
    return rules;
  }

  /**
   * Generates all significant rules for an item set.
   *
   * <p>Every premise/consequence split of the item set is enumerated by
   * interpreting the binary digits of the loop counter {@code j}: a 1-bit
   * assigns the corresponding item to the premise, a 0-bit to the consequence.
   *
   * @param minMetric the minimum metric (confidence, lift, leverage,
   *          improvement) the rules have to have
   * @param metricType (confidence=0, lift, leverage, improvement)
   * @param hashtables containing all(!) previously generated item sets
   * @param numItemsInSet the size of the item set for which the rules are to
   *          be generated
   * @param numTransactions the total number of transactions in the data
   * @param significanceLevel the significance level for testing the rules
   *          (-1 disables the chi-squared test)
   * @return all the rules with minimum metric for the given item set, indexed
   *         as in {@link #generateRules}
   * @exception Exception if something goes wrong
   */
  public final FastVector[] generateRulesBruteForce(double minMetric,
    int metricType, FastVector hashtables, int numItemsInSet,
    int numTransactions, double significanceLevel) throws Exception {

    FastVector premises = new FastVector(), consequences = new FastVector(),
      conf = new FastVector(), lift = new FastVector(),
      lev = new FastVector(), conv = new FastVector();
    FastVector[] rules = new FastVector[6];
    AprioriItemSet premise, consequence;
    Hashtable hashtableForPremise, hashtableForConsequence;
    int numItemsInPremise, help, max, consequenceUnconditionedCounter;
    double[][] contingencyTable = new double[2][2];
    double metric, chiSquared = 0;

    // Generate all possible rules for this item set and test their
    // significance.
    max = (int) Math.pow(2, numItemsInSet);
    for (int j = 1; j < max; j++) {
      // Count the 1-bits of j = number of items assigned to the premise.
      numItemsInPremise = 0;
      help = j;
      while (help > 0) {
        if (help % 2 == 1)
          numItemsInPremise++;
        help /= 2;
      }
      // Skip the split with an empty consequence.
      if (numItemsInPremise < numItemsInSet) {
        hashtableForPremise = (Hashtable) hashtables
          .elementAt(numItemsInPremise - 1);
        hashtableForConsequence = (Hashtable) hashtables
          .elementAt(numItemsInSet - numItemsInPremise - 1);
        premise = new AprioriItemSet(m_totalTransactions);
        consequence = new AprioriItemSet(m_totalTransactions);
        premise.m_items = new int[m_items.length];
        consequence.m_items = new int[m_items.length];
        consequence.m_counter = m_counter;
        help = j;
        // Distribute the items over premise/consequence according to the
        // bits of j; absent attributes stay -1 on both sides.
        for (int i = 0; i < m_items.length; i++)
          if (m_items[i] != -1) {
            if (help % 2 == 1) {
              premise.m_items[i] = m_items[i];
              consequence.m_items[i] = -1;
            } else {
              premise.m_items[i] = -1;
              consequence.m_items[i] = m_items[i];
            }
            help /= 2;
          } else {
            premise.m_items[i] = -1;
            consequence.m_items[i] = -1;
          }
        premise.m_counter = ((Integer) hashtableForPremise.get(premise))
          .intValue();
        consequenceUnconditionedCounter =
          ((Integer) hashtableForConsequence.get(consequence)).intValue();

        if (significanceLevel != -1) {
          // 2x2 contingency table of premise vs. consequence occurrence.
          contingencyTable[0][0] = (consequence.m_counter);
          contingencyTable[0][1] =
            (premise.m_counter - consequence.m_counter);
          contingencyTable[1][0] =
            (consequenceUnconditionedCounter - consequence.m_counter);
          contingencyTable[1][1] = (numTransactions - premise.m_counter
            - consequenceUnconditionedCounter + consequence.m_counter);
          chiSquared = ContingencyTables.chiSquared(contingencyTable, false);
        }
        if (metricType == 0) {
          // Primary metric is confidence.
          metric = confidenceForRule(premise, consequence);

          if ((!(metric < minMetric))
            && (significanceLevel == -1
              || !(chiSquared > significanceLevel))) {
            premises.addElement(premise);
            consequences.addElement(consequence);
            conf.addElement(new Double(metric));
            lift.addElement(new Double(liftForRule(premise, consequence,
              consequenceUnconditionedCounter)));
            lev.addElement(new Double(leverageForRule(premise, consequence,
              premise.m_counter, consequenceUnconditionedCounter)));
            conv.addElement(new Double(convictionForRule(premise, consequence,
              premise.m_counter, consequenceUnconditionedCounter)));
          }
        } else {
          double tempConf = confidenceForRule(premise, consequence);
          double tempLift = liftForRule(premise, consequence,
            consequenceUnconditionedCounter);
          double tempLev = leverageForRule(premise, consequence,
            premise.m_counter, consequenceUnconditionedCounter);
          double tempConv = convictionForRule(premise, consequence,
            premise.m_counter, consequenceUnconditionedCounter);
          switch (metricType) {
          case 1:
            metric = tempLift;
            break;
          case 2:
            metric = tempLev;
            break;
          case 3:
            metric = tempConv;
            break;
          default:
            throw new Exception("ItemSet: Unknown metric type!");
          }
          if (!(metric < minMetric)
            && (significanceLevel == -1
              || !(chiSquared > significanceLevel))) {
            premises.addElement(premise);
            consequences.addElement(consequence);
            conf.addElement(new Double(tempConf));
            lift.addElement(new Double(tempLift));
            lev.addElement(new Double(tempLev));
            conv.addElement(new Double(tempConv));
          }
        }
      }
    }
    rules[0] = premises;
    rules[1] = consequences;
    rules[2] = conf;
    rules[3] = lift;
    rules[4] = lev;
    rules[5] = conv;
    return rules;
  }

  /**
   * Subtracts an item set from another one.
   *
   * @param toSubtract the item set to be subtracted from this one.
   * @return an item set that only contains items from this item set that are
   *         not contained by toSubtract
   */
  public final AprioriItemSet subtract(AprioriItemSet toSubtract) {
    AprioriItemSet result = new AprioriItemSet(m_totalTransactions);
    result.m_items = new int[m_items.length];

    for (int i = 0; i < m_items.length; i++)
      if (toSubtract.m_items[i] == -1)
        result.m_items[i] = m_items[i];
      else
        result.m_items[i] = -1;
    result.m_counter = 0;
    return result;
  }

  /**
   * Generates rules with more than one item in the consequence.
   *
   * @param rules all the rules having (k-1)-item sets as consequences
   * @param numItemsInSet the size of the item set for which the rules are to
   *          be generated
   * @param numItemsInConsequence the value of (k-1)
   * @param minConfidence the minimum confidence a rule has to have
   * @param hashtables the hashtables containing all(!) previously generated
   *          item sets
   * @return all the rules having (k)-item sets as consequences, or null when
   *         no larger consequence can be built
   */
  private final FastVector[] moreComplexRules(FastVector[] rules,
    int numItemsInSet, int numItemsInConsequence, double minConfidence,
    FastVector hashtables) {
    AprioriItemSet newPremise;
    FastVector[] result, moreResults;
    FastVector newConsequences, newPremises = new FastVector(),
      newConf = new FastVector();
    Hashtable hashtable;

    FastVector newLift = null, newLev = null, newConv = null;
    // if (rules.length > 3) {
    newLift = new FastVector();
    newLev = new FastVector();
    newConv = new FastVector();
    // }

    if (numItemsInSet > numItemsInConsequence + 1) {
      hashtable = (Hashtable) hashtables.elementAt(numItemsInSet
        - numItemsInConsequence - 2);
      // Build the (k)-item consequences from the current (k-1)-item ones.
      newConsequences = mergeAllItemSets(rules[1], numItemsInConsequence - 1,
        m_totalTransactions);
      int newNumInConsequence = numItemsInConsequence + 1;
      Hashtable hashtableForConsequence = (Hashtable) hashtables
        .elementAt(newNumInConsequence - 1);

      Enumeration enu = newConsequences.elements();
      while (enu.hasMoreElements()) {
        AprioriItemSet current = (AprioriItemSet) enu.nextElement();

        // z counts the items in 'current' but is never read afterwards
        // (appears to be leftover debugging code).
        int z = 0;
        for (int jj = 0; jj < current.m_items.length; jj++) {
          if (current.m_items[jj] != -1) {
            z++;
          }
        }

        current.m_counter = m_counter;
        // The premise is everything in this item set that is not in the
        // consequence.
        newPremise = subtract(current);
        newPremise.m_counter =
          ((Integer) hashtable.get(newPremise)).intValue();
        newPremises.addElement(newPremise);
        newConf.addElement(new Double(confidenceForRule(newPremise, current)));

        // if (rules.length > 3) {
        int consequenceUnconditionedCounter =
          ((Integer) hashtableForConsequence.get(current)).intValue();

        double tempLift = liftForRule(newPremise, current,
          consequenceUnconditionedCounter);
        double tempLev = leverageForRule(newPremise, current,
          newPremise.m_counter, consequenceUnconditionedCounter);
        double tempConv = convictionForRule(newPremise, current,
          newPremise.m_counter, consequenceUnconditionedCounter);

        newLift.addElement(new Double(tempLift));
        newLev.addElement(new Double(tempLev));
        newConv.addElement(new Double(tempConv));
        // }
      }
      result = new FastVector[rules.length];
      result[0] = newPremises;
      result[1] = newConsequences;
      result[2] = newConf;
      // if (rules.length > 3) {
      result[3] = newLift;
      result[4] = newLev;
      result[5] = newConv;
      // }

      pruneRules(result, minConfidence);
      // Recurse to grow the consequence further.
      moreResults = moreComplexRules(result, numItemsInSet,
        numItemsInConsequence + 1, minConfidence, hashtables);
      if (moreResults != null)
        for (int i = 0; i < moreResults[0].size(); i++) {
          result[0].addElement(moreResults[0].elementAt(i));
          result[1].addElement(moreResults[1].elementAt(i));
          result[2].addElement(moreResults[2].elementAt(i));
          //
          result[3].addElement(moreResults[3].elementAt(i));
          result[4].addElement(moreResults[4].elementAt(i));
          result[5].addElement(moreResults[5].elementAt(i));
        }
      return result;
    } else
      return null;
  }

  /**
   * Returns the contents of an item set as a string.
   *
   * @param instances contains the relevant header information
   * @return string describing the item set
   */
  @Override
  public final String toString(Instances instances) {
    return super.toString(instances);
  }

  /**
   * Converts the header info of the given set of instances into a set of item
   * sets (singletons). The ordering of values in the header file determines
   * the lexicographic order.
   *
   * @param instances the set of instances whose header info is to be used
   * @param treatZeroAsMissing if true the first value of each nominal
   *          attribute is skipped (treated like a missing value)
   * @return a set of item sets, each containing a single item
   * @exception Exception if singletons can't be generated successfully
   */
  public static FastVector singletons(Instances instances,
    boolean treatZeroAsMissing) throws Exception {
    FastVector setOfItemSets = new FastVector();
    ItemSet current;

    for (int i = 0; i < instances.numAttributes(); i++) {
      if (instances.attribute(i).isNumeric())
        throw new Exception("Can't handle numeric attributes!");
      int j = (treatZeroAsMissing) ? 1 : 0;
      for (; j < instances.attribute(i).numValues(); j++) {
        current = new AprioriItemSet(instances.numInstances());
        current.m_items = new int[instances.numAttributes()];
        for (int k = 0; k < instances.numAttributes(); k++)
          current.m_items[k] = -1;
        current.m_items[i] = j;
        setOfItemSets.addElement(current);
      }
    }
    return setOfItemSets;
  }

  /**
   * Merges all item sets in the set of (k-1)-item sets to create the (k)-item
   * sets and updates the counters.
   *
   * @param itemSets the set of (k-1)-item sets
   * @param size the value of (k-1)
   * @param totalTrans the total number of transactions in the data
   * @return the generated (k)-item sets
   */
  public static FastVector mergeAllItemSets(FastVector itemSets, int size,
    int totalTrans) {
    FastVector newVector = new FastVector();
    ItemSet result;
    int numFound, k;

    for (int i = 0; i < itemSets.size(); i++) {
      ItemSet first = (ItemSet) itemSets.elementAt(i);
      // 'out' label: abandon this pair as soon as the prefixes diverge.
      out: for (int j = i + 1; j < itemSets.size(); j++) {
        ItemSet second = (ItemSet) itemSets.elementAt(j);
        result = new AprioriItemSet(totalTrans);
        result.m_items = new int[first.m_items.length];

        // Find and copy common prefix of size 'size'
        numFound = 0;
        k = 0;
        while (numFound < size) {
          if (first.m_items[k] == second.m_items[k]) {
            if (first.m_items[k] != -1)
              numFound++;
            result.m_items[k] = first.m_items[k];
          } else
            break out;
          k++;
        }

        // Check difference
        while (k < first.m_items.length) {
          if ((first.m_items[k] != -1) && (second.m_items[k] != -1))
            break;
          else {
            if (first.m_items[k] != -1)
              result.m_items[k] = first.m_items[k];
            else
              result.m_items[k] = second.m_items[k];
          }
          k++;
        }
        // Only keep the merge if the two sets differed in exactly one item.
        if (k == first.m_items.length) {
          result.m_counter = 0;
          newVector.addElement(result);
        }
      }
    }
    return newVector;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 9722 $");
  }
}
21,863
36.183673
175
java
tsml-java
tsml-java-master/src/main/java/weka/associations/AssociationRule.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * AssociationRule.java
 * Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.associations;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Objects;

/**
 * Abstract class for storing and manipulating an association rule.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com).
 * @version $Revision: 8034 $
 */
public abstract class AssociationRule implements Comparable<AssociationRule> {

  /**
   * Get the premise of this rule.
   *
   * @return the premise of this rule.
   */
  public abstract Collection<Item> getPremise();

  /**
   * Get the consequence of this rule.
   *
   * @return the consequence of this rule.
   */
  public abstract Collection<Item> getConsequence();

  /**
   * Get the name of the primary metric of this rule (e.g. confidence).
   *
   * @return the name of the primary metric of this rule.
   */
  public abstract String getPrimaryMetricName();

  /**
   * Get the value of the metric for this rule.
   *
   * @return the value of the metric for this rule.
   */
  public abstract double getPrimaryMetricValue();

  /**
   * Get the value of the named metric for this rule
   *
   * @param metricName the metric to get the value for
   * @return the value of the named metric
   * @throws Exception if the requested metric is unknown for this rule
   */
  public abstract double getNamedMetricValue(String metricName) throws Exception;

  /**
   * Gets the number of metrics available for this rule.
   *
   * @return the number of metrics available for this rule
   */
  public abstract int getNumberOfMetricsForRule();

  /**
   * Return the names of the metrics available for this rule.
   *
   * @return the names of the metrics that are available for this rule.
   */
  public abstract String[] getMetricNamesForRule();

  /**
   * Get all the available metric values for this rule. Values are
   * returned in an array with entries that correspond to the metric
   * names returned by getMetricNamesForRule().
   *
   * @return all the available metrics for this rule.
   * @throws Exception if a metric can't be computed for some reason.
   */
  public abstract double[] getMetricValuesForRule() throws Exception;

  /**
   * Get the support for the premise.
   *
   * @return the support for the premise.
   */
  public abstract int getPremiseSupport();

  /**
   * Get the support for the consequence.
   *
   * @return the support for the consequence.
   */
  public abstract int getConsequenceSupport();

  /**
   * Get the total support for this rule (premise + consequence).
   *
   * @return the total support for this rule.
   */
  public abstract int getTotalSupport();

  /**
   * Get the total number of transactions in the data.
   *
   * @return the total number of transactions in the data.
   */
  public abstract int getTotalTransactions();

  /**
   * Compare this rule to the supplied rule.
   *
   * @param other the rule to compare to.
   * @return the result of the comparison.
   */
  public int compareTo(AssociationRule other) {
    // Negated so that rules sort in DESCENDING order of the primary metric
    // (best rule first). Note this ordering is not consistent with equals(),
    // which also compares premise and consequence.
    return -Double.compare(getPrimaryMetricValue(), other.getPrimaryMetricValue());
  }

  /**
   * Return true if this rule is equal to the supplied one.
   *
   * @return true if this rule is the same as the supplied rule.
   */
  public boolean equals(Object other) {
    if (!(other instanceof AssociationRule)) {
      return false;
    }

    AssociationRule otherRule = (AssociationRule) other;
    boolean result = getPremise().equals(otherRule.getPremise())
      && getConsequence().equals(otherRule.getConsequence())
      && (getPrimaryMetricValue() == otherRule.getPrimaryMetricValue());

    return result;
  }

  /**
   * Hash code consistent with equals(): based on the premise, the consequence
   * and the primary metric value.
   *
   * FIX: equals() was previously overridden without hashCode(), breaking the
   * Object contract (equal rules could land in different hash buckets).
   *
   * @return the hash code for this rule.
   */
  @Override
  public int hashCode() {
    return Objects.hash(getPremise(), getConsequence(), getPrimaryMetricValue());
  }

  /**
   * Returns true if this rule involves the supplied items.
   *
   * @param items the items to check for.
   * @param useOr if true, a single match (premise or consequence) suffices;
   *          otherwise every supplied item must occur somewhere in the rule.
   * @return true if the rule involves the items under the requested mode.
   */
  public boolean containsItems(ArrayList<Item> items, boolean useOr) {
    if (useOr) {
      // OR mode: any one requested item appearing anywhere in the rule is enough.
      for (Item i : getPremise()) {
        if (items.contains(i)) {
          return true;
        }
      }
      for (Item i : getConsequence()) {
        if (items.contains(i)) {
          return true;
        }
      }
      return false;
    }

    // AND mode: every requested item must occur in the premise or consequence.
    // FIX: the previous implementation counted matches over premise and
    // consequence separately, so an item present in BOTH was counted twice and
    // the count could reach items.size() without all items actually matching.
    for (Item i : items) {
      if (!getPremise().contains(i) && !getConsequence().contains(i)) {
        return false;
      }
    }
    return true;
  }
}
5,008
26.075676
83
java
tsml-java
tsml-java-master/src/main/java/weka/associations/AssociationRules.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * AssociationRules.java
 * Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.associations;

import java.io.Serializable;
import java.util.List;

import weka.core.OptionHandler;
import weka.core.Utils;

/**
 * Class encapsulating a list of association rules.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision: 8034 $
 */
public class AssociationRules implements Serializable {

  /** For serialization */
  private static final long serialVersionUID = 8889198755948056749L;

  /** The scheme that produced these rules */
  protected String m_producer = "Unknown";

  /** The list of rules */
  protected List<AssociationRule> m_rules;

  /**
   * Constructs a new AssociationRules.
   *
   * @param rules the list of rules.
   * @param producer a string describing the scheme that produced these rules.
   */
  public AssociationRules(List<AssociationRule> rules, String producer) {
    m_rules = rules;
    m_producer = producer;
  }

  /**
   * Constructs a new AssociationRules.
   *
   * @param rules the list of rules.
   * @param producer the scheme that produced the rules.
   */
  public AssociationRules(List<AssociationRule> rules, Object producer) {
    this(rules, describeProducer(producer));
  }

  /**
   * Builds a human-readable description of the producing scheme: its class
   * name (with the "weka.associations." prefix stripped) plus any command-line
   * options the scheme reports.
   *
   * @param producer the scheme that produced the rules.
   * @return a textual description of the producer.
   */
  private static String describeProducer(Object producer) {
    String name = producer.getClass().getName();
    String prefix = "weka.associations.";
    if (name.startsWith(prefix)) {
      name = name.substring(prefix.length());
    }
    if (producer instanceof OptionHandler) {
      String[] opts = ((OptionHandler) producer).getOptions();
      name = name + " " + Utils.joinOptions(opts);
    }
    return name;
  }

  /**
   * Constructs a new AssociationRules with an "Unknown" producer.
   *
   * @param rules the list of rules.
   */
  public AssociationRules(List<AssociationRule> rules) {
    this(rules, "Unknown");
  }

  /**
   * Set the rules to use.
   *
   * @param rules the rules to use.
   */
  public void setRules(List<AssociationRule> rules) {
    m_rules = rules;
  }

  /**
   * Get the rules.
   *
   * @return the rules.
   */
  public List<AssociationRule> getRules() {
    return m_rules;
  }

  /**
   * Get the number of rules.
   *
   * @return the number of rules.
   */
  public int getNumRules() {
    return m_rules.size();
  }

  /**
   * Set a textual description of the scheme that produced these rules.
   *
   * @param producer a textual description of the scheme that produced these
   *          rules.
   */
  public void setProducer(String producer) {
    m_producer = producer;
  }

  /**
   * Get a string describing the scheme that produced these rules.
   *
   * @return producer
   */
  public String getProducer() {
    return m_producer;
  }
}
3,428
24.4
79
java
tsml-java
tsml-java-master/src/main/java/weka/associations/AssociationRulesProducer.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * AssociationRulesProducer.java
 * Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.associations;

/**
 * Interface to something that can provide a list of AssociationRules.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision: 8034 $
 */
public interface AssociationRulesProducer {

  /**
   * Gets the list of mined association rules.
   *
   * @return the list of association rules discovered during mining. Returns
   *         null if mining hasn't been performed yet.
   */
  AssociationRules getAssociationRules();

  /**
   * Gets a list of the names of the metrics output for each rule. This list
   * must match (in names and order) what AssociationRule.getMetricNamesForRule()
   * produces for the individual rules.
   *
   * @return an array of the names of the metrics available for each rule
   *         learned by this producer.
   */
  String[] getRuleMetricNames();

  /**
   * Returns true if this AssociationRulesProducer can actually produce rules.
   * Most implementing classes will always return true from this method
   * (obviously :-)). However, an implementing class that actually acts as a
   * wrapper around things that may or may not implement
   * AssociationRulesProducer will want to return false if the thing they wrap
   * can't produce rules.
   *
   * @return true if this producer can produce rules in its current
   *         configuration
   */
  boolean canProduceRules();
}
2,173
31.939394
74
java
tsml-java
tsml-java-master/src/main/java/weka/associations/Associator.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Associator.java
 * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.associations;

import weka.core.Capabilities;
import weka.core.Instances;

/**
 * Interface implemented by all association-rule learning schemes.
 */
public interface Associator {

  /**
   * Generates an associator. Must initialize all fields of the associator that
   * are not being set via options (ie. multiple calls of buildAssociations
   * must always lead to the same result). Must not change the dataset in any
   * way.
   *
   * @param data set of instances serving as training data
   * @exception Exception if the associator has not been generated successfully
   */
  void buildAssociations(Instances data) throws Exception;

  /**
   * Returns the Capabilities of this associator. Derived associators have to
   * override this method to enable capabilities.
   *
   * @return the capabilities of this object
   * @see Capabilities
   */
  Capabilities getCapabilities();
}
1,640
32.489796
78
java
tsml-java
tsml-java-master/src/main/java/weka/associations/AssociatorEvaluation.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * AssociatorEvaluation.java
 * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.associations;

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.util.Enumeration;

import weka.core.Drawable;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.converters.ConverterUtils.DataSource;

/**
 * Class for evaluating Associaters.
 *
 * @author fracpete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 8034 $
 */
public class AssociatorEvaluation implements RevisionHandler {

  /** the result string */
  protected StringBuffer m_Result;

  /**
   * default constructor
   */
  public AssociatorEvaluation() {
    super();

    m_Result = new StringBuffer();
  }

  /**
   * Generates an option string to output on the commandline.
   *
   * @param associator the associator to generate the string for
   * @return the option string
   */
  protected static String makeOptionString(Associator associator) {
    StringBuffer text;

    text = new StringBuffer();

    // general options
    text.append("\nGeneral options:\n\n");
    text.append("-t <training file>\n");
    text.append("\tThe name of the training file.\n");
    text.append("-g <name of graph file>\n");
    text.append("\tOutputs the graph representation (if supported) of the associator to a file.\n");

    // associator specific options, if any
    if (associator instanceof OptionHandler) {
      text.append(
          "\nOptions specific to "
          + associator.getClass().getName().replaceAll(".*\\.", "")
          + ":\n\n");
      Enumeration enm = ((OptionHandler) associator).listOptions();
      while (enm.hasMoreElements()) {
        Option option = (Option) enm.nextElement();
        text.append(option.synopsis() + "\n");
        text.append(option.description() + "\n");
      }
    }

    return text.toString();
  }

  /**
   * Evaluates an associator with the options given in an array of strings.
   *
   * @param associatorString class of associator as a string
   * @param options the array of string containing the options
   * @throws Exception if model could not be evaluated successfully
   * @return a string describing the results
   */
  public static String evaluate(String associatorString, String[] options) throws Exception {
    Associator associator;

    // Create associator
    try {
      // NOTE: getDeclaredConstructor().newInstance() replaces the deprecated
      // Class.newInstance() (which swallowed constructor exceptions).
      associator = (Associator) Class.forName(associatorString)
        .getDeclaredConstructor().newInstance();
    }
    catch (Exception e) {
      throw new Exception("Can't find class with name " + associatorString + '.');
    }

    return evaluate(associator, options);
  }

  /**
   * Evaluates the associator with the given commandline options and returns
   * the evaluation string.
   *
   * @param associator the Associator to evaluate
   * @param options the commandline options
   * @return the generated output string
   * @throws Exception if evaluation fails
   */
  public static String evaluate(Associator associator, String[] options) throws Exception {
    String trainFileString = "";
    String graphFileName = "";
    AssociatorEvaluation eval;
    DataSource loader;

    // help?
    if (Utils.getFlag('h', options))
      throw new Exception("\nHelp requested.\n" + makeOptionString(associator));

    try {
      // general options
      trainFileString = Utils.getOption('t', options);
      if (trainFileString.length() == 0)
        throw new Exception("No training file given!");
      loader = new DataSource(trainFileString);

      graphFileName = Utils.getOption('g', options);

      // associator specific options
      if (associator instanceof OptionHandler) {
        ((OptionHandler) associator).setOptions(options);
      }

      // left-over options?
      Utils.checkForRemainingOptions(options);
    }
    catch (Exception e) {
      throw new Exception(
          "\nWeka exception: " + e.getMessage() + "\n" + makeOptionString(associator));
    }

    // load file and build associations
    eval = new AssociatorEvaluation();
    String results = eval.evaluate(associator, new Instances(loader.getDataSet()));

    // If associator is drawable output string describing graph
    if ((associator instanceof Drawable) && (graphFileName.length() != 0)) {
      // FIX: try-with-resources so the writer is closed even if writing the
      // graph throws (previous code leaked the writer on exception).
      try (BufferedWriter writer = new BufferedWriter(new FileWriter(graphFileName))) {
        writer.write(((Drawable) associator).graph());
        writer.newLine();
        writer.flush();
      }
    }

    return results;
  }

  /**
   * Evaluates the associator with the given data and returns the evaluation
   * string.
   *
   * @param associator the Associator to evaluate
   * @param data the data to run the associator with
   * @return the generated output string
   * @throws Exception if evaluation fails
   */
  public String evaluate(Associator associator, Instances data) throws Exception {
    long startTime;
    long endTime;

    // build associations
    startTime = System.currentTimeMillis();
    associator.buildAssociations(data);
    endTime = System.currentTimeMillis();

    m_Result = new StringBuffer(associator.toString());
    m_Result.append("\n=== Evaluation ===\n\n");
    m_Result.append("Elapsed time: " + (((double) (endTime - startTime)) / 1000) + "s");
    m_Result.append("\n");

    return m_Result.toString();
  }

  /**
   * Tests whether the current evaluation object is equal to another evaluation
   * object. The elapsed-time line is stripped before comparing, since timing
   * differs between otherwise identical runs.
   *
   * @param obj the object to compare against
   * @return true if the two objects are equal
   */
  public boolean equals(Object obj) {
    if ((obj == null) || !(obj.getClass().equals(this.getClass())))
      return false;

    AssociatorEvaluation cmp = (AssociatorEvaluation) obj;

    // TODO: better comparison???
    String associatingResults1 = m_Result.toString().replaceAll("Elapsed time.*", "");
    String associatingResults2 = cmp.m_Result.toString().replaceAll("Elapsed time.*", "");
    if (!associatingResults1.equals(associatingResults2))
      return false;

    return true;
  }

  /**
   * Hash code consistent with equals(): hashes the result string with the
   * elapsed-time line removed (the same normalisation equals() applies).
   *
   * FIX: equals() was overridden without hashCode(), breaking the Object
   * contract.
   *
   * @return the hash code
   */
  @Override
  public int hashCode() {
    return m_Result.toString().replaceAll("Elapsed time.*", "").hashCode();
  }

  /**
   * returns a summary string of the evaluation with a no title
   *
   * @return the summary string
   */
  public String toSummaryString() {
    return toSummaryString("");
  }

  /**
   * returns a summary string of the evaluation with a default title
   *
   * @param title the title to print before the result
   * @return the summary string
   */
  public String toSummaryString(String title) {
    StringBuffer result;

    result = new StringBuffer(title);
    if (title.length() != 0)
      result.append("\n");
    result.append(m_Result);

    return result.toString();
  }

  /**
   * returns the current result
   *
   * @return the currently stored result
   * @see #toSummaryString()
   */
  public String toString() {
    return toSummaryString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  /**
   * A test method for this class. Just extracts the first command line
   * argument as an associator class name and calls evaluate.
   *
   * @param args an array of command line arguments, the first of which must be
   *          the class name of an associator.
   */
  public static void main(String[] args) {
    try {
      if (args.length == 0) {
        // FIX: message previously said "kernel" (copy-paste from another tool).
        throw new Exception(
            "The first argument must be the class name of an associator");
      }
      String associator = args[0];
      args[0] = "";
      System.out.println(evaluate(associator, args));
    }
    catch (Exception ex) {
      ex.printStackTrace();
      System.err.println(ex.getMessage());
    }
  }
}
8,539
28.146758
100
java
tsml-java
tsml-java-master/src/main/java/weka/associations/BinaryItem.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * BinaryItem.java
 * Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.associations;

import java.io.Serializable;

import weka.core.Attribute;

/**
 * Class that encapsulates an item whose backing Attribute is binary or unary.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision; $
 */
public class BinaryItem extends NominalItem implements Serializable {

  /** For serialization */
  private static final long serialVersionUID = -3372941834914147669L;

  /**
   * Constructor.
   *
   * @param att the attribute that backs this item.
   * @param valueIndex the index of the value for this item.
   * @throws Exception if the backing attribute is not binary or unary.
   */
  public BinaryItem(Attribute att, int valueIndex) throws Exception {
    super(att, valueIndex);

    if (att.isNumeric() || (att.isNominal() && att.numValues() > 2)) {
      throw new Exception("BinaryItem must be constructed using a nominal attribute"
          + " with at most 2 values!");
    }
  }

  /**
   * Equals. Just compares attribute and valueIndex.
   *
   * @return true if this BinaryItem is equal to the argument.
   */
  public boolean equals(Object compareTo) {
    if (!(compareTo instanceof BinaryItem)) {
      return false;
    }

    BinaryItem b = (BinaryItem) compareTo;
    if (m_attribute.equals(b.getAttribute()) &&
        // m_frequency == b.getFrequency() &&
        m_valueIndex == b.getValueIndex()) {
      return true;
    }

    return false;
  }

  /**
   * Hash code based on the attribute name and the value index — exactly the
   * fields that equals() compares.
   *
   * FIX: the previous implementation multiplied by m_frequency, a mutable
   * field that equals() deliberately excludes (see the commented-out check
   * above). Two equal items with different frequencies therefore hashed to
   * different buckets, and a frequency of zero collapsed every hash to 0.
   *
   * @return the hash code
   */
  public int hashCode() {
    return m_attribute.name().hashCode() ^ m_valueIndex;
  }
}
2,472
29.530864
88
java
tsml-java
tsml-java-master/src/main/java/weka/associations/CARuleMiner.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * CARuleMiner.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.associations;

import weka.core.FastVector;
import weka.core.Instances;
import weka.core.OptionHandler;

/**
 * Interface for learning class association rules. All schemes for learning
 * class association rules implemement this interface.
 *
 * @author Stefan Mutter (mutter@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public interface CARuleMiner extends OptionHandler {

  /**
   * Method for mining class association rules. Must initialize all fields of
   * the CARuleMiner that are not being set via options (ie. multiple calls of
   * mineCARs must always lead to the same result). Must not change the dataset
   * in any way.
   *
   * @param data the insatnces for which class association rules are mined
   * @throws Exception throws exception if class association rules cannot be
   *           mined
   * @return class association rules and their scoring metric in an FastVector
   *         array
   */
  public FastVector[] mineCARs(Instances data) throws Exception;

  /**
   * Gets the instances without the class attribute
   *
   * @return the instances withoput the class attribute
   */
  public Instances getInstancesNoClass();

  /**
   * Gets the class attribute and its values for all instances
   *
   * @return the class attribute and its values for all instances
   */
  public Instances getInstancesOnlyClass();

  /**
   * Gets name of the scoring metric used for car mining
   *
   * @return string containing the name of the scoring metric
   */
  public String metricString();

  /**
   * Sets the class index for the class association rule miner
   *
   * @param index the class index
   */
  public void setClassIndex(int index);
}
2,421
32.638889
119
java
tsml-java
tsml-java-master/src/main/java/weka/associations/CaRuleGeneration.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * CaRuleGeneration.java
 * Copyright (C) 2004 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.associations;

import weka.core.Attribute;
import weka.core.FastVector;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.UnassignedClassException;

import java.io.Serializable;
import java.util.Hashtable;
import java.util.TreeSet;

/**
 * Class implementing the rule generation procedure of the predictive apriori
 * algorithm for class association rules.
 *
 * For association rules in gerneral the method is described in:
 * T. Scheffer (2001). <i>Finding Association Rules That Trade Support
 * Optimally against Confidence</i>. Proc of the 5th European Conf.
 * on Principles and Practice of Knowledge Discovery in Databases (PKDD'01),
 * pp. 424-435. Freiburg, Germany: Springer-Verlag. <p>
 *
 * The implementation follows the paper expect for adding a rule to the output
 * of the <i>n</i> best rules. A rule is added if:
 * the expected predictive accuracy of this rule is among the <i>n</i> best and
 * it is not subsumed by a rule with at least the same expected predictive
 * accuracy (out of an unpublished manuscript from T. Scheffer).
 *
 * @author Stefan Mutter (mutter@cs.waikato.ac.nz)
 * @version $Revision: 1.4 $
 */
public class CaRuleGeneration extends RuleGeneration
  implements Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = 3065752149646517703L;

  /**
   * Constructor
   * @param itemSet the item set that forms the premise of the rule
   */
  public CaRuleGeneration(ItemSet itemSet){
    super(itemSet);
  }

  /**
   * Generates all rules for an item set. The item set is the premise.
   *
   * NOTE(review): this method mutates many inherited fields (m_change,
   * m_midPoints, m_priors, m_best, m_expectation, m_count, m_minRuleCount,
   * m_instances) as loop-carried state; statement order is significant.
   *
   * @param numRules the number of association rules the use wants to mine.
   *        This number equals the size <i>n</i> of the list of the
   *        best rules.
   * @param midPoints the mid points of the intervals
   * @param priors Hashtable that contains the prior probabilities
   * @param expectation the minimum value of the expected predictive accuracy
   *        that is needed to get into the list of the best rules
   * @param instances the instances for which association rules are generated
   * @param best the list of the <i>n</i> best rules.
   *        The list is implemented as a TreeSet
   * @param genTime the maximum time of generation
   * @return all the rules with minimum confidence for the given item set
   */
  public TreeSet generateRules(int numRules, double[] midPoints, Hashtable priors,
                               double expectation, Instances instances,
                               TreeSet best, int genTime) {

    boolean redundant = false;
    FastVector consequences = new FastVector();
    ItemSet premise;
    RuleItem current = null, old = null;
    Hashtable hashtable;

    // Copy arguments into the inherited working state.
    m_change = false;
    m_midPoints = midPoints;
    m_priors = priors;
    m_best = best;
    m_expectation = expectation;
    m_count = genTime;
    m_instances = instances;

    //create rule body
    premise =null;
    premise = new ItemSet(m_totalTransactions);
    int[] premiseItems = new int[m_items.length];
    // Defensive copy of the item array backing this generator's item set.
    System.arraycopy(m_items, 0, premiseItems, 0, m_items.length);
    premise.setItem(premiseItems);
    premise.setCounter(m_counter);

    // Candidate heads: one length-1 consequence per class value.
    consequences = singleConsequence(instances);

    //create n best rules
    do{
      if(premise == null || consequences.size() == 0)
        return m_best;

      // Smallest support count whose expected accuracy still beats the
      // current admission threshold m_expectation.
      m_minRuleCount = 1;
      while(expectation((double)m_minRuleCount,premise.counter(),m_midPoints,m_priors) <= m_expectation){
        m_minRuleCount++;
        if(m_minRuleCount > premise.counter())
          return m_best;
      }
      redundant = false;

      //create possible heads
      FastVector allRuleItems = new FastVector();
      int h = 0;
      while(h < consequences.size()){
        RuleItem dummie = new RuleItem();
        m_count++;
        current = dummie.generateRuleItem(premise,(ItemSet)consequences.elementAt(h),instances,m_count,m_minRuleCount,m_midPoints,m_priors);
        if(current != null)
          allRuleItems.addElement(current);
        h++;
      }

      //update best
      for(h =0; h< allRuleItems.size();h++){
        current = (RuleItem)allRuleItems.elementAt(h);
        if(m_best.size() < numRules){
          // List not full yet: admit unconditionally (subject to subsumption).
          m_change =true;
          redundant = removeRedundant(current);
        }
        else{
          // List full: the worst rule (first in the TreeSet) sets the bar.
          m_expectation = ((RuleItem)(m_best.first())).accuracy();
          if(current.accuracy() > m_expectation){
            boolean remove = m_best.remove(m_best.first());
            m_change = true;
            redundant = removeRedundant(current);
            // Re-read the bar after insertion, then raise m_minRuleCount to
            // match the new threshold.
            m_expectation = ((RuleItem)(m_best.first())).accuracy();
            while(expectation((double)m_minRuleCount, (current.premise()).counter(),m_midPoints,m_priors) < m_expectation){
              m_minRuleCount++;
              if(m_minRuleCount > (current.premise()).counter())
                break;
            }
          }
        }
      }
    // Repeat while removeRedundant reported a subsumed rule was displaced.
    }while(redundant);

    return m_best;
  }

  /**
   * Methods that decides whether or not rule a subsumes rule b.
   * The defintion of subsumption is:
   * Rule a subsumes rule b, if a subsumes b
   * AND
   * a has got least the same expected predictive accuracy as b.
   * @param a an association rule stored as a RuleItem
   * @param b an association rule stored as a RuleItem
   * @return true if rule a subsumes rule b or false otherwise.
   */
  public static boolean aSubsumesB(RuleItem a, RuleItem b){
    if(!a.consequence().equals(b.consequence()))
      return false;
    if(a.accuracy() < b.accuracy())
      return false;
    // -1 marks "attribute unused" in an item array; a subsumes b only if
    // every attribute a constrains is constrained identically in b.
    for(int k = 0; k < ((a.premise()).items()).length;k++){
      if((a.premise()).itemAt(k) != (b.premise()).itemAt(k)){
        if(((a.premise()).itemAt(k) != -1 && (b.premise()).itemAt(k) != -1) || (b.premise()).itemAt(k) == -1)
          return false;
      }
      /*if(a.m_consequence.m_items[k] != b.m_consequence.m_items[k]){
        if((a.m_consequence.m_items[k] != -1 && b.m_consequence.m_items[k] != -1) || a.m_consequence.m_items[k] == -1)
          return false;
      }*/
    }
    return true;
  }

  /**
   * Converts the header info of the given set of instances into a set
   * of item sets (singletons). The ordering of values in the header file
   * determines the lexicographic order. The class attribute is skipped:
   * class values may only appear as consequences (see singleConsequence).
   *
   * @param instances the set of instances whose header info is to be used
   * @return a set of item sets, each containing a single item
   * @exception Exception if singletons can't be generated successfully
   */
  public static FastVector singletons(Instances instances) throws Exception {

    FastVector setOfItemSets = new FastVector();
    ItemSet current;

    if(instances.classIndex() == -1)
      throw new UnassignedClassException("Class index is negative (not set)!");
    Attribute att = instances.classAttribute();
    for (int i = 0; i < instances.numAttributes(); i++) {
      if (instances.attribute(i).isNumeric())
        throw new Exception("Can't handle numeric attributes!");
      if(i != instances.classIndex()){
        for (int j = 0; j < instances.attribute(i).numValues(); j++) {
          current = new ItemSet(instances.numInstances());
          int[] currentItems = new int[instances.numAttributes()];
          // -1 everywhere except position i: a singleton constrains exactly
          // one attribute.
          for (int k = 0; k < instances.numAttributes(); k++)
            currentItems[k] = -1;
          currentItems[i] = j;
          current.setItem(currentItems);
          setOfItemSets.addElement(current);
        }
      }
    }
    return setOfItemSets;
  }

  /**
   * generates a consequence of length 1 for a class association rule.
   * One item set per class value, constraining only the class attribute.
   *
   * @param instances the instances under consideration
   * @return FastVector with consequences of length 1
   */
  public static FastVector singleConsequence(Instances instances){

    ItemSet consequence;
    FastVector consequences = new FastVector();

    for (int j = 0; j < (instances.classAttribute()).numValues(); j++) {
      consequence = new ItemSet(instances.numInstances());
      int[] consequenceItems = new int[instances.numAttributes()];
      consequence.setItem(consequenceItems);
      for (int k = 0; k < instances.numAttributes(); k++)
        consequence.setItemAt(-1,k);
      consequence.setItemAt(j,instances.classIndex());
      consequences.addElement(consequence);
    }
    return consequences;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 1.4 $");
  }
}
8,993
33.860465
152
java
tsml-java
tsml-java-master/src/main/java/weka/associations/CheckAssociator.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * CheckAssociator.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand * */ package weka.associations; import weka.core.Attribute; import weka.core.CheckScheme; import weka.core.FastVector; import weka.core.Instances; import weka.core.MultiInstanceCapabilitiesHandler; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.SerializationHelper; import weka.core.TestInstances; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import java.util.Enumeration; import java.util.Random; import java.util.Vector; /** * Class for examining the capabilities and finding problems with * associators. If you implement an associators using the WEKA.libraries, * you should run the checks on it to ensure robustness and correct * operation. Passing all the tests of this object does not mean * bugs in the associators don't exist, but this will help find some * common ones. 
<p/> * * Typical usage: <p/> * <code>java weka.associations.CheckAssociator -W associator_name * -- associator_options </code><p/> * * CheckAssociator reports on the following: * <ul> * <li> Associator abilities * <ul> * <li> Possible command line options to the associators </li> * <li> Whether the associators can predict nominal, numeric, string, * date or relational class attributes. </li> * <li> Whether the associators can handle numeric predictor attributes </li> * <li> Whether the associators can handle nominal predictor attributes </li> * <li> Whether the associators can handle string predictor attributes </li> * <li> Whether the associators can handle date predictor attributes </li> * <li> Whether the associators can handle relational predictor attributes </li> * <li> Whether the associators can handle multi-instance data </li> * <li> Whether the associators can handle missing predictor values </li> * <li> Whether the associators can handle missing class values </li> * <li> Whether a nominal associators only handles 2 class problems </li> * <li> Whether the associators can handle instance weights </li> * </ul> * </li> * <li> Correct functioning * <ul> * <li> Correct initialisation during buildAssociations (i.e. no result * changes when buildAssociations called repeatedly) </li> * <li> Whether the associators alters the data pased to it * (number of instances, instance order, instance weights, etc) </li> * </ul> * </li> * <li> Degenerate cases * <ul> * <li> building associators with zero training instances </li> * <li> all but one predictor attribute values missing </li> * <li> all predictor attribute values missing </li> * <li> all but one class values missing </li> * <li> all class values missing </li> * </ul> * </li> * </ul> * Running CheckAssociator with the debug option set will output the * training dataset for any failed tests.<p/> * * The <code>weka.associations.AbstractAssociatorTest</code> uses this * class to test all the associators. 
Any changes here, have to be * checked in that abstract test class, too. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -S * Silent mode - prints nothing to stdout.</pre> * * <pre> -N &lt;num&gt; * The number of instances in the datasets (default 20).</pre> * * <pre> -nominal &lt;num&gt; * The number of nominal attributes (default 2).</pre> * * <pre> -nominal-values &lt;num&gt; * The number of values for nominal attributes (default 1).</pre> * * <pre> -numeric &lt;num&gt; * The number of numeric attributes (default 1).</pre> * * <pre> -string &lt;num&gt; * The number of string attributes (default 1).</pre> * * <pre> -date &lt;num&gt; * The number of date attributes (default 1).</pre> * * <pre> -relational &lt;num&gt; * The number of relational attributes (default 1).</pre> * * <pre> -num-instances-relational &lt;num&gt; * The number of instances in relational/bag attributes (default 10).</pre> * * <pre> -words &lt;comma-separated-list&gt; * The words to use in string attributes.</pre> * * <pre> -word-separators &lt;chars&gt; * The word separators to use in string attributes.</pre> * * <pre> -W * Full name of the associator analysed. * eg: weka.associations.Apriori * (default weka.associations.Apriori)</pre> * * <pre> * Options specific to associator weka.associations.Apriori: * </pre> * * <pre> -N &lt;required number of rules output&gt; * The required number of rules. (default = 10)</pre> * * <pre> -T &lt;0=confidence | 1=lift | 2=leverage | 3=Conviction&gt; * The metric type by which to rank rules. (default = confidence)</pre> * * <pre> -C &lt;minimum metric score of a rule&gt; * The minimum confidence of a rule. (default = 0.9)</pre> * * <pre> -D &lt;delta for minimum support&gt; * The delta by which the minimum support is decreased in * each iteration. (default = 0.05)</pre> * * <pre> -U &lt;upper bound for minimum support&gt; * Upper bound for minimum support. 
 (default = 1.0)</pre>
 * 
 * <pre> -M &lt;lower bound for minimum support&gt;
 *  The lower bound for the minimum support. (default = 0.1)</pre>
 * 
 * <pre> -S &lt;significance level&gt;
 *  If used, rules are tested for significance at
 *  the given level. Slower. (default = no significance testing)</pre>
 * 
 * <pre> -I
 *  If set the itemsets found are also output. (default = no)</pre>
 * 
 * <pre> -R
 *  Remove columns that contain all missing values (default = no)</pre>
 * 
 * <pre> -V
 *  Report progress iteratively. (default = no)</pre>
 * 
 * <pre> -A
 *  If set class association rules are mined. (default = no)</pre>
 * 
 * <pre> -c &lt;the class index&gt;
 *  The class index. (default = last)</pre>
 * 
 <!-- options-end -->
 *
 * Options after -- are passed to the designated associator.<p/>
 *
 * @author Len Trigg (trigg@cs.waikato.ac.nz)
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 8034 $
 * @see TestInstances
 */
public class CheckAssociator 
  extends CheckScheme
  implements RevisionHandler {

  /*
   * Note about test methods:
   * - methods return array of booleans
   * - first index: success or not
   * - second index: acceptable or not (e.g., Exception is OK)
   *
   * FracPete (fracpete at waikato dot ac dot nz)
   */
  
  /** a "dummy" class type, used by the tests that run without a class attribute */
  public final static int NO_CLASS = -1;
  
  /** The associator to be examined (default scheme: Apriori) */
  protected Associator m_Associator = new weka.associations.Apriori();
  
  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector result = new Vector();
    
    // start with the generic CheckScheme options
    Enumeration en = super.listOptions();
    while (en.hasMoreElements())
      result.addElement(en.nextElement());
    
    result.addElement(new Option(
        "\tFull name of the associator analysed.\n"
        +"\teg: weka.associations.Apriori\n"
        + "\t(default weka.associations.Apriori)",
        "W", 1, "-W"));
    
    // append the scheme-specific options of the currently set associator,
    // if it exposes any
    if ((m_Associator != null) 
        && (m_Associator instanceof OptionHandler)) {
      result.addElement(new Option("", "", 0, 
          "\nOptions specific to associator "
          + m_Associator.getClass().getName()
          + ":"));
      Enumeration enu = ((OptionHandler)m_Associator).listOptions();
      while (enu.hasMoreElements())
        result.addElement(enu.nextElement());
    }
    
    return result.elements();
  }
  
  /**
   * Parses a given list of options. 
   *
   <!-- options-start -->
   * Valid options are: <p/>
   * 
   * <pre> -D
   *  Turn on debugging output.</pre>
   * 
   * <pre> -S
   *  Silent mode - prints nothing to stdout.</pre>
   * 
   * <pre> -N &lt;num&gt;
   *  The number of instances in the datasets (default 20).</pre>
   * 
   * <pre> -nominal &lt;num&gt;
   *  The number of nominal attributes (default 2).</pre>
   * 
   * <pre> -nominal-values &lt;num&gt;
   *  The number of values for nominal attributes (default 1).</pre>
   * 
   * <pre> -numeric &lt;num&gt;
   *  The number of numeric attributes (default 1).</pre>
   * 
   * <pre> -string &lt;num&gt;
   *  The number of string attributes (default 1).</pre>
   * 
   * <pre> -date &lt;num&gt;
   *  The number of date attributes (default 1).</pre>
   * 
   * <pre> -relational &lt;num&gt;
   *  The number of relational attributes (default 1).</pre>
   * 
   * <pre> -num-instances-relational &lt;num&gt;
   *  The number of instances in relational/bag attributes (default 10).</pre>
   * 
   * <pre> -words &lt;comma-separated-list&gt;
   *  The words to use in string attributes.</pre>
   * 
   * <pre> -word-separators &lt;chars&gt;
   *  The word separators to use in string attributes.</pre>
   * 
   * <pre> -W
   *  Full name of the associator analysed.
   *  eg: weka.associations.Apriori
   *  (default weka.associations.Apriori)</pre>
   * 
   * <pre> 
   * Options specific to associator weka.associations.Apriori:
   * </pre>
   * 
   * <pre> -N &lt;required number of rules output&gt;
   *  The required number of rules. (default = 10)</pre>
   * 
   * <pre> -T &lt;0=confidence | 1=lift | 2=leverage | 3=Conviction&gt;
   *  The metric type by which to rank rules. (default = confidence)</pre>
   * 
   * <pre> -C &lt;minimum metric score of a rule&gt;
   *  The minimum confidence of a rule. (default = 0.9)</pre>
   * 
   * <pre> -D &lt;delta for minimum support&gt;
   *  The delta by which the minimum support is decreased in
   *  each iteration. (default = 0.05)</pre>
   * 
   * <pre> -U &lt;upper bound for minimum support&gt;
   *  Upper bound for minimum support. (default = 1.0)</pre>
   * 
   * <pre> -M &lt;lower bound for minimum support&gt;
   *  The lower bound for the minimum support. (default = 0.1)</pre>
   * 
   * <pre> -S &lt;significance level&gt;
   *  If used, rules are tested for significance at
   *  the given level. Slower. (default = no significance testing)</pre>
   * 
   * <pre> -I
   *  If set the itemsets found are also output. (default = no)</pre>
   * 
   * <pre> -R
   *  Remove columns that contain all missing values (default = no)</pre>
   * 
   * <pre> -V
   *  Report progress iteratively. (default = no)</pre>
   * 
   * <pre> -A
   *  If set class association rules are mined. (default = no)</pre>
   * 
   * <pre> -c &lt;the class index&gt;
   *  The class index. (default = last)</pre>
   * 
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String tmpStr;
    
    super.setOptions(options);
    
    tmpStr = Utils.getOption('W', options);
    if (tmpStr.length() == 0)
      tmpStr = weka.associations.Apriori.class.getName();
    // instantiate the named scheme, passing it any remaining (post "--") options
    setAssociator(
        (Associator) forName(
            "weka.associations", 
            Associator.class, 
            tmpStr, 
            Utils.partitionOptions(options)));
  }
  
  /**
   * Gets the current settings of the CheckAssociator.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    Vector result;
    String[] options;
    int i;
    
    result = new Vector();
    
    // generic CheckScheme options first
    options = super.getOptions();
    for (i = 0; i < options.length; i++)
      result.add(options[i]);
    
    if (getAssociator() != null) {
      result.add("-W");
      result.add(getAssociator().getClass().getName());
    }
    
    // scheme-specific options go after the "--" separator
    if ((m_Associator != null) && (m_Associator instanceof OptionHandler))
      options = ((OptionHandler) m_Associator).getOptions();
    else
      options = new String[0];
    
    if (options.length > 0) {
      result.add("--");
      for (i = 0; i < options.length; i++)
        result.add(options[i]);
    }
    
    return (String[]) result.toArray(new String[result.size()]);
  }
  
  /**
   * Begin the tests, reporting results to System.out
   */
  public void doTests() {
    
    if (getAssociator() == null) {
      println("\n=== No associator set ===");
      return;
    }
    println("\n=== Check on Associator: "
        + getAssociator().getClass().getName()
        + " ===\n");
    
    // Start tests
    m_ClasspathProblems = false;
    println("--> Checking for interfaces");
    canTakeOptions();
    boolean weightedInstancesHandler = weightedInstancesHandler()[0];
    boolean multiInstanceHandler = multiInstanceHandler()[0];
    println("--> Associator tests");
    declaresSerialVersionUID();
    println("--> no class attribute");
    testsWithoutClass(weightedInstancesHandler, multiInstanceHandler);
    println("--> with class attribute");
    // run the full battery once per supported class attribute type
    testsPerClassType(Attribute.NOMINAL, weightedInstancesHandler, multiInstanceHandler);
    testsPerClassType(Attribute.NUMERIC, weightedInstancesHandler, multiInstanceHandler);
    testsPerClassType(Attribute.DATE, weightedInstancesHandler, multiInstanceHandler);
    testsPerClassType(Attribute.STRING, weightedInstancesHandler, multiInstanceHandler);
    testsPerClassType(Attribute.RELATIONAL, weightedInstancesHandler, multiInstanceHandler);
  }
  
  /**
   * Set the associator to test. 
   * 
   * @param newAssociator the Associator to use.
   */
  public void setAssociator(Associator newAssociator) {
    m_Associator = newAssociator;
  }
  
  /**
   * Get the associator being tested
   *
   * @return the associator being tested
   */
  public Associator getAssociator() {
    return m_Associator;
  }
  
  /**
   * Run a battery of tests for a given class attribute type
   *
   * @param classType the class type (NOMINAL, NUMERIC, etc.)
   * @param weighted true if the associator says it handles weights
   * @param multiInstance true if the associator is a multi-instance associator
   */
  protected void testsPerClassType(int classType, 
                                   boolean weighted,
                                   boolean multiInstance) {
    
    // determine which individual predictor attribute types the scheme accepts
    boolean PNom = canPredict(true, false, false, false, false, multiInstance, classType)[0];
    boolean PNum = canPredict(false, true, false, false, false, multiInstance, classType)[0];
    boolean PStr = canPredict(false, false, true, false, false, multiInstance, classType)[0];
    boolean PDat = canPredict(false, false, false, true, false, multiInstance, classType)[0];
    boolean PRel;
    if (!multiInstance)
      PRel = canPredict(false, false, false, false, true, multiInstance, classType)[0];
    else
      PRel = false;
    
    // only run the remaining tests if at least one attribute type is supported
    if (PNom || PNum || PStr || PDat || PRel) {
      if (weighted)
        instanceWeights(PNom, PNum, PStr, PDat, PRel, multiInstance, classType);
      
      if (classType == Attribute.NOMINAL)
        canHandleNClasses(PNom, PNum, PStr, PDat, PRel, multiInstance, 4);
      
      if (!multiInstance) {
        canHandleClassAsNthAttribute(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, 0);
        canHandleClassAsNthAttribute(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, 1);
      }
      
      canHandleZeroTraining(PNom, PNum, PStr, PDat, PRel, multiInstance, classType);
      // the 100% missing tests only make sense if 20% missing already works
      boolean handleMissingPredictors = canHandleMissing(PNom, PNum, PStr, PDat, PRel, 
          multiInstance, classType, 
          true, false, 20)[0];
      if (handleMissingPredictors)
        canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, true, false, 100);
      
      boolean handleMissingClass = canHandleMissing(PNom, PNum, PStr, PDat, PRel, 
          multiInstance, classType, 
          false, true, 20)[0];
      if (handleMissingClass)
        canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, false, true, 100);
      
      correctBuildInitialisation(PNom, PNum, PStr, PDat, PRel, multiInstance, classType);
      datasetIntegrity(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, 
          handleMissingPredictors, handleMissingClass);
    }
  }
  
  /**
   * Run a battery of tests without a class
   *
   * @param weighted true if the associator says it handles weights
   * @param multiInstance true if the associator is a multi-instance associator
   */
  protected void testsWithoutClass(boolean weighted,
                                   boolean multiInstance) {
    
    // determine which individual predictor attribute types the scheme accepts
    boolean PNom = canPredict(true, false, false, false, false, multiInstance, NO_CLASS)[0];
    boolean PNum = canPredict(false, true, false, false, false, multiInstance, NO_CLASS)[0];
    boolean PStr = canPredict(false, false, true, false, false, multiInstance, NO_CLASS)[0];
    boolean PDat = canPredict(false, false, false, true, false, multiInstance, NO_CLASS)[0];
    boolean PRel;
    if (!multiInstance)
      PRel = canPredict(false, false, false, false, true, multiInstance, NO_CLASS)[0];
    else
      PRel = false;
    
    if (PNom || PNum || PStr || PDat || PRel) {
      if (weighted)
        instanceWeights(PNom, PNum, PStr, PDat, PRel, multiInstance, NO_CLASS);
      
      canHandleZeroTraining(PNom, PNum, PStr, PDat, PRel, multiInstance, NO_CLASS);
      // the 100% missing test only makes sense if 20% missing already works
      boolean handleMissingPredictors = canHandleMissing(PNom, PNum, PStr, PDat, PRel, 
          multiInstance, NO_CLASS, 
          true, false, 20)[0];
      if (handleMissingPredictors)
        canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance, NO_CLASS, true, false, 100);
      
      correctBuildInitialisation(PNom, PNum, PStr, PDat, PRel, multiInstance, NO_CLASS);
      datasetIntegrity(PNom, PNum, PStr, PDat, PRel, multiInstance, NO_CLASS, 
          handleMissingPredictors, false);
    }
  }
  
  /**
   * Checks whether the scheme can take command line options.
   *
   * @return index 0 is true if the associator can take options
   */
  protected boolean[] canTakeOptions() {
    
    boolean[] result = new boolean[2];
    
    print("options...");
    if (m_Associator instanceof OptionHandler) {
      println("yes");
      if (m_Debug) {
        println("\n=== Full report ===");
        Enumeration enu = ((OptionHandler)m_Associator).listOptions();
        while (enu.hasMoreElements()) {
          Option option = (Option) enu.nextElement();
          print(option.synopsis() + "\n" 
              + option.description() + "\n");
        }
        println("\n");
      }
      result[0] = true;
    }
    else {
      println("no");
      result[0] = false;
    }
    
    return result;
  }
  
  /**
   * Checks whether the scheme says it can handle instance weights.
   *
   * @return index 0 is true if the associator handles instance weights
   */
  protected boolean[] weightedInstancesHandler() {
    
    boolean[] result = new boolean[2];
    
    print("weighted instances associator...");
    if (m_Associator instanceof WeightedInstancesHandler) {
      println("yes");
      result[0] = true;
    }
    else {
      println("no");
      result[0] = false;
    }
    
    return result;
  }
  
  /**
   * Checks whether the scheme handles multi-instance data.
   * 
   * @return index 0 is true if the associator handles multi-instance data
   */
  protected boolean[] multiInstanceHandler() {
    boolean[] result = new boolean[2];
    
    print("multi-instance associator...");
    if (m_Associator instanceof MultiInstanceCapabilitiesHandler) {
      println("yes");
      result[0] = true;
    }
    else {
      println("no");
      result[0] = false;
    }
    
    return result;
  }
  
  /**
   * tests for a serialVersionUID. Fails in case the scheme doesn't declare
   * a UID.
   *
   * @return index 0 is true if the scheme declares a UID
   */
  protected boolean[] declaresSerialVersionUID() {
    boolean[] result = new boolean[2];
    
    print("serialVersionUID...");
    // needsUID returns true when the class does NOT declare a serialVersionUID
    result[0] = !SerializationHelper.needsUID(m_Associator.getClass());
    
    if (result[0])
      println("yes");
    else
      println("no");
    
    return result;
  }
  
  /**
   * Checks basic prediction of the scheme, for simple non-troublesome
   * datasets.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NOMINAL, NUMERIC, etc.)
   * @return index 0 is true if the test was passed, index 1 is true if test 
   *         was acceptable
   */
  protected boolean[] canPredict(
      boolean nominalPredictor,
      boolean numericPredictor, 
      boolean stringPredictor, 
      boolean datePredictor,
      boolean relationalPredictor,
      boolean multiInstance,
      int classType) {
    
    print("basic predict");
    printAttributeSummary(
        nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType);
    print("...");
    // exception messages containing any of these words count as an
    // "acceptable" failure (the scheme admits it cannot handle the data)
    FastVector accepts = new FastVector();
    accepts.addElement("any");
    accepts.addElement("unary");
    accepts.addElement("binary");
    accepts.addElement("nominal");
    accepts.addElement("numeric");
    accepts.addElement("string");
    accepts.addElement("date");
    accepts.addElement("relational");
    accepts.addElement("multi-instance");
    accepts.addElement("not in classpath");
    int numTrain = getNumInstances(), numClasses = 2, missingLevel = 0;
    boolean predictorMissing = false, classMissing = false;
    
    return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, 
        datePredictor, relationalPredictor, multiInstance,
        classType, 
        missingLevel, predictorMissing, classMissing,
        numTrain, numClasses, 
        accepts);
  }
  
  /**
   * Checks whether nominal schemes can handle more than two classes.
   * If a scheme is only designed for two-class problems it should
   * throw an appropriate exception for multi-class problems.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param numClasses the number of classes to test
   * @return index 0 is true if the test was passed, index 1 is true if test 
   *         was acceptable
   */
  protected boolean[] canHandleNClasses(
      boolean nominalPredictor,
      boolean numericPredictor, 
      boolean stringPredictor, 
      boolean datePredictor,
      boolean relationalPredictor,
      boolean multiInstance,
      int numClasses) {
    
    print("more than two class problems");
    printAttributeSummary(
        nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, Attribute.NOMINAL);
    print("...");
    // acceptable failure keywords for schemes restricted to two classes
    FastVector accepts = new FastVector();
    accepts.addElement("number");
    accepts.addElement("class");
    int numTrain = getNumInstances(), missingLevel = 0;
    boolean predictorMissing = false, classMissing = false;
    
    return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, 
        datePredictor, relationalPredictor, multiInstance,
        Attribute.NOMINAL,
        missingLevel, predictorMissing, classMissing,
        numTrain, numClasses, 
        accepts);
  }
  
  /**
   * Checks whether the scheme can handle class attributes as Nth attribute.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @param classIndex the index of the class attribute (0-based, -1 means last attribute)
   * @return index 0 is true if the test was passed, index 1 is true if test 
   *         was acceptable
   * @see TestInstances#CLASS_IS_LAST
   */
  protected boolean[] canHandleClassAsNthAttribute(
      boolean nominalPredictor,
      boolean numericPredictor, 
      boolean stringPredictor, 
      boolean datePredictor,
      boolean relationalPredictor,
      boolean multiInstance,
      int classType,
      int classIndex) {
    
    if (classIndex == TestInstances.CLASS_IS_LAST)
      print("class attribute as last attribute");
    else
      print("class attribute as " + (classIndex + 1) + ". attribute");
    printAttributeSummary(
        nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType);
    print("...");
    // empty accepts list: no exception counts as an acceptable failure here
    FastVector accepts = new FastVector();
    int numTrain = getNumInstances(), numClasses = 2, missingLevel = 0;
    boolean predictorMissing = false, classMissing = false;
    
    return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, 
        datePredictor, relationalPredictor, multiInstance,
        classType, classIndex,
        missingLevel, predictorMissing, classMissing,
        numTrain, numClasses, 
        accepts);
  }
  
  /**
   * Checks whether the scheme can handle zero training instances.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @return index 0 is true if the test was passed, index 1 is true if test 
   *         was acceptable
   */
  protected boolean[] canHandleZeroTraining(
      boolean nominalPredictor,
      boolean numericPredictor, 
      boolean stringPredictor, 
      boolean datePredictor,
      boolean relationalPredictor,
      boolean multiInstance,
      int classType) {
    
    print("handle zero training instances");
    printAttributeSummary(
        nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType);
    print("...");
    // acceptable failure keywords for schemes that refuse empty training sets
    FastVector accepts = new FastVector();
    accepts.addElement("train");
    accepts.addElement("value");
    // numTrain = 0 is the point of this test
    int numTrain = 0, numClasses = 2, missingLevel = 0;
    boolean predictorMissing = false, classMissing = false;
    
    return runBasicTest(
        nominalPredictor, numericPredictor, stringPredictor, 
        datePredictor, relationalPredictor, multiInstance,
        classType, 
        missingLevel, predictorMissing, classMissing,
        numTrain, numClasses, 
        accepts);
  }
  
  /**
   * Checks whether the scheme correctly initialises models when 
   * buildAssociations is called. This test calls buildAssociations with
   * one training dataset. buildAssociations is then called on a training 
   * set with different structure, and then again with the original training 
   * set. If the equals method of the AssociatorEvaluation class returns 
   * false, this is noted as incorrect build initialisation.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
* @return index 0 is true if the test was passed */ protected boolean[] correctBuildInitialisation( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType) { boolean[] result = new boolean[2]; print("correct initialisation during buildAssociations"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); int numTrain = getNumInstances(), numClasses = 2, missingLevel = 0; boolean predictorMissing = false, classMissing = false; Instances train1 = null; Instances train2 = null; Associator associator = null; AssociatorEvaluation evaluation1A = null; AssociatorEvaluation evaluation1B = null; AssociatorEvaluation evaluation2 = null; int stage = 0; try { // Make two train sets with different numbers of attributes train1 = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal() : 0, numericPredictor ? getNumNumeric() : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? getNumRelational() : 0, numClasses, classType, multiInstance); train2 = makeTestDataset(84, numTrain, nominalPredictor ? getNumNominal() + 1 : 0, numericPredictor ? getNumNumeric() + 1 : 0, stringPredictor ? getNumString() + 1 : 0, datePredictor ? getNumDate() + 1 : 0, relationalPredictor ? 
getNumRelational() + 1 : 0, numClasses, classType, multiInstance); if (missingLevel > 0) { addMissing(train1, missingLevel, predictorMissing, classMissing); addMissing(train2, missingLevel, predictorMissing, classMissing); } associator = AbstractAssociator.makeCopies(getAssociator(), 1)[0]; evaluation1A = new AssociatorEvaluation(); evaluation1B = new AssociatorEvaluation(); evaluation2 = new AssociatorEvaluation(); } catch (Exception ex) { throw new Error("Error setting up for tests: " + ex.getMessage()); } try { stage = 0; evaluation1A.evaluate(associator, train1); stage = 1; evaluation2.evaluate(associator, train2); stage = 2; evaluation1B.evaluate(associator, train1); stage = 3; if (!evaluation1A.equals(evaluation1B)) { if (m_Debug) { println("\n=== Full report ===\n" + evaluation1A.toSummaryString("\nFirst buildAssociations()") + "\n\n"); println( evaluation1B.toSummaryString("\nSecond buildAssociations()") + "\n\n"); } throw new Exception("Results differ between buildAssociations calls"); } println("yes"); result[0] = true; if (false && m_Debug) { println("\n=== Full report ===\n" + evaluation1A.toSummaryString("\nFirst buildAssociations()") + "\n\n"); println( evaluation1B.toSummaryString("\nSecond buildAssociations()") + "\n\n"); } } catch (Exception ex) { println("no"); result[0] = false; if (m_Debug) { println("\n=== Full Report ==="); print("Problem during building"); switch (stage) { case 0: print(" of dataset 1"); break; case 1: print(" of dataset 2"); break; case 2: print(" of dataset 1 (2nd build)"); break; case 3: print(", comparing results from builds of dataset 1"); break; } println(": " + ex.getMessage() + "\n"); println("here are the datasets:\n"); println("=== Train1 Dataset ===\n" + train1.toString() + "\n"); println("=== Train2 Dataset ===\n" + train2.toString() + "\n"); } } return result; } /** * Checks basic missing value handling of the scheme. If the missing * values cause an exception to be thrown by the scheme, this will be * recorded. 
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @param predictorMissing true if the missing values may be in 
   *        the predictors
   * @param classMissing true if the missing values may be in the class
   * @param missingLevel the percentage of missing values
   * @return index 0 is true if the test was passed, index 1 is true if test 
   *         was acceptable
   */
  protected boolean[] canHandleMissing(
      boolean nominalPredictor,
      boolean numericPredictor, 
      boolean stringPredictor, 
      boolean datePredictor,
      boolean relationalPredictor,
      boolean multiInstance,
      int classType,
      boolean predictorMissing,
      boolean classMissing,
      int missingLevel) {
    
    // build up the test's progress message from the chosen options
    if (missingLevel == 100)
      print("100% ");
    print("missing");
    if (predictorMissing) {
      print(" predictor");
      if (classMissing)
        print(" and");
    }
    if (classMissing)
      print(" class");
    print(" values");
    printAttributeSummary(
        nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType);
    print("...");
    // acceptable failure keywords for schemes that reject missing values
    FastVector accepts = new FastVector();
    accepts.addElement("missing");
    accepts.addElement("value");
    accepts.addElement("train");
    int numTrain = getNumInstances(), numClasses = 2;
    
    return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, 
        datePredictor, relationalPredictor, multiInstance,
        classType, 
        missingLevel, predictorMissing, classMissing,
        numTrain, numClasses, 
        accepts);
  }
  
  /**
   * Checks whether the associator can handle instance weights.
   * This test compares the associator performance on two datasets
   * that are identical except for the training weights.
   * If the
   * results change, then the associator must be using the weights. It
   * may be possible to get a false positive from this test if the 
   * weight changes aren't significant enough to induce a change
   * in associator performance (but the weights are chosen to minimize
   * the likelihood of this).
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @return index 0 true if the test was passed
   */
  protected boolean[] instanceWeights(
      boolean nominalPredictor,
      boolean numericPredictor, 
      boolean stringPredictor, 
      boolean datePredictor,
      boolean relationalPredictor,
      boolean multiInstance,
      int classType) {
    
    print("associator uses instance weights");
    printAttributeSummary(
        nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType);
    print("...");
    // double-size dataset so the weighted half is large enough to matter
    int numTrain = 2*getNumInstances(), numClasses = 2, missingLevel = 0;
    boolean predictorMissing = false, classMissing = false;
    
    boolean[] result = new boolean[2];
    Instances train = null;
    Associator [] associators = null;
    AssociatorEvaluation evaluationB = null;
    AssociatorEvaluation evaluationI = null;
    // distinguishes "results did not differ" from a genuine build failure
    boolean evalFail = false;
    try {
      train = makeTestDataset(42, numTrain, 
                              nominalPredictor ? getNumNominal() + 1 : 0,
                              numericPredictor ? getNumNumeric() + 1 : 0, 
                              stringPredictor ? getNumString() : 0, 
                              datePredictor ? getNumDate() : 0, 
                              relationalPredictor ? getNumRelational() : 0, 
                              numClasses, 
                              classType,
                              multiInstance);
      if (missingLevel > 0)
        addMissing(train, missingLevel, predictorMissing, classMissing);
      associators = AbstractAssociator.makeCopies(getAssociator(), 2);
      evaluationB = new AssociatorEvaluation();
      evaluationI = new AssociatorEvaluation();
      evaluationB.evaluate(associators[0], train);
    } catch (Exception ex) {
      throw new Error("Error setting up for tests: " + ex.getMessage());
    }
    try {
      
      // Now modify instance weights and re-built/test
      // zero all weights, then give random weights 1..10 to roughly half the
      // instances (seeded for reproducibility)
      for (int i = 0; i < train.numInstances(); i++) {
        train.instance(i).setWeight(0);
      }
      Random random = new Random(1);
      for (int i = 0; i < train.numInstances() / 2; i++) {
        int inst = Math.abs(random.nextInt()) % train.numInstances();
        int weight = Math.abs(random.nextInt()) % 10 + 1;
        train.instance(inst).setWeight(weight);
      }
      evaluationI.evaluate(associators[1], train);
      if (evaluationB.equals(evaluationI)) {
        // println("no");
        evalFail = true;
        throw new Exception("evalFail");
      }
      
      println("yes");
      result[0] = true;
    } catch (Exception ex) {
      println("no");
      result[0] = false;
      
      if (m_Debug) {
        println("\n=== Full Report ===");
        
        if (evalFail) {
          println("Results don't differ between non-weighted and "
              + "weighted instance models.");
          println("Here are the results:\n");
          println(evaluationB.toSummaryString("\nboth methods\n"));
        } else {
          print("Problem during building");
          println(": " + ex.getMessage() + "\n");
        }
        println("Here is the dataset:\n");
        println("=== Train Dataset ===\n"
            + train.toString() + "\n");
        println("=== Train Weights ===\n");
        for (int i = 0; i < train.numInstances(); i++) {
          println(" " + (i + 1) + " " + train.instance(i).weight());
        }
      }
    }
    
    return result;
  }
  
  /**
   * Checks whether the scheme alters the training dataset during
   * building. If the scheme needs to modify the data it should take
   * a copy of the training data. Currently checks for changes to header
   * structure, number of instances, order of instances, instance weights.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @param predictorMissing true if we know the associator can handle
   *        (at least) moderate missing predictor values
   * @param classMissing true if we know the associator can handle
   *        (at least) moderate missing class values
   * @return index 0 is true if the test was passed
   */
  protected boolean[] datasetIntegrity(
      boolean nominalPredictor,
      boolean numericPredictor, 
      boolean stringPredictor, 
      boolean datePredictor,
      boolean relationalPredictor,
      boolean multiInstance,
      int classType,
      boolean predictorMissing,
      boolean classMissing) {
    
    print("associator doesn't alter original datasets");
    printAttributeSummary(
        nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType);
    print("...");
    int numTrain = getNumInstances(), numClasses = 2, missingLevel = 20;
    
    boolean[] result = new boolean[2];
    Instances train = null;
    Associator associator = null;
    try {
      train = makeTestDataset(42, numTrain, 
                              nominalPredictor ? getNumNominal() : 0,
                              numericPredictor ? getNumNumeric() : 0, 
                              stringPredictor ? getNumString() : 0, 
                              datePredictor ? getNumDate() : 0, 
                              relationalPredictor ? getNumRelational() : 0, 
                              numClasses, 
                              classType,
                              multiInstance);
      if (missingLevel > 0)
        addMissing(train, missingLevel, predictorMissing, classMissing);
      associator = AbstractAssociator.makeCopies(getAssociator(), 1)[0];
    } catch (Exception ex) {
      throw new Error("Error setting up for tests: " + ex.getMessage());
    }
    try {
      // build on a copy, then verify the original was left untouched
      Instances trainCopy = new Instances(train);
      associator.buildAssociations(trainCopy);
      compareDatasets(train, trainCopy);
      
      println("yes");
      result[0] = true;
    } catch (Exception ex) {
      println("no");
      result[0] = false;
      
      if (m_Debug) {
        println("\n=== Full Report ===");
        print("Problem during building");
        println(": " + ex.getMessage() + "\n");
        println("Here is the dataset:\n");
        println("=== Train Dataset ===\n"
            + train.toString() + "\n");
      }
    }
    
    return result;
  }
  
  /**
   * Runs a test on the datasets with the given characteristics.
   * 
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
* @param missingLevel the percentage of missing values * @param predictorMissing true if the missing values may be in * the predictors * @param classMissing true if the missing values may be in the class * @param numTrain the number of instances in the training set * @param numClasses the number of classes * @param accepts the acceptable string in an exception * @return index 0 is true if the test was passed, index 1 is true if test * was acceptable */ protected boolean[] runBasicTest(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType, int missingLevel, boolean predictorMissing, boolean classMissing, int numTrain, int numClasses, FastVector accepts) { return runBasicTest( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType, TestInstances.CLASS_IS_LAST, missingLevel, predictorMissing, classMissing, numTrain, numClasses, accepts); } /** * Runs a text on the datasets with the given characteristics. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) 
* @param classIndex the attribute index of the class * @param missingLevel the percentage of missing values * @param predictorMissing true if the missing values may be in * the predictors * @param classMissing true if the missing values may be in the class * @param numTrain the number of instances in the training set * @param numClasses the number of classes * @param accepts the acceptable string in an exception * @return index 0 is true if the test was passed, index 1 is true if test * was acceptable */ protected boolean[] runBasicTest(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType, int classIndex, int missingLevel, boolean predictorMissing, boolean classMissing, int numTrain, int numClasses, FastVector accepts) { boolean[] result = new boolean[2]; Instances train = null; Associator associator = null; try { train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal() : 0, numericPredictor ? getNumNumeric() : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? getNumRelational() : 0, numClasses, classType, classIndex, multiInstance); if (missingLevel > 0) addMissing(train, missingLevel, predictorMissing, classMissing); associator = AbstractAssociator.makeCopies(getAssociator(), 1)[0]; } catch (Exception ex) { ex.printStackTrace(); throw new Error("Error setting up for tests: " + ex.getMessage()); } try { associator.buildAssociations(train); println("yes"); result[0] = true; } catch (Exception ex) { boolean acceptable = false; String msg; if (ex.getMessage() == null) msg = ""; else msg = ex.getMessage().toLowerCase(); if (msg.indexOf("not in classpath") > -1) m_ClasspathProblems = true; for (int i = 0; i < accepts.size(); i++) { if (msg.indexOf((String)accepts.elementAt(i)) >= 0) { acceptable = true; } } println("no" + (acceptable ? 
" (OK error message)" : "")); result[1] = acceptable; if (m_Debug) { println("\n=== Full Report ==="); print("Problem during building"); println(": " + ex.getMessage() + "\n"); if (!acceptable) { if (accepts.size() > 0) { print("Error message doesn't mention "); for (int i = 0; i < accepts.size(); i++) { if (i != 0) { print(" or "); } print('"' + (String)accepts.elementAt(i) + '"'); } } println("here is the dataset:\n"); println("=== Train Dataset ===\n" + train.toString() + "\n"); } } } return result; } /** * Make a simple set of instances, which can later be modified * for use in specific tests. * * @param seed the random number seed * @param numInstances the number of instances to generate * @param numNominal the number of nominal attributes * @param numNumeric the number of numeric attributes * @param numString the number of string attributes * @param numDate the number of date attributes * @param numRelational the number of relational attributes * @param numClasses the number of classes (if nominal class) * @param classType the class type (NUMERIC, NOMINAL, etc.) * @param multiInstance whether the dataset should a multi-instance dataset * @return the test dataset * @throws Exception if the dataset couldn't be generated * @see #process(Instances) */ protected Instances makeTestDataset(int seed, int numInstances, int numNominal, int numNumeric, int numString, int numDate, int numRelational, int numClasses, int classType, boolean multiInstance) throws Exception { return makeTestDataset( seed, numInstances, numNominal, numNumeric, numString, numDate, numRelational, numClasses, classType, TestInstances.CLASS_IS_LAST, multiInstance); } /** * Make a simple set of instances with variable position of the class * attribute, which can later be modified for use in specific tests. 
* * @param seed the random number seed * @param numInstances the number of instances to generate * @param numNominal the number of nominal attributes * @param numNumeric the number of numeric attributes * @param numString the number of string attributes * @param numDate the number of date attributes * @param numRelational the number of relational attributes * @param numClasses the number of classes (if nominal class) * @param classType the class type (NUMERIC, NOMINAL, etc.) * @param classIndex the index of the class (0-based, -1 as last) * @param multiInstance whether the dataset should a multi-instance dataset * @return the test dataset * @throws Exception if the dataset couldn't be generated * @see TestInstances#CLASS_IS_LAST * @see #process(Instances) */ protected Instances makeTestDataset(int seed, int numInstances, int numNominal, int numNumeric, int numString, int numDate, int numRelational, int numClasses, int classType, int classIndex, boolean multiInstance) throws Exception { TestInstances dataset = new TestInstances(); dataset.setSeed(seed); dataset.setNumInstances(numInstances); dataset.setNumNominal(numNominal); dataset.setNumNumeric(numNumeric); dataset.setNumString(numString); dataset.setNumDate(numDate); dataset.setNumRelational(numRelational); dataset.setNumClasses(numClasses); if (classType == NO_CLASS) { dataset.setClassType(Attribute.NOMINAL); // ignored dataset.setClassIndex(TestInstances.NO_CLASS); } else { dataset.setClassType(classType); dataset.setClassIndex(classIndex); } dataset.setNumClasses(numClasses); dataset.setMultiInstance(multiInstance); dataset.setWords(getWords()); dataset.setWordSeparators(getWordSeparators()); return process(dataset.generate()); } /** * Print out a short summary string for the dataset characteristics * * @param nominalPredictor true if nominal predictor attributes are present * @param numericPredictor true if numeric predictor attributes are present * @param stringPredictor true if string predictor attributes 
are present * @param datePredictor true if date predictor attributes are present * @param relationalPredictor true if relational predictor attributes are present * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) */ protected void printAttributeSummary(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType) { String str = ""; if (numericPredictor) str += " numeric"; if (nominalPredictor) { if (str.length() > 0) str += " &"; str += " nominal"; } if (stringPredictor) { if (str.length() > 0) str += " &"; str += " string"; } if (datePredictor) { if (str.length() > 0) str += " &"; str += " date"; } if (relationalPredictor) { if (str.length() > 0) str += " &"; str += " relational"; } str += " predictors)"; switch (classType) { case Attribute.NUMERIC: str = " (numeric class," + str; break; case Attribute.NOMINAL: str = " (nominal class," + str; break; case Attribute.STRING: str = " (string class," + str; break; case Attribute.DATE: str = " (date class," + str; break; case Attribute.RELATIONAL: str = " (relational class," + str; break; case NO_CLASS: str = " (no class," + str; break; } print(str); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Test method for this class * * @param args the commandline parameters */ public static void main(String [] args) { runCheck(new CheckAssociator(), args); } }
55,644
35.086252
131
java
tsml-java
tsml-java-master/src/main/java/weka/associations/DefaultAssociationRule.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * DefaultAssociationRule.java * Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand * */ package weka.associations; import java.io.Serializable; import java.util.Collection; import weka.core.Tag; import weka.core.Utils; /** * Class for storing and manipulating an association rule. * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com). */ public class DefaultAssociationRule extends AssociationRule implements Serializable { /** For serialization */ private static final long serialVersionUID = -661269018702294489L; /** Enum for holding different metric types */ public static enum METRIC_TYPE { CONFIDENCE("conf") { double compute(int premiseSupport, int consequenceSupport, int totalSupport, int totalTransactions) { return (double)totalSupport / (double)premiseSupport; } }, LIFT("lift") { double compute(int premiseSupport, int consequenceSupport, int totalSupport, int totalTransactions) { double confidence = METRIC_TYPE.CONFIDENCE.compute(premiseSupport, consequenceSupport, totalSupport, totalTransactions); return confidence / ((double)consequenceSupport / (double)totalTransactions); } }, LEVERAGE("lev") { double compute(int premiseSupport, int consequenceSupport, int totalSupport, int totalTransactions) { double coverageForItemSet = (double)totalSupport / (double)totalTransactions; double expectedCoverageIfIndependent = 
((double)premiseSupport / (double)totalTransactions) * ((double)consequenceSupport / (double)totalTransactions); return coverageForItemSet - expectedCoverageIfIndependent; } }, CONVICTION("conv") { double compute(int premiseSupport, int consequenceSupport, int totalSupport, int totalTransactions) { double num = (double)premiseSupport * (double)(totalTransactions - consequenceSupport) / (double)totalTransactions; double denom = premiseSupport - totalSupport + 1; return num / denom; } }; private final String m_stringVal; METRIC_TYPE(String name) { m_stringVal = name; } abstract double compute(int premiseSupport, int consequenceSupport, int totalSupport, int totalTransactions); public String toString() { return m_stringVal; } public String toStringMetric(int premiseSupport, int consequenceSupport, int totalSupport, int totalTransactions) { return m_stringVal + ":(" + Utils.doubleToString(compute(premiseSupport, consequenceSupport, totalSupport, totalTransactions), 2) + ")"; } public String toXML(int premiseSupport, int consequenceSupport, int totalSupport, int totalTransactions) { String result = "<CRITERE name=\"" + m_stringVal + "\" value=\" " + Utils.doubleToString(compute(premiseSupport, consequenceSupport, totalSupport, totalTransactions), 2) + "\"/>"; return result; } } /** Tags for display in the GUI */ public static final Tag[] TAGS_SELECTION = { new Tag(METRIC_TYPE.CONFIDENCE.ordinal(), "Confidence"), new Tag(METRIC_TYPE.LIFT.ordinal(), "Lift"), new Tag(METRIC_TYPE.LEVERAGE.ordinal(), "Leverage"), new Tag(METRIC_TYPE.CONVICTION.ordinal(), "Conviction") }; /** The metric type for this rule */ protected DefaultAssociationRule.METRIC_TYPE m_metricType = METRIC_TYPE.CONFIDENCE; /** The premise of the rule */ protected Collection<Item> m_premise; /** The consequence of the rule */ protected Collection<Item> m_consequence; /** The support for the premise */ protected int m_premiseSupport; /** The support for the consequence */ protected int m_consequenceSupport; 
/** The total support for the item set (premise + consequence) */ protected int m_totalSupport; /** The total number of transactions in the data */ protected int m_totalTransactions; /** * Construct a new default association rule. * * @param premise the premise of the rule * @param consequence the consequence of the rule * @param metric the metric for the rule * @param premiseSupport the support of the premise * @param consequenceSupport the support of the consequence * @param totalSupport the total support of the rule * @param totalTransactions the number of transactions in the data */ public DefaultAssociationRule(Collection<Item> premise, Collection<Item> consequence, METRIC_TYPE metric, int premiseSupport, int consequenceSupport, int totalSupport, int totalTransactions) { m_premise = premise; m_consequence = consequence; m_metricType = metric; m_premiseSupport = premiseSupport; m_consequenceSupport = consequenceSupport; m_totalSupport = totalSupport; m_totalTransactions = totalTransactions; } /* (non-Javadoc) * @see weka.associations.AssociationRule#getPremise() */ public Collection<Item> getPremise() { return m_premise; } /* (non-Javadoc) * @see weka.associations.AssociationRule#getConsequence() */ public Collection<Item> getConsequence() { return m_consequence; } /* (non-Javadoc) * @see weka.associations.AssociationRule#getPrimaryMetricName() */ public String getPrimaryMetricName() { return TAGS_SELECTION[m_metricType.ordinal()].getReadable(); } /* (non-Javadoc) * @see weka.associations.AssociationRule#getPrimaryMetricValue() */ public double getPrimaryMetricValue() { return m_metricType.compute(m_premiseSupport, m_consequenceSupport, m_totalSupport, m_totalTransactions); } /* (non-Javadoc) * @see weka.associations.AssociationRule#getNamedMetricValue(java.lang.String) */ public double getNamedMetricValue(String metricName) throws Exception { DefaultAssociationRule.METRIC_TYPE requested = null; for (DefaultAssociationRule.METRIC_TYPE m : METRIC_TYPE.values()) 
{ if (TAGS_SELECTION[m.ordinal()].getReadable().equals(metricName)) { requested = m; } } if (requested == null) { throw new Exception("[AssociationRule] Unknown metric: " + metricName); } return requested.compute(m_premiseSupport, m_consequenceSupport, m_totalSupport, m_totalTransactions); } /* (non-Javadoc) * @see weka.associations.AssociationRule#getNumberOfMetricsForRule() */ public int getNumberOfMetricsForRule() { return METRIC_TYPE.values().length; } /* (non-Javadoc) * @see weka.associations.AssociationRule#getMetricNamesForRule() */ public String[] getMetricNamesForRule() { String[] metricNames = new String[TAGS_SELECTION.length]; for (int i = 0; i < TAGS_SELECTION.length; i++) { metricNames[i] = TAGS_SELECTION[i].getReadable(); } return metricNames; } /* (non-Javadoc) * @see weka.associations.AssociationRule#getMetricValuesForRule() */ public double[] getMetricValuesForRule() throws Exception { double[] values = new double[TAGS_SELECTION.length]; for (int i = 0; i < TAGS_SELECTION.length; i++) { values[i] = getNamedMetricValue(TAGS_SELECTION[i].getReadable()); } return values; } /* (non-Javadoc) * @see weka.associations.AssociationRule#getPremiseSupport() */ public int getPremiseSupport() { return m_premiseSupport; } /* (non-Javadoc) * @see weka.associations.AssociationRule#getConsequenceSupport() */ public int getConsequenceSupport() { return m_consequenceSupport; } /* (non-Javadoc) * @see weka.associations.AssociationRule#getTotalSupport() */ public int getTotalSupport() { return m_totalSupport; } /* (non-Javadoc) * @see weka.associations.AssociationRule#getTotalTransactions() */ public int getTotalTransactions() { return m_totalTransactions; } /** * Get a textual description of this rule. * * @return a textual description of this rule. 
*/ public String toString() { StringBuffer result = new StringBuffer(); result.append(m_premise.toString() + ": " + m_premiseSupport + " ==> " + m_consequence.toString() + ": " + m_totalSupport + " "); for (DefaultAssociationRule.METRIC_TYPE m : METRIC_TYPE.values()) { if (m.equals(m_metricType)) { result.append("<" + m.toStringMetric(m_premiseSupport, m_consequenceSupport, m_totalSupport, m_totalTransactions) + "> "); } else { result.append("" + m.toStringMetric(m_premiseSupport, m_consequenceSupport, m_totalSupport, m_totalTransactions) + " "); } } return result.toString(); } }
9,605
30.913621
98
java
tsml-java
tsml-java-master/src/main/java/weka/associations/FPGrowth.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * FPGrowth.java * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand * */ package weka.associations; import java.io.Serializable; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.Enumeration; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; import java.util.Vector; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.SparseInstance; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** <!-- globalinfo-start --> * Class implementing the FP-growth algorithm for finding large item sets without candidate generation. Iteratively reduces the minimum support until it finds the required number of rules with the given minimum metric. For more information see:<br/> * <br/> * J. Han, J.Pei, Y. Yin: Mining frequent patterns without candidate generation. 
In: Proceedings of the 2000 ACM-SIGMID International Conference on Management of Data, 1-12, 2000. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Han2000, * author = {J. Han and J.Pei and Y. Yin}, * booktitle = {Proceedings of the 2000 ACM-SIGMID International Conference on Management of Data}, * pages = {1-12}, * title = {Mining frequent patterns without candidate generation}, * year = {2000} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P &lt;attribute index of positive value&gt; * Set the index of the attribute value to consider as 'positive' * for binary attributes in normal dense instances. Index 2 is always * used for sparse instances. (default = 2)</pre> * * <pre> -I &lt;max items&gt; * The maximum number of items to include in large items sets (and rules). (default = -1, i.e. no limit.)</pre> * * <pre> -N &lt;require number of rules&gt; * The required number of rules. (default = 10)</pre> * * <pre> -T &lt;0=confidence | 1=lift | 2=leverage | 3=Conviction&gt; * The metric by which to rank rules. (default = confidence)</pre> * * <pre> -C &lt;minimum metric score of a rule&gt; * The minimum metric score of a rule. (default = 0.9)</pre> * * <pre> -U &lt;upper bound for minimum support&gt; * Upper bound for minimum support. (default = 1.0)</pre> * * <pre> -M &lt;lower bound for minimum support&gt; * The lower bound for the minimum support. (default = 0.1)</pre> * * <pre> -D &lt;delta for minimum support&gt; * The delta by which the minimum support is decreased in * each iteration. (default = 0.05)</pre> * * <pre> -S * Find all rules that meet the lower bound on * minimum support and the minimum metric constraint. 
* Turning this mode on will disable the iterative support reduction * procedure to find the specified number of rules.</pre> * * <pre> -transactions &lt;comma separated list of attribute names&gt; * Only consider transactions that contain these items (default = no restriction)</pre> * * <pre> -rules &lt;comma separated list of attribute names&gt; * Only print rules that contain these items. (default = no restriction)</pre> * * <pre> -use-or * Use OR instead of AND for must contain list(s). Use in conjunction * with -transactions and/or -rules</pre> * <!-- options-end --> * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision: 8034 $ */ public class FPGrowth extends AbstractAssociator implements AssociationRulesProducer, OptionHandler, TechnicalInformationHandler { /** For serialization */ private static final long serialVersionUID = 3620717108603442911L; /** * Class for maintaining a frequent item set. */ protected static class FrequentBinaryItemSet implements Serializable, Cloneable { /** For serialization */ private static final long serialVersionUID = -6543815873565829448L; /** The list of items in the item set */ protected ArrayList<BinaryItem> m_items = new ArrayList<BinaryItem>(); /** the support of this item set **/ protected int m_support; /** * Constructor * * @param items the items that make up the frequent item set. * @param support the support of this item set. */ public FrequentBinaryItemSet(ArrayList<BinaryItem> items, int support) { m_items = items; m_support = support; Collections.sort(m_items); } /** * Add an item to this item set. * * @param i the item to add. */ public void addItem(BinaryItem i) { m_items.add(i); Collections.sort(m_items); } /** * Set the support for this item set. * * @param support the support for this item set. */ public void setSupport(int support) { m_support = support; } /** * Get the support of this item set. * * @return the support of this item set. 
*/ public int getSupport() { return m_support; } /** * Get the items in this item set. * * @return the items in this item set. */ public Collection<BinaryItem> getItems() { return m_items; } /** * Get a particular item from this item set. * * @param index the index of the item to get. * @return the item. */ public BinaryItem getItem(int index) { return m_items.get(index); } /** * Get the number of items in this item set. * * @return the number of items in this item set. */ public int numberOfItems() { return m_items.size(); } /** * Get a textual description of this item set. * * @return a textual description of this item set. */ public String toString() { StringBuffer buff = new StringBuffer(); Iterator<BinaryItem> i = m_items.iterator(); while (i.hasNext()) { buff.append(i.next().toString() + " "); } buff.append(": " + m_support); return buff.toString(); } /** * Make a copy of this item set. * * @return a copy of this item set. */ public Object clone() { ArrayList<BinaryItem> items = new ArrayList<BinaryItem>(m_items); return new FrequentBinaryItemSet(items, m_support); } } /** * Maintains a list of frequent item sets. */ protected static class FrequentItemSets implements Serializable { /** For serialization */ private static final long serialVersionUID = 4173606872363973588L; /** The list of frequent item sets */ protected ArrayList<FrequentBinaryItemSet> m_sets = new ArrayList<FrequentBinaryItemSet>(); /** The total number of transactions in the data */ protected int m_numberOfTransactions; /** * Constructor. * * @param numTransactions the total number of transactions in the data. */ public FrequentItemSets(int numTransactions) { m_numberOfTransactions = numTransactions; } /** * Get an item set. * * @param index the index of the item set to get. * @return an item set. */ public FrequentBinaryItemSet getItemSet(int index) { return m_sets.get(index); } /** * Get an iterator that can be used to access all the item sets. * * @return an iterator. 
*/ public Iterator<FrequentBinaryItemSet> iterator() { return m_sets.iterator(); } /** * Get the total number of transactions in the data that the item * sets were derived from. * * @return the total number of transactions in the data. */ public int getNumberOfTransactions() { return m_numberOfTransactions; } /** * Add an item set. * * @param setToAdd the item set to add. */ public void addItemSet(FrequentBinaryItemSet setToAdd) { m_sets.add(setToAdd); } /** * Sort the item sets according to the supplied comparator. * * @param comp the comparator to use. */ public void sort(Comparator<FrequentBinaryItemSet> comp) { Collections.sort(m_sets, comp); } /** * Get the number of item sets. * * @return the number of item sets. */ public int size() { return m_sets.size(); } /** * Sort the item sets. Sorts by item set length. Ties are broken by comparing * the items in the two item sets. */ public void sort() { Comparator<FrequentBinaryItemSet> compF = new Comparator<FrequentBinaryItemSet>() { public int compare(FrequentBinaryItemSet one, FrequentBinaryItemSet two) { Collection<BinaryItem> compOne = one.getItems(); Collection<BinaryItem> compTwo = two.getItems(); // if (one.getSupport() == two.getSupport()) { // if supports are equal then list shorter item sets before longer ones if (compOne.size() < compTwo.size()) { return -1; } else if (compOne.size() > compTwo.size()) { return 1; } else { // compare items Iterator<BinaryItem> twoIterator = compTwo.iterator(); for (BinaryItem oneI : compOne) { BinaryItem twoI = twoIterator.next(); int result = oneI.compareTo(twoI); if (result != 0) { return result; } } return 0; // equal } // return 0; /* } else if (one.getSupport() > two.getSupport()) { // reverse ordering (i.e. descending by support) return -1; } */ // return 1; } }; sort(compF); } /** * Get a textual description of this list of item sets. * * @param numSets the number of item sets to display. * @return a textual description of the item sets. 
*/ public String toString(int numSets) { if (m_sets.size() == 0) { return "No frequent items sets found!"; } StringBuffer result = new StringBuffer(); result.append("" + m_sets.size() + " frequent item sets found"); if (numSets > 0) { result.append(" , displaying " + numSets); } result.append(":\n\n"); int count = 0; for (FrequentBinaryItemSet i : m_sets) { if (numSets > 0 && count > numSets) { break; } result.append(i.toString() + "\n"); count++; } return result.toString(); } } /** * This class holds the counts for projected tree nodes * and header lists. */ protected static class ShadowCounts implements Serializable { /** For serialization */ private static final long serialVersionUID = 4435433714185969155L; /** Holds the counts at different recursion levels */ private ArrayList<Integer> m_counts = new ArrayList<Integer>(); /** * Get the count at the specified recursion depth. * * @param recursionLevel the depth of the recursion. * @return the count. */ public int getCount(int recursionLevel) { if (recursionLevel >= m_counts.size()) { return 0; } else { return m_counts.get(recursionLevel); } } /** * Increase the count at a given recursion level. * * @param recursionLevel the level at which to increase the count. * @param incr the amount by which to increase the count. */ public void increaseCount(int recursionLevel, int incr) { // basically treat the list like a stack where we // can add a new element, or increment the element // at the top if (recursionLevel == m_counts.size()) { // new element m_counts.add(incr); } else if (recursionLevel == m_counts.size() - 1) { // otherwise increment the top int n = m_counts.get(recursionLevel).intValue(); m_counts.set(recursionLevel, (n + incr)); } } /** * Remove the count at the given recursion level. * * @param recursionLevel the level at which to remove the count. */ public void removeCount(int recursionLevel) { if (recursionLevel < m_counts.size()) { m_counts.remove(recursionLevel); } } } /** * A node in the FP-tree. 
   */
  protected static class FPTreeNode implements Serializable {

    /** For serialization */
    private static final long serialVersionUID = 4396315323673737660L;

    /** Link to another sibling at this level in the tree. */
    protected FPTreeNode m_levelSibling;

    /** Link to the parent node (null for the root). */
    protected FPTreeNode m_parent;

    /** The item stored at this node (null for the root). */
    protected BinaryItem m_item;

    /** Node ID, assigned by assignIDs() — used only for graphing the tree. */
    protected int m_ID;

    /** The children of this node, keyed by item. */
    protected Map<BinaryItem, FPTreeNode> m_children =
      new HashMap<BinaryItem, FPTreeNode>();

    /**
     * Counts associated with projected versions of this node, one slot per
     * recursion (projection) level of the mining process.
     */
    protected ShadowCounts m_projectedCounts = new ShadowCounts();

    /**
     * Construct a new node with the given parent link and item.
     *
     * @param parent a pointer to the parent of this node.
     * @param item the item at this node.
     */
    public FPTreeNode(FPTreeNode parent, BinaryItem item) {
      m_parent = parent;
      m_item = item;
    }

    /**
     * Insert an item set into the tree at this node. Removes the first item
     * from the supplied item set and makes a recursive call to insert the
     * remaining items. NOTE: the supplied collection is consumed (the first
     * item is removed at each level) as insertion proceeds.
     *
     * @param itemSet the item set to insert.
     * @param headerTable the header table for the tree.
     * @param incr the amount by which to increase counts.
     */
    public void addItemSet(Collection<BinaryItem> itemSet,
        Map<BinaryItem, FPTreeRoot.Header> headerTable, int incr) {
      Iterator<BinaryItem> i = itemSet.iterator();

      if (i.hasNext()) {
        BinaryItem first = i.next();

        FPTreeNode aChild;
        if (!m_children.containsKey(first)) {
          // not in the tree, so add it.
          aChild = new FPTreeNode(this, first);
          m_children.put(first, aChild);

          // update the header
          if (!headerTable.containsKey(first)) {
            headerTable.put(first, new FPTreeRoot.Header());
          }

          // append new node to header list
          headerTable.get(first).addToList(aChild);
        } else {
          // get the appropriate child node
          aChild = m_children.get(first);
        }

        // update counts in header table (recursion level 0 = unprojected tree)
        headerTable.get(first).getProjectedCounts().increaseCount(0, incr);

        // increase the child's count
        aChild.increaseProjectedCount(0, incr);

        // proceed recursively with the remaining items
        itemSet.remove(first);
        aChild.addItemSet(itemSet, headerTable, incr);
      }
    }

    /**
     * Increase the projected count at the given recursion level at this node.
     *
     * @param recursionLevel the recursion level to increase the node count at.
     * @param incr the amount by which to increase the count.
     */
    public void increaseProjectedCount(int recursionLevel, int incr) {
      m_projectedCounts.increaseCount(recursionLevel, incr);
    }

    /**
     * Remove the projected count at the given recursion level for this node.
     *
     * @param recursionLevel the recursion level at which to remove the count.
     */
    public void removeProjectedCount(int recursionLevel) {
      m_projectedCounts.removeCount(recursionLevel);
    }

    /**
     * Get the projected count at the given recursion level for this node.
     *
     * @param recursionLevel the recursion level at which to get the count.
     * @return the count.
     */
    public int getProjectedCount(int recursionLevel) {
      return m_projectedCounts.getCount(recursionLevel);
    }

    /**
     * Get the parent node.
     *
     * @return the parent node.
     */
    public FPTreeNode getParent() {
      return m_parent;
    }

    /**
     * Get the item at this node.
     *
     * @return the item at this node.
     */
    public BinaryItem getItem() {
      return m_item;
    }

    /**
     * Return a textual description of this node for a given recursion level.
     *
     * @param recursionLevel the recursion depth to use.
     * @return a textual description of this node.
     */
    public String toString(int recursionLevel) {
      return toString("", recursionLevel);
    }

    /**
     * Return a textual description of this node (and, recursively, of its
     * children) for a given recursion level.
     *
     * @param prefix a prefix string to prepend.
     * @param recursionLevel the recursion level to use.
     * @return a textual description of this node.
     */
    public String toString(String prefix, int recursionLevel) {
      StringBuffer buffer = new StringBuffer();
      buffer.append(prefix);
      buffer.append("| ");
      buffer.append(m_item.toString());
      buffer.append(" (");
      buffer.append(m_projectedCounts.getCount(recursionLevel));
      buffer.append(")\n");
      for (FPTreeNode node : m_children.values()) {
        buffer.append(node.toString(prefix + "| ", recursionLevel));
      }
      return buffer.toString();
    }

    /**
     * Assign sequential IDs (for graphing) to this node and its subtree.
     *
     * @param lastID the last ID handed out so far.
     * @return the last ID assigned in this subtree.
     */
    protected int assignIDs(int lastID) {
      int currentLastID = lastID + 1;
      m_ID = currentLastID;
      if (m_children != null) {
        Collection<FPTreeNode> kids = m_children.values();
        for (FPTreeNode n : kids) {
          currentLastID = n.assignIDs(currentLastID);
        }
      }
      return currentLastID;
    }

    /**
     * Generate a dot graph description string for the subtree rooted here.
     * assignIDs() must have been called first so node IDs are valid.
     *
     * @param text a StringBuffer to store the graph description in.
     */
    public void graphFPTree(StringBuffer text) {
      if (m_children != null) {
        Collection<FPTreeNode> kids = m_children.values();
        for (FPTreeNode n : kids) {
          text.append("N" + n.m_ID);
          text.append(" [label=\"");
          text.append(n.getItem().toString() + " (" + n.getProjectedCount(0) + ")\\n");
          text.append("\"]\n");
          n.graphFPTree(text);
          text.append("N" + m_ID + "->" + "N" + n.m_ID + "\n");
        }
      }
    }
  }

  /**
   * Root of the FPTree. Holds the header table that chains together all
   * occurrences of each item in the tree.
   */
  private static class FPTreeRoot extends FPTreeNode {

    /** For serialization */
    private static final long serialVersionUID = 632150939785333297L;

    /**
     * Stores a header entry for an FPTree.
     */
    protected static class Header implements Serializable {

      /** For serialization */
      private static final long serialVersionUID = -6583156284891368909L;

      /** The list of pointers into the tree structure */
      protected List<FPTreeNode> m_headerList = new LinkedList<FPTreeNode>();

      /** Projected header counts for this entry */
      protected ShadowCounts m_projectedHeaderCounts = new ShadowCounts();

      /**
       * Add a tree node into the list for this header entry.
       *
       * @param toAdd the node to add.
       */
      public void addToList(FPTreeNode toAdd) {
        m_headerList.add(toAdd);
      }

      /**
       * Get the list of nodes for this header entry.
       *
       * @return the list of nodes for this header entry.
       */
      public List<FPTreeNode> getHeaderList() {
        return m_headerList;
      }

      /**
       * Get the projected counts for this header entry.
       *
       * @return the projected counts for this header entry.
       */
      public ShadowCounts getProjectedCounts() {
        return m_projectedHeaderCounts;
      }
    }

    /** Stores the header table as mapped Header entries */
    protected Map<BinaryItem, Header> m_headerTable =
      new HashMap<BinaryItem, Header>();

    /**
     * Create a new FPTreeRoot (carries no item of its own).
     */
    public FPTreeRoot() {
      super(null, null);
    }

    /**
     * Insert an item set into the tree.
     *
     * @param itemSet the item set to insert into the tree.
     * @param incr the increment by which to increase counters.
     */
    public void addItemSet(Collection<BinaryItem> itemSet, int incr) {
      super.addItemSet(itemSet, m_headerTable, incr);
    }

    /**
     * Get the header table for this tree.
     *
     * @return the header table for this tree.
     */
    public Map<BinaryItem, Header> getHeaderTable() {
      return m_headerTable;
    }

    /**
     * Check whether the tree is empty at the given recursion (projection)
     * level, i.e. no child of the root carries a positive projected count.
     *
     * @param recursionLevel the projection level to check.
     * @return true if the (projected) tree is empty.
     */
    public boolean isEmpty(int recursionLevel) {
      for (FPTreeNode c : m_children.values()) {
        if (c.getProjectedCount(recursionLevel) > 0) {
          return false;
        }
      }
      return true;
    }

    /**
     * Get a textual description of the tree at a given recursion
     * (projection) level.
     *
     * @param pad the string to use as a prefix for indenting nodes.
     * @param recursionLevel the recursion level (projection) to use.
     * @return the textual description of the tree.
     */
    public String toString(String pad, int recursionLevel) {
      StringBuffer result = new StringBuffer();
      result.append(pad);
      result.append("+ ROOT\n");
      for (FPTreeNode node : m_children.values()) {
        result.append(node.toString(pad + "| ", recursionLevel));
      }
      return result.toString();
    }

    /**
     * Get a textual description of the header table for this tree.
     *
     * @param recursionLevel the recursion level to use.
     * @return a textual description of the header table at that level.
     */
    public String printHeaderTable(int recursionLevel) {
      StringBuffer buffer = new StringBuffer();
      for (BinaryItem item : m_headerTable.keySet()) {
        buffer.append(item.toString());
        buffer.append(" : ");
        buffer.append(m_headerTable.get(item).getProjectedCounts().getCount(recursionLevel));
        buffer.append("\n");
      }
      return buffer.toString();
    }

    /**
     * Append dot graph edges for the header-table chains (only chains with
     * more than one node are drawn). assignIDs() must have run first.
     *
     * @param text the buffer to append to.
     * @param maxID the next free node ID to use for the synthetic header nodes.
     */
    public void graphHeaderTable(StringBuffer text, int maxID) {
      for (BinaryItem item : m_headerTable.keySet()) {
        Header h = m_headerTable.get(item);
        List<FPTreeNode> headerList = h.getHeaderList();
        if (headerList.size() > 1) {
          text.append("N" + maxID + " [label=\"" + headerList.get(0).getItem().toString()
            + " (" + h.getProjectedCounts().getCount(0) + ")" + "\" shape=plaintext]\n");
          text.append("N" + maxID + "->" + "N" + headerList.get(1).m_ID + "\n");
          for (int i = 1; i < headerList.size() - 1; i++) {
            text.append("N" + headerList.get(i).m_ID + "->"
              + "N" + headerList.get(i + 1).m_ID + "\n");
          }
          maxID++;
        }
      }
    }
  }

  /**
   * Advance the boolean subset indicator to the next subset, treating the
   * array as a binary counter (least significant position first).
   *
   * @param subset the indicator array to advance in place.
   */
  private static void nextSubset(boolean[] subset) {
    for (int i = 0; i < subset.length; i++) {
      if (!subset[i]) {
        subset[i] = true;
        break;
      } else {
        subset[i] = false;
      }
    }
  }

  /**
   * Extract the premise (items flagged true in the subset indicator) from a
   * frequent item set.
   *
   * @param fis the frequent item set.
   * @param subset the subset indicator.
   * @return the premise items, or null when the indicator selects every item
   *         (no items would remain for a consequence).
   */
  private static Collection<Item> getPremise(FrequentBinaryItemSet fis,
      boolean[] subset) {
    boolean ok = false;
    for (int i = 0; i < subset.length; i++) {
      if (!subset[i]) {
        ok = true;
        break;
      }
    }
    if (!ok) {
      return null;
    }

    List<Item> premise = new ArrayList<Item>();
    ArrayList<Item> items = new ArrayList<Item>(fis.getItems());
    for (int i = 0; i < subset.length; i++) {
      if (subset[i]) {
        premise.add(items.get(i));
      }
    }
    return premise;
  }

  /**
   * Extract the consequence (items flagged false in the subset indicator)
   * from a frequent item set.
   *
   * @param fis the frequent item set.
   * @param subset the subset indicator.
   * @return the consequence items.
   */
  private static Collection<Item> getConsequence(FrequentBinaryItemSet fis,
      boolean[] subset) {
    List<Item> consequence = new ArrayList<Item>();
    ArrayList<Item> items = new ArrayList<Item>(fis.getItems());
    for (int i = 0; i < subset.length; i++) {
      if (!subset[i]) {
        consequence.add(items.get(i));
      }
    }
    return consequence;
  }

  /**
   * Generate all association rules, from the supplied frequent item sets,
   * that meet a given minimum metric threshold.
   Uses a brute force approach: for each frequent item set, every
   * premise/consequence split is enumerated via a boolean subset indicator.
   * Relies on largeItemSets being sorted so that every subset of an item set
   * has already been entered into the frequency lookup before it is needed.
   *
   * @param largeItemSets the set of frequent item sets
   * @param metricToUse the metric to use
   * @param metricThreshold the threshold value that a rule must meet
   * @param upperBoundMinSuppAsInstances the upper bound on the support
   * in order to accept the rule
   * @param lowerBoundMinSuppAsInstances the lower bound on the support
   * in order to accept the rule
   * @param totalTransactions the total number of transactions in the data
   * @return a list of association rules
   */
  public static List<AssociationRule> generateRulesBruteForce(
      FrequentItemSets largeItemSets,
      DefaultAssociationRule.METRIC_TYPE metricToUse,
      double metricThreshold, int upperBoundMinSuppAsInstances,
      int lowerBoundMinSuppAsInstances, int totalTransactions) {

    List<AssociationRule> rules = new ArrayList<AssociationRule>();
    largeItemSets.sort();
    // maps each (sub-)item set to its support count so that premise and
    // consequence supports can be looked up when forming candidate rules
    Map<Collection<BinaryItem>, Integer> frequencyLookup =
      new HashMap<Collection<BinaryItem>, Integer>();

    Iterator<FrequentBinaryItemSet> setI = largeItemSets.iterator();
    // process each large item set
    while (setI.hasNext()) {
      FrequentBinaryItemSet fis = setI.next();
      frequencyLookup.put(fis.getItems(), fis.getSupport());
      if (fis.getItems().size() > 1) {
        // generate all the possible subsets for the premise
        boolean[] subset = new boolean[fis.getItems().size()];
        Collection<Item> premise = null;
        Collection<Item> consequence = null;
        while ((premise = getPremise(fis, subset)) != null) {
          // skip the empty premise (all-false indicator) and the full set
          if (premise.size() > 0 && premise.size() < fis.getItems().size()) {
            consequence = getConsequence(fis, subset);
            int totalSupport = fis.getSupport();
            int supportPremise = frequencyLookup.get(premise).intValue();
            int supportConsequence = frequencyLookup.get(consequence).intValue();

            // a candidate rule
            DefaultAssociationRule candidate =
              new DefaultAssociationRule(premise, consequence, metricToUse,
                supportPremise, supportConsequence, totalSupport,
                totalTransactions);
            // accept when it beats the metric threshold (strictly) and its
            // support lies within [lower, upper] bounds
            if (candidate.getPrimaryMetricValue() > metricThreshold &&
                candidate.getTotalSupport() >= lowerBoundMinSuppAsInstances &&
                candidate.getTotalSupport() <= upperBoundMinSuppAsInstances) {
              // accept this rule
              rules.add(candidate);
            }
          }
          nextSubset(subset);
        }
      }
    }

    return rules;
  }

  /**
   * Filter a list of rules, keeping only those that contain the requested
   * items.
   *
   * @param rulesToPrune the rules to filter.
   * @param itemsToConsider the items that must be present.
   * @param useOr if true a rule need only contain one of the items; if false
   *        it must contain all of them.
   * @return the filtered list of rules.
   */
  public static List<AssociationRule> pruneRules(
      List<AssociationRule> rulesToPrune, ArrayList<Item> itemsToConsider,
      boolean useOr) {
    ArrayList<AssociationRule> result = new ArrayList<AssociationRule>();

    for (AssociationRule r : rulesToPrune) {
      if (r.containsItems(itemsToConsider, useOr)) {
        result.add(r);
      }
    }

    return result;
  }

  /** The number of rules to find */
  protected int m_numRulesToFind = 10;

  /** The upper bound on the minimum support */
  protected double m_upperBoundMinSupport = 1.0;

  /** The lower bound on minimum support */
  protected double m_lowerBoundMinSupport = 0.1;

  /** The amount by which to decrease the support in each iteration */
  protected double m_delta = 0.05;

  /** The number of instances in the data */
  protected int m_numInstances;

  /**
   * When processing data off of disk report progress
   * this frequently (number of instances).
   */
  protected int m_offDiskReportingFrequency = 10000;

  /**
   * If true, all rules meeting the lower bound on the minimum
   * support will be found; the number of rules to find will be
   * ignored and the iterative reduction of support will not
   * be done.
*/ protected boolean m_findAllRulesForSupportLevel = false; //protected double m_lowerBoundMinSupport = 0.0; /** The index (1 based) of binary attributes to treat as the positive value */ protected int m_positiveIndex = 2; protected DefaultAssociationRule.METRIC_TYPE m_metric = DefaultAssociationRule.METRIC_TYPE.CONFIDENCE; protected double m_metricThreshold = 0.9; /** Holds the large item sets found */ protected FrequentItemSets m_largeItemSets; /** Holds the rules */ protected List<AssociationRule> m_rules; // maximum number of items in a large item set (zero means no limit) protected int m_maxItems = -1; /** * If set, limit the transactions (instances) input to the * algorithm to those that contain these items */ protected String m_transactionsMustContain = ""; /** Use OR rather than AND when considering must contain lists */ protected boolean m_mustContainOR = false; /** If set, then only output rules containing these itmes */ protected String m_rulesMustContain = ""; /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // enable what we can handle // attributes result.enable(Capability.UNARY_ATTRIBUTES); result.enable(Capability.BINARY_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); result.enable(Capability.NO_CLASS); return result; } /** * Returns a string describing this associator * * @return a description of the evaluator suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class implementing the FP-growth algorithm for finding" + " large item sets without candidate generation. Iteratively" + " reduces the minimum support until it finds the required" + " number of rules with the given minimum metric." 
+ " For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "J. Han and J.Pei and Y. Yin"); result.setValue(Field.TITLE, "Mining frequent patterns without candidate generation"); result.setValue(Field.BOOKTITLE, "Proceedings of the 2000 ACM-SIGMID International" + " Conference on Management of Data"); result.setValue(Field.YEAR, "2000"); result.setValue(Field.PAGES, "1-12"); return result; } private boolean passesMustContain(Instance inst, boolean[] transactionsMustContainIndexes, int numInTransactionsMustContainList) { boolean result = false; if (inst instanceof SparseInstance) { int containsCount = 0; for (int i = 0; i < inst.numValues(); i++) { int attIndex = inst.index(i); if (m_mustContainOR) { if (transactionsMustContainIndexes[attIndex]) { // break here since the operator is OR and this // instance contains at least one of the items return true; } } else { if (transactionsMustContainIndexes[attIndex]) { containsCount++; } } } if (!m_mustContainOR) { if (containsCount == numInTransactionsMustContainList) { return true; } } } else { int containsCount = 0; for (int i = 0; i < transactionsMustContainIndexes.length; i++) { if (transactionsMustContainIndexes[i]) { if ((int)inst.value(i) == m_positiveIndex - 1) { if (m_mustContainOR) { // break here since the operator is OR and // this instance contains at least one of the // requested items return true; } else { containsCount++; } } } } if (!m_mustContainOR) { if (containsCount == numInTransactionsMustContainList) { return true; } } } return result; } private void 
processSingleton(Instance current, ArrayList<BinaryItem> singletons) throws Exception { if (current instanceof SparseInstance) { for (int j = 0; j < current.numValues(); j++) { int attIndex = current.index(j); singletons.get(attIndex).increaseFrequency(); } } else { for (int j = 0; j < current.numAttributes(); j++) { if (!current.isMissing(j)) { if (current.attribute(j).numValues() == 1 || current.value(j) == m_positiveIndex - 1) { singletons.get(j).increaseFrequency(); } } } } } /** * Get the singleton items in the data * * @param source the source of the data (either Instances or * an ArffLoader). * @return a list of singleton item sets * @throws Exception if the singletons can't be found for some reason */ protected ArrayList<BinaryItem> getSingletons(Object source) throws Exception { ArrayList<BinaryItem> singletons = new ArrayList<BinaryItem>(); Instances data = null; if (source instanceof Instances) { data = (Instances)source; } else if (source instanceof weka.core.converters.ArffLoader) { data = ((weka.core.converters.ArffLoader)source).getStructure(); } for (int i = 0; i < data.numAttributes(); i++) { singletons.add(new BinaryItem(data.attribute(i), m_positiveIndex - 1)); } if (source instanceof Instances) { // set the number of instances m_numInstances = data.numInstances(); for (int i = 0; i < data.numInstances(); i++) { Instance current = data.instance(i); processSingleton(current, singletons); } } else if (source instanceof weka.core.converters.ArffLoader) { weka.core.converters.ArffLoader loader = (weka.core.converters.ArffLoader)source; Instance current = null; int count = 0; while ((current = loader.getNextInstance(data)) != null) { processSingleton(current, singletons); count++; if (count % m_offDiskReportingFrequency == 0) { System.err.println("Singletons: done " + count); } } // set the number of instances m_numInstances = count; loader.reset(); } return singletons; } /** * Get the singleton items in the data * * @param data the Instances to 
process * @return a list of singleton item sets * @throws Exception if the singletons can't be found for some reason */ protected ArrayList<BinaryItem> getSingletons(Instances data) throws Exception { return getSingletons((Object)data); /*ArrayList<BinaryItem> singletons = new ArrayList<BinaryItem>(); for (int i = 0; i < data.numAttributes(); i++) { singletons.add(new BinaryItem(data.attribute(i), m_positiveIndex - 1)); } for (int i = 0; i < data.numInstances(); i++) { Instance current = data.instance(i); if (current instanceof SparseInstance) { for (int j = 0; j < current.numValues(); j++) { int attIndex = current.index(j); singletons.get(attIndex).increaseFrequency(); } } else { for (int j = 0; j < data.numAttributes(); j++) { if (!current.isMissing(j)) { if (current.attribute(j).numValues() == 1 || current.value(j) == m_positiveIndex - 1) { singletons.get(j).increaseFrequency(); } } } } } return singletons;*/ } /*protected ArrayList<BinaryItem> getFrequent(ArrayList<BinaryItem> items, int minSupport) { ArrayList<BinaryItem> frequent = new ArrayList<BinaryItem>(); for (BinaryItem b : items) { if (b.getFrequency() > minSupport) { frequent.add(b); } } // sort in descending order of support Collections.sort(frequent); return frequent; } */ /** * Inserts a single instance into the FPTree. 
* * @param current the instance to insert * @param singletons the singleton item sets * @param tree the tree to insert into * @param minSupport the minimum support threshold */ private void insertInstance(Instance current, ArrayList<BinaryItem> singletons, FPTreeRoot tree, int minSupport) { ArrayList<BinaryItem> transaction = new ArrayList<BinaryItem>(); if (current instanceof SparseInstance) { for (int j = 0; j < current.numValues(); j++) { int attIndex = current.index(j); if (singletons.get(attIndex).getFrequency() >= minSupport) { transaction.add(singletons.get(attIndex)); } } Collections.sort(transaction); tree.addItemSet(transaction, 1); } else { for (int j = 0; j < current.numAttributes(); j++) { if (!current.isMissing(j)) { if (current.attribute(j).numValues() == 1 || current.value(j) == m_positiveIndex - 1) { if (singletons.get(j).getFrequency() >= minSupport) { transaction.add(singletons.get(j)); } } } } Collections.sort(transaction); tree.addItemSet(transaction, 1); } } /** * Construct the frequent pattern tree by inserting each transaction * in the data into the tree. Only those items from each transaction that * meet the minimum support threshold are inserted. 
* * @param singletons the singleton item sets * @param data the Instances containing the transactions * @param minSupport the minimum support * @return the root of the tree */ protected FPTreeRoot buildFPTree(ArrayList<BinaryItem> singletons, Object dataSource, int minSupport) throws Exception { FPTreeRoot tree = new FPTreeRoot(); Instances data = null; if (dataSource instanceof Instances) { data = (Instances)dataSource; } else if (dataSource instanceof weka.core.converters.ArffLoader) { data = ((weka.core.converters.ArffLoader)dataSource).getStructure(); } if (dataSource instanceof Instances) { for (int i = 0; i < data.numInstances(); i++) { insertInstance(data.instance(i), singletons, tree, minSupport); } } else if (dataSource instanceof weka.core.converters.ArffLoader) { weka.core.converters.ArffLoader loader = (weka.core.converters.ArffLoader)dataSource; Instance current = null; int count = 0; while ((current = loader.getNextInstance(data)) != null) { insertInstance(current, singletons, tree, minSupport); count++; if (count % m_offDiskReportingFrequency == 0) { System.err.println("build tree done: " + count); } } } return tree; } /** * Construct the frequent pattern tree by inserting each transaction * in the data into the tree. Only those items from each transaction that * meet the minimum support threshold are inserted. 
   *
   * @param singletons the singleton item sets
   * @param data the Instances containing the transactions
   * @param minSupport the minimum support
   * @return the root of the tree
   */
  // NOTE: a superseded, commented-out Instances-only implementation of
  // buildFPTree that used to live here has been removed as dead code.

  /**
   * Find large item sets in the FP-tree. Works by projecting the tree onto
   * each frequent item in turn: counts along each item's header-list paths
   * are pushed up to the root at recursion level + 1, the (now projected)
   * tree is mined recursively, and the pushed counts are then removed again.
   *
   * @param tree the root of the tree to mine
   * @param largeItemSets holds the large item sets found
   * @param recursionLevel the recursion level for the current projected
   * counts
   * @param conditionalItems the current set of items that the current
   * (projected) tree is conditional on
   * @param minSupport the minimum acceptable support
   */
  protected void mineTree(FPTreeRoot tree, FrequentItemSets largeItemSets,
      int recursionLevel, FrequentBinaryItemSet conditionalItems,
      int minSupport) {

    if (!tree.isEmpty(recursionLevel)) {
      if (m_maxItems > 0 && recursionLevel >= m_maxItems) {
        // don't mine any further
        return;
      }

      Map<BinaryItem, FPTreeRoot.Header> headerTable = tree.getHeaderTable();
      Set<BinaryItem> keys = headerTable.keySet();
      Iterator<BinaryItem> i = keys.iterator();

      while (i.hasNext()) {
        BinaryItem item = i.next();
        FPTreeRoot.Header itemHeader = headerTable.get(item);

        // check for minimum support at this level
        int support = itemHeader.getProjectedCounts().getCount(recursionLevel);
        if (support >= minSupport) {
          // process header list at this recursion level
          for (FPTreeNode n : itemHeader.getHeaderList()) {
            // push count up path to root
            int currentCount = n.getProjectedCount(recursionLevel);
            if (currentCount > 0) {
              FPTreeNode temp = n.getParent();
              while (temp != tree) {
                // set/increase for the node
                temp.increaseProjectedCount(recursionLevel + 1, currentCount);
                // set/increase for the header table
                headerTable.get(temp.getItem()).
                  getProjectedCounts().increaseCount(recursionLevel + 1,
                    currentCount);
                temp = temp.getParent();
              }
            }
          }

          FrequentBinaryItemSet newConditional =
            (FrequentBinaryItemSet)conditionalItems.clone();

          // this item gets added to the conditional items
          newConditional.addItem(item);
          newConditional.setSupport(support);

          // now add this conditional item set to the list of large item sets
          largeItemSets.addItemSet(newConditional);

          // now recursively process the new tree
          mineTree(tree, largeItemSets, recursionLevel + 1, newConditional,
            minSupport);

          // reverse the propagated counts
          for (FPTreeNode n : itemHeader.getHeaderList()) {
            FPTreeNode temp = n.getParent();
            while (temp != tree) {
              temp.removeProjectedCount(recursionLevel + 1);
              temp = temp.getParent();
            }
          }

          // reverse the propagated counts in the header list
          // at this recursion level
          for (FPTreeRoot.Header h : headerTable.values()) {
            h.getProjectedCounts().removeCount(recursionLevel + 1);
          }
        }
      }
    }
  }

  /**
   * Construct a new FPGrowth object with all options at default values.
   */
  public FPGrowth() {
    resetOptions();
  }

  /**
   * Reset all options to their default values.
   */
  public void resetOptions() {
    m_delta = 0.05;
    m_metricThreshold = 0.9;
    m_numRulesToFind = 10;
    m_lowerBoundMinSupport = 0.1;
    m_upperBoundMinSupport = 1.0;
    m_positiveIndex = 2;
    m_transactionsMustContain = "";
    m_rulesMustContain = "";
    m_mustContainOR = false;
  }

  /**
   * Tip text for this property suitable for displaying
   * in the GUI.
   *
   * @return the tip text for this property.
   */
  public String positiveIndexTipText() {
    return "Set the index of binary valued attributes that is to be considered"
      + " the positive index. Has no effect for sparse data (in this case"
      + " the first index (i.e. non-zero values) is always treated as "
      + " positive. Also has no effect for unary valued attributes (i.e."
+ " when using the Weka Apriori-style format for market basket data," + " which uses missing value \"?\" to indicate" + " absence of an item."; } /** * Set the index of the attribute value to consider as positive * for binary attributes in normal dense instances. Index 1 is always * used for sparse instances. * * @param index the index to use for positive values in binary attributes. */ public void setPositiveIndex(int index) { m_positiveIndex = index; } /** * Get the index of the attribute value to consider as positive * for binary attributes in normal dense instances. Index 1 is always * used for sparse instances. * * @return the index to use for positive values in binary attributes. */ public int getPositiveIndex() { return m_positiveIndex; } /** * Set the desired number of rules to find. * * @param numR the number of rules to find. */ public void setNumRulesToFind(int numR) { m_numRulesToFind = numR; } /** * Get the number of rules to find. * * @return the number of rules to find. */ public int getNumRulesToFind() { return m_numRulesToFind; } /** * Tip text for this property suitable for displaying * in the GUI. * * @return the tip text for this property. */ public String numRulesToFindTipText() { return "The number of rules to output"; } /** * Set the metric type to use. * * @param d the metric type */ public void setMetricType(SelectedTag d) { int ordinal = d.getSelectedTag().getID(); for (DefaultAssociationRule.METRIC_TYPE m : DefaultAssociationRule.METRIC_TYPE.values()) { if (m.ordinal() == ordinal) { m_metric = m; break; } } } /** * Set the maximum number of items to include in large items sets. * * @param max the maxim number of items to include in large item sets. */ public void setMaxNumberOfItems(int max) { m_maxItems = max; } /** * Gets the maximum number of items to be included in large item sets. * * @return the maximum number of items to be included in large items sets. 
*/ public int getMaxNumberOfItems() { return m_maxItems; } /** * Tip text for this property suitable for displaying * in the GUI. * * @return the tip text for this property. */ public String maxNumberOfItemsTipText() { return "The maximum number of items to include in frequent item sets. -1 " + "means no limit."; } /** * Get the metric type to use. * * @return the metric type to use. */ public SelectedTag getMetricType() { return new SelectedTag(m_metric.ordinal(), DefaultAssociationRule.TAGS_SELECTION); } /** * Tip text for this property suitable for displaying * in the GUI. * * @return the tip text for this property. */ public String metricTypeTipText() { return "Set the type of metric by which to rank rules. Confidence is " +"the proportion of the examples covered by the premise that are also " +"covered by the consequence(Class association rules can only be mined using confidence). Lift is confidence divided by the " +"proportion of all examples that are covered by the consequence. This " +"is a measure of the importance of the association that is independent " +"of support. Leverage is the proportion of additional examples covered " +"by both the premise and consequence above those expected if the " +"premise and consequence were independent of each other. The total " +"number of examples that this represents is presented in brackets " +"following the leverage. Conviction is " +"another measure of departure from independence."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String minMetricTipText() { return "Minimum metric score. Consider only rules with scores higher than " +"this value."; } /** * Get the value of minConfidence. * * @return Value of minConfidence. */ public double getMinMetric() { return m_metricThreshold; } /** * Set the value of minConfidence. * * @param v Value to assign to minConfidence. 
*/ public void setMinMetric(double v) { m_metricThreshold = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String transactionsMustContainTipText() { return "Limit input to FPGrowth to those transactions (instances)" + " that contain these items. Provide a comma separated" + " list of attribute names."; } /** * Set the comma separated list of items that transactions * must contain in order to be considered for large * item sets and rules. * * @param list a comma separated list of items (empty * string indicates no restriction on the transactions). */ public void setTransactionsMustContain(String list) { m_transactionsMustContain = list; } /** * Gets the comma separated list of items that * transactions must contain in order to be considered * for large item sets and rules. * * @return return the comma separated list of * items that transactions must contain. */ public String getTransactionsMustContain() { return m_transactionsMustContain; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String rulesMustContainTipText() { return "Only print rules that contain these items. Provide " + "a comma separated list of attribute names."; } /** * Set the comma separated list of items that rules * must contain in order to be output. * * @param list a comma separated list of items (empty * string indicates no restriction on the rules). */ public void setRulesMustContain(String list) { m_rulesMustContain = list; } /** * Get the comma separated list of items that * rules must contain in order to be output. * * @return the comma separated list of items * that rules must contain in order to be output. 
*/ public String getRulesMustContain() { return m_rulesMustContain; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String useORForMustContainListTipText() { return "Use OR instead of AND for transactions/rules must contain lists."; } /** * Set whether to use OR rather than AND when considering * must contain lists. * * @param b true if OR should be used instead of AND when * considering transaction and rules must contain lists. */ public void setUseORForMustContainList(boolean b) { m_mustContainOR = b; } /** * Gets whether OR is to be used rather than AND when * considering must contain lists. * * @return true if OR is used instead of AND. */ public boolean getUseORForMustContainList() { return m_mustContainOR; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying, in the explorer/experimenter gui */ public String deltaTipText() { return "Iteratively decrease support by this factor. Reduces support " +"until min support is reached or required number of rules has been " +"generated."; } /** * Get the value of delta. * * @return Value of delta. */ public double getDelta() { return m_delta; } /** * Set the value of delta. * * @param v Value to assign to delta. */ public void setDelta(double v) { m_delta = v; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String lowerBoundMinSupportTipText() { return "Lower bound for minimum support as a fraction or number of instances."; } /** * Get the value of lowerBoundMinSupport. * * @return Value of lowerBoundMinSupport. */ public double getLowerBoundMinSupport() { return m_lowerBoundMinSupport; } /** * Set the value of lowerBoundMinSupport. * * @param v Value to assign to lowerBoundMinSupport. 
*/ public void setLowerBoundMinSupport(double v) { m_lowerBoundMinSupport = v; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String upperBoundMinSupportTipText() { return "Upper bound for minimum support as a fraction or number of instances. " + "Start iteratively decreasing " + "minimum support from this value."; } /** * Get the value of upperBoundMinSupport. * * @return Value of upperBoundMinSupport. */ public double getUpperBoundMinSupport() { return m_upperBoundMinSupport; } /** * Set the value of upperBoundMinSupport. * * @param v Value to assign to upperBoundMinSupport. */ public void setUpperBoundMinSupport(double v) { m_upperBoundMinSupport = v; } /** * Tip text for this property suitable for displaying * in the GUI. * * @return the tip text for this property. */ public String findAllRulesForSupportLevelTipText() { return "Find all rules that meet " + "the lower bound on minimum support and the minimum metric constraint. " + "Turning this mode on will disable the iterative support reduction " + "procedure to find the specified number of rules."; } /** * If true then turn off the iterative support reduction method * of finding x rules that meet the minimum support and metric * thresholds and just return all the rules that meet the * lower bound on minimum support and the minimum metric. * * @param s true if all rules meeting the lower bound on the support * and minimum metric thresholds are to be found. */ public void setFindAllRulesForSupportLevel(boolean s) { m_findAllRulesForSupportLevel = s; } /** * Get whether all rules meeting the lower bound on min support * and the minimum metric threshold are to be found. * * @return true if all rules meeting the lower bound on min * support and the min metric threshold are to be found. 
*/ public boolean getFindAllRulesForSupportLevel() { return m_findAllRulesForSupportLevel; } /** * Set how often to report some progress when the data is * being read incrementally off of the disk rather than * loaded into memory. * * @param freq the frequency to print progress. */ public void setOffDiskReportingFrequency(int freq) { m_offDiskReportingFrequency = freq; } /* public void setMinimumSupport(double minSupp) { m_minSupport = minSupp; } public double getMinimumSupport() { return m_minSupport; } */ /** * Gets the list of mined association rules. * * @return the list of association rules discovered during mining. * Returns null if mining hasn't been performed yet. */ public AssociationRules getAssociationRules() { List<AssociationRule> rulesToReturn = new ArrayList<AssociationRule>(); int count = 0; for (AssociationRule r : m_rules) { rulesToReturn.add(r); count++; if (!m_findAllRulesForSupportLevel && count == m_numRulesToFind) { break; } } return new AssociationRules(rulesToReturn, this); } /** * Gets a list of the names of the metrics output for * each rule. This list should be the same (in terms of * the names and order thereof) as that produced by * AssociationRule.getMetricNamesForRule(). * * @return an array of the names of the metrics available * for each rule learned by this producer. */ public String[] getRuleMetricNames() { String[] metricNames = new String[DefaultAssociationRule.TAGS_SELECTION.length]; for (int i = 0; i < DefaultAssociationRule.TAGS_SELECTION.length; i++) { metricNames[i] = DefaultAssociationRule.TAGS_SELECTION[i].getReadable(); } return metricNames; } /** * Returns true if this AssociationRulesProducer can actually * produce rules. Most implementing classes will always return * true from this method (obviously :-)). However, an implementing * class that actually acts as a wrapper around things that may * or may not implement AssociationRulesProducer will want to * return false if the thing they wrap can't produce rules. 
* * @return true if this producer can produce rules in its current * configuration */ public boolean canProduceRules() { return true; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(); String string00 = "\tSet the index of the attribute value to consider as 'positive'\n\t" + "for binary attributes in normal dense instances. Index 2 is always\n\t" + "used for sparse instances. (default = 2)"; String string0 = "\tThe maximum number of items to include " + "in large items sets (and rules). (default " + "= -1, i.e. no limit.)"; String string1 = "\tThe required number of rules. (default = " + m_numRulesToFind + ")"; String string2 = "\tThe minimum metric score of a rule. (default" + " = " + m_metricThreshold + ")"; String string3 = "\tThe metric by which to rank rules. (default" + " = confidence)"; String string4 = "\tThe lower bound for the minimum support as a fraction" + " or number of instances. (default = " + m_lowerBoundMinSupport + ")"; String string5 = "\tUpper bound for minimum support as a fraction or number of instances. " + "(default = 1.0)"; String string6 = "\tThe delta by which the minimum support is decreased in\n" + "\teach iteration as a fraction or number of instances. (default = " + m_delta + ")"; String string7 = "\tFind all rules that meet the lower bound on\n\t" + "minimum support and the minimum metric constraint.\n\t" + "Turning this mode on will disable the iterative support reduction\n\t" + "procedure to find the specified number of rules."; String string8 = "\tOnly consider transactions that contain these items (default = no restriction)"; String string9 = "\tOnly print rules that contain these items. (default = no restriction)"; String string10 = "\tUse OR instead of AND for must contain list(s). 
Use in conjunction" + "\n\twith -transactions and/or -rules"; newVector.add(new Option(string00, "P", 1, "-P <attribute index of positive value>")); newVector.add(new Option(string0, "I", 1, "-I <max items>")); newVector.add(new Option(string1, "N", 1, "-N <require number of rules>")); newVector.add(new Option(string3, "T", 1, "-T <0=confidence | 1=lift | " + "2=leverage | 3=Conviction>")); newVector.add(new Option(string2, "C", 1, "-C <minimum metric score of a rule>")); newVector.add(new Option(string5, "U", 1, "-U <upper bound for minimum support>")); newVector.add(new Option(string4, "M", 1, "-M <lower bound for minimum support>")); newVector.add(new Option(string6, "D", 1, "-D <delta for minimum support>")); newVector.add(new Option(string7, "S", 0, "-S")); newVector.add(new Option(string8, "transactions", 1, "-transactions <comma separated " + "list of attribute names>")); newVector.add(new Option(string9, "rules", 1, "-rules <comma separated list " + "of attribute names>")); newVector.add(new Option(string10, "use-or", 0, "-use-or")); return newVector.elements(); } /** * * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P &lt;attribute index of positive value&gt; * Set the index of the attribute value to consider as 'positive' * for binary attributes in normal dense instances. Index 2 is always * used for sparse instances. (default = 2)</pre> * * <pre> -I &lt;max items&gt; * The maximum number of items to include in large items sets (and rules). (default = -1, i.e. no limit.)</pre> * * <pre> -N &lt;require number of rules&gt; * The required number of rules. (default = 10)</pre> * * <pre> -T &lt;0=confidence | 1=lift | 2=leverage | 3=Conviction&gt; * The metric by which to rank rules. (default = confidence)</pre> * * <pre> -C &lt;minimum metric score of a rule&gt; * The minimum metric score of a rule. (default = 0.9)</pre> * * <pre> -U &lt;upper bound for minimum support&gt; * Upper bound for minimum support. 
(default = 1.0)</pre> * * <pre> -M &lt;lower bound for minimum support&gt; * The lower bound for the minimum support. (default = 0.1)</pre> * * <pre> -D &lt;delta for minimum support&gt; * The delta by which the minimum support is decreased in * each iteration. (default = 0.05)</pre> * * <pre> -S * Find all rules that meet the lower bound on * minimum support and the minimum metric constraint. * Turning this mode on will disable the iterative support reduction * procedure to find the specified number of rules.</pre> * * <pre> -transactions &lt;comma separated list of attribute names&gt; * Only consider transactions that contain these items (default = no restriction)</pre> * * <pre> -rules &lt;comma separated list of attribute names&gt; * Only print rules that contain these items. (default = no restriction)</pre> * * <pre> -use-or * Use OR instead of AND for must contain list(s). Use in conjunction * with -transactions and/or -rules</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { resetOptions(); String positiveIndexString = Utils.getOption('P', options); String maxItemsString = Utils.getOption('I', options); String numRulesString = Utils.getOption('N', options); String minMetricString = Utils.getOption('C', options); String metricTypeString = Utils.getOption("T", options); String lowerBoundSupportString = Utils.getOption("M", options); String upperBoundSupportString = Utils.getOption("U", options); String deltaString = Utils.getOption("D", options); String transactionsString = Utils.getOption("transactions", options); String rulesString = Utils.getOption("rules", options); if (positiveIndexString.length() != 0) { setPositiveIndex(Integer.parseInt(positiveIndexString)); } if (maxItemsString.length() != 0) { setMaxNumberOfItems(Integer.parseInt(maxItemsString)); } if (metricTypeString.length() != 0) { 
setMetricType(new SelectedTag(Integer.parseInt(metricTypeString), DefaultAssociationRule.TAGS_SELECTION)); } if (numRulesString.length() != 0) { setNumRulesToFind(Integer.parseInt(numRulesString)); } if (minMetricString.length() != 0) { setMinMetric(Double.parseDouble(minMetricString)); } if (deltaString.length() != 0) { setDelta(Double.parseDouble(deltaString)); } if (lowerBoundSupportString.length() != 0) { setLowerBoundMinSupport(Double.parseDouble(lowerBoundSupportString)); } if (upperBoundSupportString.length() != 0) { setUpperBoundMinSupport(Double.parseDouble(upperBoundSupportString)); } if (transactionsString.length() != 0) { setTransactionsMustContain(transactionsString); } if (rulesString.length() > 0) { setRulesMustContain(rulesString); } setUseORForMustContainList(Utils.getFlag("use-or", options)); setFindAllRulesForSupportLevel(Utils.getFlag('S', options)); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { ArrayList<String> options = new ArrayList<String>(); options.add("-P"); options.add("" + getPositiveIndex()); options.add("-I"); options.add("" + getMaxNumberOfItems()); options.add("-N"); options.add("" + getNumRulesToFind()); options.add("-T"); options.add("" + getMetricType().getSelectedTag().getID()); options.add("-C"); options.add("" + getMinMetric()); options.add("-D"); options.add("" + getDelta()); options.add("-U"); options.add("" + getUpperBoundMinSupport()); options.add("-M"); options.add("" + getLowerBoundMinSupport()); if (getFindAllRulesForSupportLevel()) { options.add("-S"); } if (getTransactionsMustContain().length() > 0) { options.add("-transactions"); options.add(getTransactionsMustContain()); } if (getRulesMustContain().length() > 0) { options.add("-rules"); options.add(getRulesMustContain()); } if (getUseORForMustContainList()) { options.add("-use-or"); } return options.toArray(new String[1]); } private Instances 
parseTransactionsMustContain(Instances data) { String[] split = m_transactionsMustContain.trim().split(","); boolean[] transactionsMustContainIndexes = new boolean[data.numAttributes()]; int numInTransactionsMustContainList = split.length; for (int i = 0; i < split.length; i++) { String attName = split[i].trim(); Attribute att = data.attribute(attName); if (att == null) { System.err.println("[FPGrowth] : WARNING - can't find attribute " + attName + " in the data."); numInTransactionsMustContainList--; } else { transactionsMustContainIndexes[att.index()] = true; } } if (numInTransactionsMustContainList == 0) { return data; } else { Instances newInsts = new Instances(data, 0); for (int i = 0; i < data.numInstances(); i++) { if (passesMustContain(data.instance(i), transactionsMustContainIndexes, numInTransactionsMustContainList)) { newInsts.add(data.instance(i)); } } newInsts.compactify(); return newInsts; } } private ArrayList<Item> parseRulesMustContain(Instances data) { ArrayList<Item> result = new ArrayList<Item>(); String[] split = m_rulesMustContain.trim().split(","); for (int i = 0; i < split.length; i++) { String attName = split[i].trim(); Attribute att = data.attribute(attName); if (att == null) { System.err.println("[FPGrowth] : WARNING - can't find attribute " + attName + " in the data."); } else { BinaryItem tempI = null; try { tempI = new BinaryItem(att, m_positiveIndex - 1); } catch (Exception e) { // this should never happen e.printStackTrace(); } result.add(tempI); } } return result; } /** * Method that generates all large item sets with a minimum support, and from * these all association rules with a minimum metric (i.e. confidence, * lift etc.). * * @param source the source of the data. May be an Instances object or * an ArffLoader. In the case of the latter, the two passes over the * data that FPGrowth requires will be done off of disk (i.e. only one * instance will be in memory at any one time). 
* @throws Exception if rules can't be built successfully */ private void buildAssociations(Object source) throws Exception { Instances data = null; Capabilities capabilities = getCapabilities(); boolean arffLoader = false; boolean breakOnNext = false; if (source instanceof weka.core.converters.ArffLoader) { data = ((weka.core.converters.ArffLoader)source).getStructure(); capabilities.setMinimumNumberInstances(0); arffLoader = true; } else { data = (Instances)source; } // can we handle the data? capabilities.testWithFail(data); // prune any instances that don't contain the requested items (if any) // can only do this if we are not reading the data incrementally if (m_transactionsMustContain.length() > 0 && (source instanceof Instances)) { data = parseTransactionsMustContain(data); getCapabilities().testWithFail(data); } ArrayList<Item> rulesMustContain = null; if (m_rulesMustContain.length() > 0) { rulesMustContain = parseRulesMustContain(data); } ArrayList<BinaryItem> singletons = getSingletons(source); int upperBoundMinSuppAsInstances = (m_upperBoundMinSupport > 1) ? (int) m_upperBoundMinSupport : (int)Math.ceil(m_upperBoundMinSupport * m_numInstances); int lowerBoundMinSuppAsInstances = (m_lowerBoundMinSupport > 1) ? (int)m_lowerBoundMinSupport : (int)Math.ceil(m_lowerBoundMinSupport * m_numInstances); double upperBoundMinSuppAsFraction = (m_upperBoundMinSupport > 1) ? m_upperBoundMinSupport / m_numInstances : m_upperBoundMinSupport; double lowerBoundMinSuppAsFraction = (m_lowerBoundMinSupport > 1) ? m_lowerBoundMinSupport / m_numInstances : m_lowerBoundMinSupport; double deltaAsFraction = (m_delta > 1) ? m_delta / m_numInstances : m_delta; //double currentSupport = upperBoundMinSuppAsFraction; double currentSupport = 1.0; if (m_findAllRulesForSupportLevel) { currentSupport = lowerBoundMinSuppAsFraction; } do { if (arffLoader) { ((weka.core.converters.ArffLoader)source).reset(); } int currentSupportAsInstances = (currentSupport > 1) ? 
(int)currentSupport : (int)Math.ceil(currentSupport * m_numInstances); // build the FPTree if (arffLoader) { System.err.println("Building FP-tree..."); } FPTreeRoot tree = buildFPTree(singletons, source, currentSupportAsInstances); FrequentItemSets largeItemSets = new FrequentItemSets(m_numInstances); if (arffLoader) { System.err.println("Mining tree for min supp " + currentSupport); } // mine the tree FrequentBinaryItemSet conditionalItems = new FrequentBinaryItemSet(new ArrayList<BinaryItem>(), 0); mineTree(tree, largeItemSets, 0, conditionalItems, currentSupportAsInstances); m_largeItemSets = largeItemSets; if (arffLoader) { System.err.println("Number of large item sets: " + m_largeItemSets.size()); } // save memory tree = null; m_rules = generateRulesBruteForce(m_largeItemSets, m_metric, m_metricThreshold, upperBoundMinSuppAsInstances, lowerBoundMinSuppAsInstances, m_numInstances); if (arffLoader) { System.err.println("Number of rules found " + m_rules.size()); } if (rulesMustContain != null && rulesMustContain.size() > 0) { m_rules = pruneRules(m_rules, rulesMustContain, m_mustContainOR); } if (!m_findAllRulesForSupportLevel) { if (breakOnNext) { break; } currentSupport -= deltaAsFraction; // System.err.println("currentSupport " + currentSupport + " lowBoundAsFrac " + lowerBoundMinSuppAsFraction); if (currentSupport < lowerBoundMinSuppAsFraction) { if (currentSupport + deltaAsFraction > lowerBoundMinSuppAsFraction) { // ensure that the lower bound does get evaluated currentSupport = lowerBoundMinSuppAsFraction; breakOnNext = true; } else { break; } } } else { // just break out of the loop as we are just finding all rules // with a minimum support + metric break; } } while (m_rules.size() < m_numRulesToFind); Collections.sort(m_rules); } /** * Method that generates all large item sets with a minimum support, and from * these all association rules with a minimum metric (i.e. confidence, * lift etc.). 
* * @param data the instances to be used for generating the associations * @throws Exception if rules can't be built successfully */ public void buildAssociations(Instances data) throws Exception { buildAssociations((Object)data); return; } /** * Output the association rules. * * @return a string representation of the model. */ public String toString() { // return m_largeItemSets.toString(m_numItemSetsToFind); if (m_rules == null) { return "FPGrowth hasn't been trained yet!"; } StringBuffer result = new StringBuffer(); int numRules = (m_rules.size() < m_numRulesToFind) ? m_rules.size() : m_numRulesToFind; if (m_rules.size() == 0) { return "No rules found!"; } else { result.append("FPGrowth found " + m_rules.size() + " rules"); if (!m_findAllRulesForSupportLevel) { result.append(" (displaying top " + numRules + ")"); } if (m_transactionsMustContain.length() > 0 || m_rulesMustContain.length() > 0) { result.append("\n"); if (m_transactionsMustContain.length() > 0) { result.append("\nUsing only transactions that contain: " + m_transactionsMustContain); } if (m_rulesMustContain.length() > 0) { result.append("\nShowing only rules that contain: " + m_rulesMustContain); } } result.append("\n\n"); } int count = 0; for (AssociationRule r : m_rules) { result.append(Utils.doubleToString((double)count+1, (int)(Math.log(numRules)/Math.log(10)+1), 0) + ". "); result.append(r + "\n"); count++; if (!m_findAllRulesForSupportLevel && count == m_numRulesToFind) { break; } } return result.toString(); } /** * Assemble a dot graph representation of the FP-tree. * * @param tree the root of the FP-tree * @return a graph representation as a String in dot format. */ public String graph(FPTreeRoot tree) { //int maxID = tree.assignIDs(-1); StringBuffer text = new StringBuffer(); text.append("digraph FPTree {\n"); text.append("N0 [label=\"ROOT\"]\n"); tree.graphFPTree(text); // tree.graphHeaderTable(text, maxID+1); text.append("}\n"); return text.toString(); } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Main method. * * @param args the commandline options */ public static void main(String[] args) { try { String[] argsCopy = args.clone(); if (Utils.getFlag('h', argsCopy) || Utils.getFlag("help", argsCopy)) { runAssociator(new FPGrowth(), args); System.out.println("-disk\n\tProcess data off of disk instead of loading\n\t" + "into main memory. This is a command line only option."); return; } if (!Utils.getFlag("disk", args)) { runAssociator(new FPGrowth(), args); } else { String filename; filename = Utils.getOption('t', args); weka.core.converters.ArffLoader loader = null; if (filename.length() != 0) { loader = new weka.core.converters.ArffLoader(); loader.setFile(new java.io.File(filename)); } else { throw new Exception("No training file specified!"); } FPGrowth fpGrowth = new FPGrowth(); fpGrowth.setOptions(args); Utils.checkForRemainingOptions(args); fpGrowth.buildAssociations(loader); System.out.print(fpGrowth.toString()); } } catch (Exception ex) { ex.printStackTrace(); } } }
79,406
31.161604
249
java
tsml-java
tsml-java-master/src/main/java/weka/associations/FilteredAssociationRules.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * FilteredAssociationRules.java * Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand * */ package weka.associations; import java.util.List; import weka.filters.Filter; /** * Class encapsulating a list of association rules and the preprocessing filter * that was applied before they were generated. * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision: 8034 $ * */ public class FilteredAssociationRules extends AssociationRules { /** For serialization */ private static final long serialVersionUID = -4223408305476916955L; protected Filter m_filter; protected AssociationRules m_wrappedRules; /** * Constructs a new FilteredAssociationRules. * * @param producer a string describing the scheme that produced these rules. * @param filter the filter applied to preprocess the data used to learn the rules. * @param rules the wrapped AssociationRules object. */ public FilteredAssociationRules(String producer, Filter filter, AssociationRules rules) { super(null, producer); m_filter = filter; m_wrappedRules = rules; } /** * Constructs a new FilteredAssociationRules. * * @param producer the scheme that produced the rules * @param filter the filter applied to preprocess the data used to learn the rules. * @param rules the wrapped AssociationRules object. 
*/ public FilteredAssociationRules(Object producer, Filter filter, AssociationRules rules) { super(null, producer); m_filter = filter; m_wrappedRules = rules; } /** * Constructs a new FilteredAssociationRules. * * @param filter the filter applied to preprocess the data used to learn the rules. * @param rules the wrapped AssociationRules object. */ public FilteredAssociationRules(Filter filter, AssociationRules rules) { super(null); m_filter = filter; m_wrappedRules = rules; } /** * Set the rules to use. Passes them to the wrapped AssociationRules object. * * @param rules the rules to use. */ public void setRules(List<AssociationRule> rules) { // delegate to our wrapped association rules m_wrappedRules.setRules(rules); } /** * Get the rules. * * @return the rules. */ public List<AssociationRule> getRules() { // delegate to our wrapped association rules return m_wrappedRules.getRules(); } /** * Get the number of rules. * * @return the number of rules. */ public int getNumRules() { // delegate to our wrapped association rules return m_wrappedRules.getNumRules(); } /** * Set the wrapped <code>AssociationRules</code> object to use. * * @param rules the <code>AssociationRules</code> object to wrap. */ public void setWrappedRules(AssociationRules rules) { m_wrappedRules = rules; } /** * Get the wrapped <code>AssociationRules</code> object. * * @return the wrapped <code>AssociationRules</code> object. */ public AssociationRules getWrappedRules() { return m_wrappedRules; } }
3,790
26.671533
91
java
tsml-java
tsml-java-master/src/main/java/weka/associations/FilteredAssociator.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * FilteredAssociator.java * Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand * */ package weka.associations; import java.util.Enumeration; import java.util.Vector; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.filters.Filter; import weka.filters.MultiFilter; /** <!-- globalinfo-start --> * Class for running an arbitrary associator on data that has been passed through an arbitrary filter. Like the associator, the structure of the filter is based exclusively on the training data and test instances will be processed by the filter without changing their structure. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -F &lt;filter specification&gt; * Full class name of filter to use, followed * by filter options. * eg: "weka.filters.unsupervised.attribute.Remove -V -R 1,2" * (default: weka.filters.MultiFilter with * weka.filters.unsupervised.attribute.ReplaceMissingValues)</pre> * * <pre> -c &lt;the class index&gt; * The class index. * (default: -1, i.e. unset)</pre> * * <pre> -W * Full name of base associator. 
* (default: weka.associations.Apriori)</pre> * * <pre> * Options specific to associator weka.associations.Apriori: * </pre> * * <pre> -N &lt;required number of rules output&gt; * The required number of rules. (default = 10)</pre> * * <pre> -T &lt;0=confidence | 1=lift | 2=leverage | 3=Conviction&gt; * The metric type by which to rank rules. (default = confidence)</pre> * * <pre> -C &lt;minimum metric score of a rule&gt; * The minimum confidence of a rule. (default = 0.9)</pre> * * <pre> -D &lt;delta for minimum support&gt; * The delta by which the minimum support is decreased in * each iteration. (default = 0.05)</pre> * * <pre> -U &lt;upper bound for minimum support&gt; * Upper bound for minimum support. (default = 1.0)</pre> * * <pre> -M &lt;lower bound for minimum support&gt; * The lower bound for the minimum support. (default = 0.1)</pre> * * <pre> -S &lt;significance level&gt; * If used, rules are tested for significance at * the given level. Slower. (default = no significance testing)</pre> * * <pre> -I * If set the itemsets found are also output. (default = no)</pre> * * <pre> -R * Remove columns that contain all missing values (default = no)</pre> * * <pre> -V * Report progress iteratively. (default = no)</pre> * * <pre> -A * If set class association rules are mined. (default = no)</pre> * * <pre> -c &lt;the class index&gt; * The class index. (default = last)</pre> * <!-- options-end --> * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @author FracPete (fracpete at waikato dot ac dot nz) * @version $Revision: 8034 $ */ public class FilteredAssociator extends SingleAssociatorEnhancer implements AssociationRulesProducer { /** for serialization */ static final long serialVersionUID = -4523450618538717400L; /** The filter */ protected Filter m_Filter; /** The instance structure of the filtered instances */ protected Instances m_FilteredInstances; /** The class index. */ protected int m_ClassIndex; /** * Default constructor. 
*/ public FilteredAssociator() { m_Associator = new Apriori(); m_Filter = new MultiFilter(); ((MultiFilter) m_Filter).setFilters(new Filter[]{ new weka.filters.unsupervised.attribute.ReplaceMissingValues()}); m_ClassIndex = -1; } /** * Returns a string describing this Associator * * @return a description of the Associator suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for running an arbitrary associator on data that has been passed " + "through an arbitrary filter. Like the associator, the structure of the filter " + "is based exclusively on the training data and test instances will be processed " + "by the filter without changing their structure."; } /** * String describing default associator. * * @return the default associator classname */ protected String defaultAssociatorString() { return Apriori.class.getName(); } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option( "\tFull class name of filter to use, followed\n" + "\tby filter options.\n" + "\teg: \"weka.filters.unsupervised.attribute.Remove -V -R 1,2\"\n" + "\t(default: weka.filters.MultiFilter with\n" + "\tweka.filters.unsupervised.attribute.ReplaceMissingValues)", "F", 1, "-F <filter specification>")); result.addElement(new Option( "\tThe class index.\n" + "\t(default: -1, i.e. unset)", "c", 1, "-c <the class index>")); Enumeration enm = super.listOptions(); while (enm.hasMoreElements()) result.addElement(enm.nextElement()); return result.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -F &lt;filter specification&gt; * Full class name of filter to use, followed * by filter options. 
* eg: "weka.filters.unsupervised.attribute.Remove -V -R 1,2" * (default: weka.filters.MultiFilter with * weka.filters.unsupervised.attribute.ReplaceMissingValues)</pre> * * <pre> -c &lt;the class index&gt; * The class index. * (default: -1, i.e. unset)</pre> * * <pre> -W * Full name of base associator. * (default: weka.associations.Apriori)</pre> * * <pre> * Options specific to associator weka.associations.Apriori: * </pre> * * <pre> -N &lt;required number of rules output&gt; * The required number of rules. (default = 10)</pre> * * <pre> -T &lt;0=confidence | 1=lift | 2=leverage | 3=Conviction&gt; * The metric type by which to rank rules. (default = confidence)</pre> * * <pre> -C &lt;minimum metric score of a rule&gt; * The minimum confidence of a rule. (default = 0.9)</pre> * * <pre> -D &lt;delta for minimum support&gt; * The delta by which the minimum support is decreased in * each iteration. (default = 0.05)</pre> * * <pre> -U &lt;upper bound for minimum support&gt; * Upper bound for minimum support. (default = 1.0)</pre> * * <pre> -M &lt;lower bound for minimum support&gt; * The lower bound for the minimum support. (default = 0.1)</pre> * * <pre> -S &lt;significance level&gt; * If used, rules are tested for significance at * the given level. Slower. (default = no significance testing)</pre> * * <pre> -I * If set the itemsets found are also output. (default = no)</pre> * * <pre> -R * Remove columns that contain all missing values (default = no)</pre> * * <pre> -V * Report progress iteratively. (default = no)</pre> * * <pre> -A * If set class association rules are mined. (default = no)</pre> * * <pre> -c &lt;the class index&gt; * The class index. 
(default = last)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('F', options); if (tmpStr.length() > 0) { String[] filterSpec = Utils.splitOptions(tmpStr); if (filterSpec.length == 0) throw new IllegalArgumentException("Invalid filter specification string"); String filterName = filterSpec[0]; filterSpec[0] = ""; setFilter((Filter) Utils.forName(Filter.class, filterName, filterSpec)); } else { setFilter(new weka.filters.supervised.attribute.Discretize()); } tmpStr = Utils.getOption('c', options); if (tmpStr.length() > 0) { if (tmpStr.equalsIgnoreCase("last")) { setClassIndex(0); } else if (tmpStr.equalsIgnoreCase("first")) { setClassIndex(1); } else { setClassIndex(Integer.parseInt(tmpStr)); } } else { setClassIndex(-1); } super.setOptions(options); } /** * Gets the current settings of the Associator. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { Vector<String> result; int i; String[] options; result = new Vector<String>(); result.add("-F"); result.add("" + getFilterSpec()); result.add("-c"); result.add("" + getClassIndex()); options = super.getOptions(); for (i = 0; i < options.length; i++) result.add(options[i]); return result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String filterTipText() { return "The filter to be used."; } /** * Sets the filter * * @param value the filter with all options set. */ public void setFilter(Filter value) { m_Filter = value; } /** * Gets the filter used. 
* * @return the current filter */ public Filter getFilter() { return m_Filter; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String classIndexTipText() { return "Index of the class attribute. If set to -1, the last attribute is taken as class attribute."; } /** * Sets the class index * * @param value the class index */ public void setClassIndex(int value){ m_ClassIndex = value; } /** * Gets the class index * * @return the index of the class attribute */ public int getClassIndex(){ return m_ClassIndex; } /** * Gets the filter specification string, which contains the class name of * the filter and any options to the filter * * @return the filter string. */ protected String getFilterSpec() { Filter c = getFilter(); if (c instanceof OptionHandler) return c.getClass().getName() + " " + Utils.joinOptions(((OptionHandler)c).getOptions()); else return c.getClass().getName(); } /** * Returns default capabilities of the associator. * * @return the capabilities of this associator */ public Capabilities getCapabilities() { Capabilities result; if (getFilter() == null) { result = super.getCapabilities(); result.disableAll(); } else { result = getFilter().getCapabilities(); } result.enable(Capability.NO_CLASS); // set dependencies for (Capability cap: Capability.values()) result.enableDependency(cap); return result; } /** * Build the associator on the filtered data. 
* * @param data the training data * @throws Exception if the Associator could not be built successfully */ public void buildAssociations(Instances data) throws Exception { if (m_Associator == null) throw new Exception("No base associator has been set!"); // create copy and set class-index data = new Instances(data); if (getClassIndex() == 0) { data.setClassIndex(data.numAttributes() - 1); } else { data.setClassIndex(getClassIndex() - 1); } if (getClassIndex() != -1) { // remove instances with missing class data.deleteWithMissingClass(); } m_Filter.setInputFormat(data); // filter capabilities are checked here data = Filter.useFilter(data, m_Filter); // can associator handle the data? getAssociator().getCapabilities().testWithFail(data); m_FilteredInstances = data.stringFreeStructure(); m_Associator.buildAssociations(data); } /** * Gets the list of mined association rules. * * @return the list of association rules discovered during mining. * Returns null if mining hasn't been performed yet. */ public AssociationRules getAssociationRules() { if (m_Associator instanceof AssociationRulesProducer) { AssociationRules rules = ((AssociationRulesProducer)m_Associator).getAssociationRules(); // construct a new FilteredAssociationRules FilteredAssociationRules fRules = new FilteredAssociationRules(FilteredAssociator.this, m_Filter, rules); return fRules; } // return null if we don't wrap an association rules producer return null; } /** * Gets a list of the names of the metrics output for * each rule. This list should be the same (in terms of * the names and order thereof) as that produced by * AssociationRule.getMetricNamesForRule(). * * @return an array of the names of the metrics available * for each rule learned by this producer. 
*/ public String[] getRuleMetricNames() { if (m_Associator instanceof AssociationRulesProducer) { return ((AssociationRulesProducer)m_Associator).getRuleMetricNames(); } return new String[0]; } /** * Returns true if this AssociationRulesProducer can actually * produce rules. Most implementing classes will always return * true from this method (obviously :-)). However, an implementing * class that actually acts as a wrapper around things that may * or may not implement AssociationRulesProducer will want to * return false if the thing they wrap can't produce rules. * * @return true if this producer can produce rules in its current * configuration */ public boolean canProduceRules() { if (m_Associator instanceof AssociationRulesProducer) { return ((AssociationRulesProducer)m_Associator).canProduceRules(); } return false; } /** * Output a representation of this associator * * @return a representation of this associator */ public String toString() { String result; if (m_FilteredInstances == null) { result = "FilteredAssociator: No model built yet."; } else { result = "FilteredAssociator using " + getAssociatorSpec() + " on data filtered through " + getFilterSpec() + "\n\nFiltered Header\n" + m_FilteredInstances.toString() + "\n\nAssociator Model\n" + m_Associator.toString(); } return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Main method for running this class. * * @param args commandline arguments, use "-h" for full list */ public static void main(String[] args) { runAssociator(new FilteredAssociator(), args); } }
15,784
28.123616
278
java
tsml-java
tsml-java-master/src/main/java/weka/associations/GeneralizedSequentialPatterns.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * GeneralizedSequentialPatterns.java * Copyright (C) 2007 Sebastian Beer * */ package weka.associations; import weka.associations.gsp.Element; import weka.associations.gsp.Sequence; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.FastVector; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.text.SimpleDateFormat; import java.util.Date; import java.util.Enumeration; import java.util.Vector; /** <!-- globalinfo-start --> * Class implementing a GSP algorithm for discovering sequential patterns in a sequential data set.<br/> * The attribute identifying the distinct data sequences contained in the set can be determined by the respective option. Furthermore, the set of output results can be restricted by specifying one or more attributes that have to be contained in each element/itemset of a sequence.<br/> * <br/> * For further information see:<br/> * <br/> * Ramakrishnan Srikant, Rakesh Agrawal (1996). 
Mining Sequential Patterns: Generalizations and Performance Improvements. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;proceedings{Srikant1996, * author = {Ramakrishnan Srikant and Rakesh Agrawal}, * booktitle = {Advances in Database Technology EDBT '96}, * publisher = {Springer}, * title = {Mining Sequential Patterns: Generalizations and Performance Improvements}, * year = {1996} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * If set, algorithm is run in debug mode and * may output additional info to the console</pre> * * <pre> -S &lt;minimum support threshold&gt; * The miminum support threshold. * (default: 0.9)</pre> * * <pre> -I &lt;attribute number representing the data sequence ID * The attribute number representing the data sequence ID. * (default: 0)</pre> * * <pre> -F &lt;attribute numbers used for result filtering * The attribute numbers used for result filtering. * (default: -1)</pre> * <!-- options-end --> * * @author Sebastian Beer * @version $Revision: 5504 $ */ public class GeneralizedSequentialPatterns extends AbstractAssociator implements OptionHandler, TechnicalInformationHandler { /** for serialization */ private static final long serialVersionUID = -4119691320812254676L; /** the minimum support threshold */ protected double m_MinSupport; /** number indicating the attribute holding the data sequence ID */ protected int m_DataSeqID; /** original sequential data set to be used for sequential patterns extraction */ protected Instances m_OriginalDataSet; /** all generated frequent sequences, i.e. sequential patterns */ protected FastVector m_AllSequentialPatterns; /** number of cycles performed until termination */ protected int m_Cycles; /** String indicating the starting time of an cycle. */ protected String m_CycleStart; /** String indicating the ending time of an cycle. 
*/ protected String m_CycleEnd; /** String indicating the starting time of the algorithm. */ protected String m_AlgorithmStart; /** String containing the attribute numbers that are used for result * filtering; -1 means no filtering */ protected String m_FilterAttributes; /** Vector containing the attribute numbers that are used for result * filtering; -1 means no filtering */ protected FastVector m_FilterAttrVector; /** Whether the classifier is run in debug mode. */ protected boolean m_Debug = false; /** * Constructor. */ public GeneralizedSequentialPatterns() { resetOptions(); } /** * Returns global information about the algorithm. * * @return the global information */ public String globalInfo() { return "Class implementing a GSP algorithm for discovering sequential " + "patterns in a sequential data set.\n" + "The attribute identifying the distinct data sequences contained in " + "the set can be determined by the respective option. Furthermore, the " + "set of output results can be restricted by specifying one or more " + "attributes that have to be contained in each element/itemset of a " + "sequence.\n\n" + "For further information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns TechnicalInformation about the paper related to the algorithm. * * @return the TechnicalInformation */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation paper = new TechnicalInformation(Type.PROCEEDINGS); paper.setValue(Field.AUTHOR, "Ramakrishnan Srikant and Rakesh Agrawal"); paper.setValue(Field.TITLE, "Mining Sequential Patterns: Generalizations and Performance Improvements"); paper.setValue(Field.BOOKTITLE, "Advances in Database Technology EDBT '96"); paper.setValue(Field.YEAR, "1996"); paper.setValue(Field.PUBLISHER, "Springer"); return paper; } /** * Returns an enumeration of the available options. 
* * @return the available options */ public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option( "\tIf set, algorithm is run in debug mode and\n" + "\tmay output additional info to the console", "D", 0, "-D")); result.addElement(new Option( "\tThe miminum support threshold.\n" + "\t(default: 0.9)", "S", 1, "-S <minimum support threshold>")); result.addElement(new Option( "\tThe attribute number representing the data sequence ID.\n" + "\t(default: 0)", "I", 1, "-I <attribute number representing the data sequence ID")); result.addElement(new Option( "\tThe attribute numbers used for result filtering.\n" + "\t(default: -1)", "F", 1, "-F <attribute numbers used for result filtering")); return result.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * If set, algorithm is run in debug mode and * may output additional info to the console</pre> * * <pre> -S &lt;minimum support threshold&gt; * The miminum support threshold. * (default: 0.9)</pre> * * <pre> -I &lt;attribute number representing the data sequence ID * The attribute number representing the data sequence ID. * (default: 0)</pre> * * <pre> -F &lt;attribute numbers used for result filtering * The attribute numbers used for result filtering. * (default: -1)</pre> * <!-- options-end --> * * @param options the Array containing the options */ public void setOptions(String[] options) throws Exception { String tmpStr; resetOptions(); setDebug(Utils.getFlag('D', options)); tmpStr = Utils.getOption('S', options); if (tmpStr.length() != 0) setMinSupport(Double.parseDouble(tmpStr)); tmpStr = Utils.getOption('I', options); if (tmpStr.length() != 0) setDataSeqID(Integer.parseInt(tmpStr)); tmpStr = Utils.getOption('F', options); if (tmpStr.length() != 0) setFilterAttributes(tmpStr); } /** * Returns an Array containing the current options settings. 
* * @return the Array containing the settings */ public String[] getOptions() { Vector<String> result; result = new Vector<String>(); if (getDebug()) result.add("-D"); result.add("-S"); result.add("" + getMinSupport()); result.add("-I"); result.add("" + getDataSeqID()); result.add("-F"); result.add(getFilterAttributes()); return result.toArray(new String[result.size()]); } /** * Resets the algorithm's options to the default values. */ protected void resetOptions() { m_MinSupport = 0.9; m_DataSeqID = 0; m_FilterAttributes = "-1"; } /** * Returns the Capabilities of the algorithm. * * @return the Capabilities */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NO_CLASS); return result; } /** * Extracts all sequential patterns out of a given sequential data set and * prints out the results. * * @param data the original data set */ public void buildAssociations(Instances data) throws Exception { // can associator handle the data? getCapabilities().testWithFail(data); m_AllSequentialPatterns = new FastVector(); m_Cycles = 0; m_FilterAttrVector = new FastVector(); m_AlgorithmStart = getTimeAndDate(); m_OriginalDataSet = new Instances(data); extractFilterAttributes(m_FilterAttributes); findFrequentSequences(); } /** * Calculates the total number of extracted frequent sequences. * * @return the total number of frequent sequences */ protected int calcFreqSequencesTotal() { int total = 0; Enumeration allSeqPatternsEnum = m_AllSequentialPatterns.elements(); while (allSeqPatternsEnum.hasMoreElements()) { FastVector kSequences = (FastVector) allSeqPatternsEnum.nextElement(); total += kSequences.size(); } return total; } /** * Extracts the data sequences out of the original data set according to * their sequence id attribute, which is removed after extraction. 
* * @param originalDataSet the original data set * @param dataSeqID the squence ID to use * @return set of distinct data sequences */ protected FastVector extractDataSequences (Instances originalDataSet, int dataSeqID) { FastVector dataSequences = new FastVector(); int firstInstance = 0; int lastInstance = 0; Attribute seqIDAttribute = originalDataSet.attribute(dataSeqID); for (int i = 0; i < seqIDAttribute.numValues(); i++) { double sequenceID = originalDataSet.instance(firstInstance).value(dataSeqID); while (lastInstance < originalDataSet.numInstances() && sequenceID == originalDataSet.instance(lastInstance).value(dataSeqID)) { lastInstance++; } Instances dataSequence = new Instances(originalDataSet, firstInstance, (lastInstance)-firstInstance); dataSequence.deleteAttributeAt(dataSeqID); dataSequences.addElement(dataSequence); firstInstance = lastInstance; } return dataSequences; } /** * Parses a given String containing attribute numbers which are used for * result filtering. * * @param attrNumbers the String of attribute numbers */ public void extractFilterAttributes(String attrNumbers) { String numbers = attrNumbers.trim(); while (!numbers.equals("")) { int commaLoc = numbers.indexOf(','); if (commaLoc != -1) { String number = numbers.substring(0, commaLoc); numbers = numbers.substring(commaLoc + 1).trim(); m_FilterAttrVector.addElement(Integer.decode(number)); } else { m_FilterAttrVector.addElement(Integer.decode(numbers)); break; } } } /** * The actual method for extracting frequent sequences. 
* * @throws CloneNotSupportedException */ protected void findFrequentSequences() throws CloneNotSupportedException { m_CycleStart = getTimeAndDate(); Instances originalDataSet = m_OriginalDataSet; FastVector dataSequences = extractDataSequences(m_OriginalDataSet, m_DataSeqID); long minSupportCount = Math.round(m_MinSupport * dataSequences.size()); FastVector kMinusOneSequences; FastVector kSequences; originalDataSet.deleteAttributeAt(0); FastVector oneElements = Element.getOneElements(originalDataSet); m_Cycles = 1; kSequences = Sequence.oneElementsToSequences(oneElements); Sequence.updateSupportCount(kSequences, dataSequences); kSequences = Sequence.deleteInfrequentSequences(kSequences, minSupportCount); m_CycleEnd = getTimeAndDate(); if (kSequences.size() == 0) { return; } while (kSequences.size() > 0) { m_CycleStart = getTimeAndDate(); m_AllSequentialPatterns.addElement(kSequences.copy()); kMinusOneSequences = kSequences; kSequences = Sequence.aprioriGen(kMinusOneSequences); Sequence.updateSupportCount(kSequences, dataSequences); kSequences = Sequence.deleteInfrequentSequences(kSequences, minSupportCount); m_CycleEnd = getTimeAndDate(); if (getDebug()) System.out.println( "Cycle " + m_Cycles + " from " + m_CycleStart + " to " + m_CycleEnd); m_Cycles++; } } /** * Returns the dataSeqID option tip text for the Weka GUI. * * @return the option tip text */ public String dataSeqIDTipText() { return "The attribute number representing the data sequence ID."; } /** * Returns the attribute representing the data sequence ID. * * @return the data sequence ID */ public int getDataSeqID() { return m_DataSeqID; } /** * Sets the attribute representing the data sequence ID. * * @param value the data sequence ID to set */ public void setDataSeqID(int value) { m_DataSeqID = value; } /** * Returns the filterAttributes option tip text for the Weka GUI. 
* * @return the option tip text */ public String filterAttributesTipText() { return "The attribute numbers (eg \"0, 1\") used for result filtering; only " + "sequences containing the specified attributes in each of their " + "elements/itemsets will be output; -1 prints all."; } /** * Returns the String containing the attributes which are used for output * filtering. * * @return the String containing the attributes */ public String getFilterAttributes() { return m_FilterAttributes; } /** * Sets the String containing the attributes which are used for output * filtering. * * @param value the String containing the attributes */ public void setFilterAttributes(String value) { m_FilterAttributes = value; } /** * Returns the minimum support option tip text for the Weka GUI. * * @return the option tip text */ public String minSupportTipText() { return "Minimum support threshold."; } /** * Returns the minimum support threshold. * * @return the minimum support threshold */ public double getMinSupport() { return m_MinSupport; } /** * Sets the minimum support threshold. * * @param value the minimum support threshold */ public void setMinSupport(double value) { m_MinSupport = value; } /** * Set debugging mode. * * @param value true if debug output should be printed */ public void setDebug(boolean value) { m_Debug = value; } /** * Get whether debugging is turned on. * * @return true if debugging output is on */ public boolean getDebug() { return m_Debug; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String debugTipText() { return "If set to true, algorithm may output additional info to the console."; } /** * Returns the current time and date. 
* * @return the time and date */ protected String getTimeAndDate() { SimpleDateFormat dateFormat; dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); return dateFormat.format(new Date()); } /** * Returns the time/date string the algorithm was started * * @return the time and date the algorithm was started */ public String getAlgorithmStart() { return m_AlgorithmStart; } /** * Returns the time/date string the cycle was started * * @return the time and date the cycle was started */ public String getCycleStart() { return m_CycleStart; } /** * Returns the time/date string the cycle ended * * @return the time and date the cycle ended */ public String getCycleEnd() { return m_CycleEnd; } /** * Returns a String containing the result information of the algorithm. * * @return the String containing the result information */ public String toString() { StringBuffer result = new StringBuffer(); result.append("GeneralizedSequentialPatterns\n"); result.append("=============================\n\n"); result.append("Number of cycles performed: " + (m_Cycles-1) + "\n"); result.append("Total number of frequent sequences: " + calcFreqSequencesTotal() + "\n\n"); result.append("Frequent Sequences Details (filtered):\n\n"); for (int i = 0; i < m_AllSequentialPatterns.size(); i++) { result.append("- " + (i+1) + "-sequences\n\n"); FastVector kSequences = (FastVector) m_AllSequentialPatterns.elementAt(i); result.append(Sequence.setOfSequencesToString(kSequences, m_OriginalDataSet, m_FilterAttrVector) + "\n"); } return result.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5504 $"); } /** * Main method. * * @param args commandline options, use -h for help */ public static void main(String[] args) { runAssociator(new GeneralizedSequentialPatterns(), args); } }
18,509
28.287975
285
java
tsml-java
tsml-java-master/src/main/java/weka/associations/Item.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Item.java * Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand * */ package weka.associations; import java.io.Serializable; import weka.core.Attribute; /** * Class that encapsulates information about an individual item. An item * is a value of a nominal attribute, so this class has a backing Attribute. * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com). * @version $Revision: 8034 $ */ public abstract class Item implements Serializable, Comparable<Item> { /** For serialization */ private static final long serialVersionUID = -430198211081183575L; /** The frequency of this item */ protected int m_frequency; /** The attribute that backs this item */ protected Attribute m_attribute; public Item(Attribute att) { m_attribute = att; } /** * Increase the frequency of this item. * * @param f the amount to increase the frequency by. */ public void increaseFrequency(int f) { m_frequency += f; } /** * Decrease the frequency of this item. * * @param f the amount by which to decrease the frequency. */ public void decreaseFrequency(int f) { m_frequency -= f; } /** * Increment the frequency of this item. */ public void increaseFrequency() { m_frequency++; } /** * Decrement the frequency of this item. */ public void decreaseFrequency() { m_frequency--; } /** * Get the frequency of this item. * * @return the frequency. 
*/ public int getFrequency() { return m_frequency; } /** * Get the attribute that this item originates from. * * @return the corresponding attribute. */ public Attribute getAttribute() { return m_attribute; } /** * Get this item's value as a String. * * @return this item's value as a String. */ public abstract String getItemValueAsString(); /** * Get this item's comparison operator as a String. * * @return this item's comparison operator as a String. */ public abstract String getComparisonAsString(); /** * A string representation of this item. (i.e. * <attribute name> <comparison operator> <item value>). * * @return a string representation of this item. */ public String toString() { return toString(false); } /** * A string representation of this item, (i.e. * <attribute name> <comparison operator> <item value>). * This default implementation just prints the attribute * name and (optionally) frequency information. * * @param freq true if the frequency should be included. * @return a string representation of this item. */ public String toString(boolean freq) { String result = m_attribute.name(); if (freq) { result += ":" + m_frequency; } return result; } /** * Ensures that items will be sorted in descending order of frequency. * Ties are ordered by attribute name. * * @param comp the Item to compare against. */ public int compareTo(Item comp) { if (m_frequency == comp.getFrequency()) { // sort by name return -1 * m_attribute.name().compareTo(comp.getAttribute().name()); } if (comp.getFrequency() < m_frequency) { return -1; } return 1; } /** * Equals. Just compares attribute. * @return true if this Item is equal to the argument. */ public boolean equals(Object compareTo) { if (!(compareTo instanceof Item)) { return false; } Item b = (Item)compareTo; if (m_attribute.equals(b.getAttribute())) { return true; } return false; } public int hashCode() { return (m_attribute.name().hashCode() ^ m_attribute.numValues()) * m_frequency; } }
4,456
24.039326
76
java
tsml-java
tsml-java-master/src/main/java/weka/associations/ItemSet.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * ItemSet.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.associations; import java.io.Serializable; import java.util.Enumeration; import java.util.Hashtable; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * Class for storing a set of items. Item sets are stored in a lexicographic * order, which is determined by the header information of the set of instances * used for generating the set of items. All methods in this class assume that * item sets are stored in lexicographic order. The class provides the general * methods used for item sets in class - and standard association rule mining. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 9722 $ */ public class ItemSet implements Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = 2724000045282835791L; /** The items stored as an array of of ints. */ protected int[] m_items; /** Counter for how many transactions contain this item set. 
*/ protected int m_counter; /** The total number of transactions */ protected int m_totalTransactions; /** * Constructor * * @param totalTrans the total number of transactions in the data */ public ItemSet(int totalTrans) { m_totalTransactions = totalTrans; } /** * Constructor * * @param totalTrans the total number of transactions in the data * @param array the attribute values encoded in an int array */ public ItemSet(int totalTrans, int[] array) { m_totalTransactions = totalTrans; m_items = array; m_counter = 1; } /** * Contsructor * * @param array the item set represented as an int array */ public ItemSet(int[] array) { m_items = array; m_counter = 0; } /** * Checks if an instance contains an item set. * * @param instance the instance to be tested * @return true if the given instance contains this item set */ public boolean containedByTreatZeroAsMissing(Instance instance) { if (instance instanceof weka.core.SparseInstance) { int numInstVals = instance.numValues(); int numItemSetVals = m_items.length; for (int p1 = 0, p2 = 0; p1 < numInstVals || p2 < numItemSetVals;) { int instIndex = Integer.MAX_VALUE; if (p1 < numInstVals) { instIndex = instance.index(p1); } int itemIndex = p2; if (m_items[itemIndex] > -1) { if (itemIndex != instIndex) { return false; } else { if (instance.isMissingSparse(p1)) { return false; } if (m_items[itemIndex] != (int) instance.valueSparse(p1)) { return false; } } p1++; p2++; } else { if (itemIndex < instIndex) { p2++; } else if (itemIndex == instIndex) { p2++; p1++; } } } } else { for (int i = 0; i < instance.numAttributes(); i++) if (m_items[i] > -1) { if (instance.isMissing(i) || (int) instance.value(i) == 0) return false; if (m_items[i] != (int) instance.value(i)) return false; } } return true; } /** * Checks if an instance contains an item set. 
* * @param instance the instance to be tested * @return true if the given instance contains this item set */ public boolean containedBy(Instance instance) { for (int i = 0; i < instance.numAttributes(); i++) if (m_items[i] > -1) { if (instance.isMissing(i)) return false; if (m_items[i] != (int) instance.value(i)) return false; } return true; } /** * Deletes all item sets that don't have minimum support. * * @return the reduced set of item sets * @param maxSupport the maximum support * @param itemSets the set of item sets to be pruned * @param minSupport the minimum number of transactions to be covered */ public static FastVector deleteItemSets(FastVector itemSets, int minSupport, int maxSupport) { FastVector newVector = new FastVector(itemSets.size()); for (int i = 0; i < itemSets.size(); i++) { ItemSet current = (ItemSet) itemSets.elementAt(i); if ((current.m_counter >= minSupport) && (current.m_counter <= maxSupport)) newVector.addElement(current); } return newVector; } /** * Tests if two item sets are equal. * * @param itemSet another item set * @return true if this item set contains the same items as the given one */ @Override public boolean equals(Object itemSet) { if ((itemSet == null) || !(itemSet.getClass().equals(this.getClass()))) { return false; } if (m_items.length != ((ItemSet) itemSet).m_items.length) return false; for (int i = 0; i < m_items.length; i++) if (m_items[i] != ((ItemSet) itemSet).m_items[i]) return false; return true; } /** * Return a hashtable filled with the given item sets. 
 *
 * @param itemSets the set of item sets to be used for filling the hash table
 * @param initialSize the initial size of the hashtable
 * @return the generated hashtable, mapping each item set to its support count
 */
public static Hashtable getHashtable(FastVector itemSets, int initialSize) {
  Hashtable hashtable = new Hashtable(initialSize);
  // Key: the item set itself (lookup relies on ItemSet's equals()/hashCode(),
  // which consider only m_items); value: the boxed support counter.
  for (int i = 0; i < itemSets.size(); i++) {
    ItemSet current = (ItemSet) itemSets.elementAt(i);
    hashtable.put(current, new Integer(current.m_counter));
  }
  return hashtable;
}

/**
 * Produces a hash code for an item set. The hash depends only on the item
 * array (m_items), never on the support counter, so counter updates do not
 * invalidate hashtable lookups.
 *
 * @return a hash code for a set of items
 */
@Override
public int hashCode() {
  long result = 0;
  for (int i = m_items.length - 1; i >= 0; i--)
    result += (i * m_items[i]);
  return (int) result;
}

/**
 * Merges all item sets in the set of (k-1)-item sets to create the (k)-item
 * sets and updates the counters (Apriori candidate generation). Two sets are
 * merged only if they share a common prefix of 'size' non-missing items and
 * differ in at most one remaining position each.
 *
 * @return the generated (k)-item sets, each with its counter reset to 0
 * @param totalTrans the total number of transactions
 * @param itemSets the set of (k-1)-item sets
 * @param size the value of (k-1)
 */
public static FastVector mergeAllItemSets(FastVector itemSets, int size, int totalTrans) {
  FastVector newVector = new FastVector();
  ItemSet result;
  int numFound, k;
  for (int i = 0; i < itemSets.size(); i++) {
    ItemSet first = (ItemSet) itemSets.elementAt(i);
    // Labelled loop: 'break out' abandons the current pairing as soon as the
    // prefixes diverge; item sets are kept in lexicographic order, so no
    // later partner for 'first' can match either.
    out: for (int j = i + 1; j < itemSets.size(); j++) {
      ItemSet second = (ItemSet) itemSets.elementAt(j);
      result = new ItemSet(totalTrans);
      result.m_items = new int[first.m_items.length];

      // Find and copy common prefix of size 'size' (a -1 entry means the
      // attribute is absent from the item set and is not counted).
      numFound = 0;
      k = 0;
      while (numFound < size) {
        if (first.m_items[k] == second.m_items[k]) {
          if (first.m_items[k] != -1)
            numFound++;
          result.m_items[k] = first.m_items[k];
        } else
          break out;
        k++;
      }

      // Check difference: after the prefix, the two sets must not both have a
      // concrete value at the same position; copy whichever value is present.
      while (k < first.m_items.length) {
        if ((first.m_items[k] != -1) && (second.m_items[k] != -1))
          break;
        else {
          if (first.m_items[k] != -1)
            result.m_items[k] = first.m_items[k];
          else
            result.m_items[k] = second.m_items[k];
        }
        k++;
      }
      // Only keep the candidate if the scan reached the end (no conflict).
      if (k == first.m_items.length) {
        result.m_counter = 0;
        newVector.addElement(result);
      }
    }
  }
  return newVector;
}

/**
 * Prunes a set of (k)-item sets using the given (k-1)-item sets (Apriori
 * pruning: every (k-1)-subset of a surviving candidate must be frequent).
 *
 * @param toPrune the set of (k)-item sets to be pruned
 * @param kMinusOne the (k-1)-item sets to be used for pruning
 * @return the pruned set of item sets
 */
public static FastVector pruneItemSets(FastVector toPrune, Hashtable kMinusOne) {
  FastVector newVector = new FastVector(toPrune.size());
  int help, j;
  for (int i = 0; i < toPrune.size(); i++) {
    ItemSet current = (ItemSet) toPrune.elementAt(i);
    for (j = 0; j < current.m_items.length; j++)
      if (current.m_items[j] != -1) {
        // Temporarily blank out item j to form a (k-1)-subset, look it up,
        // then restore the item; break as soon as one subset is infrequent.
        help = current.m_items[j];
        current.m_items[j] = -1;
        if (kMinusOne.get(current) == null) {
          current.m_items[j] = help;
          break;
        } else {
          current.m_items[j] = help;
        }
      }
    // j only reaches the array length if no subset lookup failed.
    if (j == current.m_items.length)
      newVector.addElement(current);
  }
  return newVector;
}

/**
 * Prunes a set of rules, in place, keeping only rules whose confidence is at
 * least minConfidence.
 *
 * @param rules a two-dimensional array of lists of item sets. The first list
 *          of item sets contains the premises, the second one the
 *          consequences, the third the confidences; when present, indices
 *          3-5 hold lift, leverage and conviction and are filtered in step.
 * @param minConfidence the minimum confidence the rules have to have
 */
public static void pruneRules(FastVector[] rules, double minConfidence) {
  FastVector newPremises = new FastVector(rules[0].size()), newConsequences = new FastVector(
      rules[1].size()), newConf = new FastVector(rules[2].size());
  FastVector newLift = null, newLev = null, newConv = null;
  if (rules.length > 3) {
    newLift = new FastVector(rules[3].size());
    newLev = new FastVector(rules[4].size());
    newConv = new FastVector(rules[5].size());
  }

  for (int i = 0; i < rules[0].size(); i++)
    if (!(((Double) rules[2].elementAt(i)).doubleValue() < minConfidence)) {
      newPremises.addElement(rules[0].elementAt(i));
      newConsequences.addElement(rules[1].elementAt(i));
      newConf.addElement(rules[2].elementAt(i));
      if (rules.length > 3) {
        newLift.addElement(rules[3].elementAt(i));
        newLev.addElement(rules[4].elementAt(i));
        newConv.addElement(rules[5].elementAt(i));
      }
    }
  rules[0] = newPremises;
  rules[1] = newConsequences;
  rules[2] = newConf;
  if (rules.length > 3) {
    rules[3] = newLift;
    rules[4] = newLev;
    rules[5] = newConv;
  }
}

/**
 * Converts the header info of the given set of instances into a set of item
 * sets (singletons). The ordering of values in the header file determines the
 * lexicographic order.
 *
 * @param instances the set of instances whose header info is to be used
 * @return a set of item sets, each containing a single item
 * @exception Exception if singletons can't be generated successfully (any
 *              numeric attribute is present)
 */
public static FastVector singletons(Instances instances) throws Exception {
  FastVector setOfItemSets = new FastVector();
  ItemSet current;

  for (int i = 0; i < instances.numAttributes(); i++) {
    if (instances.attribute(i).isNumeric())
      throw new Exception("Can't handle numeric attributes!");
    for (int j = 0; j < instances.attribute(i).numValues(); j++) {
      current = new ItemSet(instances.numInstances());
      current.m_items = new int[instances.numAttributes()];
      // -1 marks "attribute not part of this item set".
      for (int k = 0; k < instances.numAttributes(); k++)
        current.m_items[k] = -1;
      current.m_items[i] = j;
      setOfItemSets.addElement(current);
    }
  }
  return setOfItemSets;
}

/**
 * Outputs the support for an item set.
 *
 * @return the support (transaction count)
 */
public int support() {
  return m_counter;
}

/**
 * Returns the contents of an item set as a string.
 *
 * @param instances contains the relevant header information
 * @return string describing the item set ("attr=value " pairs followed by
 *         the counter)
 */
public String toString(Instances instances) {
  StringBuffer text = new StringBuffer();

  for (int i = 0; i < instances.numAttributes(); i++)
    if (m_items[i] != -1) {
      text.append(instances.attribute(i).name() + '=');
      text.append(instances.attribute(i).value(m_items[i]) + ' ');
    }
  text.append(m_counter);
  return text.toString();
}

/**
 * Returns the contents of an item set as a delimited string.
 *
 * @param instances contains the relevant header information
 * @param outerDelim the outer delimiter (separates the items from the counter)
 * @param innerDelim the inner delimiter (separates the items)
 * @return string describing the item set
 */
public String toString(Instances instances, char outerDelim, char innerDelim) {
  StringBuffer text = new StringBuffer();

  for (int i = 0; i < instances.numAttributes(); i++)
    if (m_items[i] != -1) {
      text.append(instances.attribute(i).name()).append('=')
          .append(instances.attribute(i).value(m_items[i]))
          .append(innerDelim);
    }
  // Replace the trailing inner delimiter with the outer one; for an empty
  // item set only emit the outer delimiter when it is distinguishable.
  int n = text.length();
  if (n > 0) {
    text.setCharAt(n - 1, outerDelim);
  } else {
    if (outerDelim != ' ' || innerDelim != ' ') {
      text.append(outerDelim);
    }
  }
  text.append(m_counter);
  return text.toString();
}

/**
 * Updates counter of item set with respect to given transaction.
 *
 * @param instance the instance to be used for updating the counter
 */
public void upDateCounter(Instance instance) {
  if (containedBy(instance))
    m_counter++;
}

/**
 * Updates counter of item set with respect to given transaction, treating
 * zero values as missing.
 *
 * @param instance the instance to be used for updating the counter
 */
public void updateCounterTreatZeroAsMissing(Instance instance) {
  if (containedByTreatZeroAsMissing(instance)) {
    m_counter++;
  }
}

/**
 * Updates counters for a set of item sets and a set of instances.
 *
 * @param itemSets the set of item sets which are to be updated
 * @param instances the instances to be used for updating the counters
 */
public static void upDateCounters(FastVector itemSets, Instances instances) {
  for (int i = 0; i < instances.numInstances(); i++) {
    Enumeration enu = itemSets.elements();
    while (enu.hasMoreElements())
      ((ItemSet) enu.nextElement()).upDateCounter(instances.instance(i));
  }
}

/**
 * Updates counters for a set of item sets and a set of instances, treating
 * zero values as missing.
 *
 * @param itemSets the set of item sets which are to be updated
 * @param instances the instances to be used for updating the counters
 */
public static void upDateCountersTreatZeroAsMissing(FastVector itemSets,
    Instances instances) {
  for (int i = 0; i < instances.numInstances(); i++) {
    Enumeration enu = itemSets.elements();
    while (enu.hasMoreElements())
      ((ItemSet) enu.nextElement()).updateCounterTreatZeroAsMissing(instances
          .instance(i));
  }
}

/**
 * Gets the counter.
 *
 * @return the counter
 */
public int counter() {
  return m_counter;
}

/**
 * Gets the item set as an int array.
 *
 * @return int array representing an item set (-1 entries mean "absent")
 */
public int[] items() {
  return m_items;
}

/**
 * Gets the index of the value of the specified attribute.
 *
 * @param k the attribute index
 * @return the index of the attribute value
 */
public int itemAt(int k) {
  return m_items[k];
}

/**
 * Sets the counter.
 *
 * @param count the counter
 */
public void setCounter(int count) {
  m_counter = count;
}

/**
 * Sets the item set's items.
 *
 * @param items an int array representing an item set
 */
public void setItem(int[] items) {
  m_items = items;
}

/**
 * Sets the index of an attribute value.
 *
 * @param value the index of the attribute value
 * @param k the index of the attribute
 */
public void setItemAt(int value, int k) {
  m_items[k] = value;
}

/**
 * Returns the revision string.
 *
 * @return the revision
 */
@Override
public String getRevision() {
  return RevisionUtils.extract("$Revision: 9722 $");
}
}
16,510
27.175768
95
java
tsml-java
tsml-java-master/src/main/java/weka/associations/LabeledItemSet.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * LabeledItemSet.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.associations;

import java.io.Serializable;
import java.util.Enumeration;
import java.util.Hashtable;

import weka.core.FastVector;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;

/**
 * Class for storing a set of items together with a class label. Item sets are
 * stored in a lexicographic order, which is determined by the header
 * information of the set of instances used for generating the set of items. All
 * methods in this class assume that item sets are stored in lexicographic
 * order. The class provides the methods used for item sets in class association
 * rule mining. Because every item set knows its class label the training set
 * can be split up virtually.
 *
 * @author Stefan Mutter (mutter@cs.waikato.ac.nz)
 * @version $Revision: 9722 $
 */
public class LabeledItemSet extends ItemSet implements Serializable,
  RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = 4158771925518299903L;

  /** The class label. */
  protected int m_classLabel;

  /** The support of the rule (transactions matching items AND class label). */
  protected int m_ruleSupCounter;

  /**
   * Constructor
   *
   * @param totalTrans the total number of transactions
   * @param classLabel the class label
   */
  public LabeledItemSet(int totalTrans, int classLabel) {
    super(totalTrans);
    m_classLabel = classLabel;
  }

  /**
   * Deletes all item sets that don't have minimum support and have more than
   * maximum support. Filtering is on the rule support counter, not the plain
   * item counter.
   *
   * @return the reduced set of item sets
   * @param maxSupport the maximum support
   * @param itemSets the set of item sets to be pruned
   * @param minSupport the minimum number of transactions to be covered
   */
  public static FastVector deleteItemSets(FastVector itemSets, int minSupport,
    int maxSupport) {

    FastVector newVector = new FastVector(itemSets.size());

    for (int i = 0; i < itemSets.size(); i++) {
      LabeledItemSet current = (LabeledItemSet) itemSets.elementAt(i);
      if ((current.m_ruleSupCounter >= minSupport)
        && (current.m_ruleSupCounter <= maxSupport))
        newVector.addElement(current);
    }
    return newVector;
  }

  /**
   * Tests if two item sets are equal: same items AND same class label.
   * NOTE(review): hashCode() is not overridden here; the inherited
   * ItemSet.hashCode() depends only on the items, so equal labeled sets still
   * have equal hashes (the equals/hashCode contract holds).
   *
   * @param itemSet another item set
   * @return true if this item set contains the same items as the given one
   */
  @Override
  public final boolean equals(Object itemSet) {

    if (!(this.equalCondset(itemSet)))
      return false;
    if (m_classLabel != ((LabeledItemSet) itemSet).m_classLabel)
      return false;

    return true;
  }

  /**
   * Compares the item (condition) parts of two item sets, ignoring the class
   * label.
   *
   * @param itemSet an item set
   * @return true if the item sets are equal, false otherwise
   */
  public final boolean equalCondset(Object itemSet) {

    if ((itemSet == null) || !(itemSet.getClass().equals(this.getClass()))) {
      return false;
    }
    if (m_items.length != ((ItemSet) itemSet).items().length)
      return false;
    for (int i = 0; i < m_items.length; i++)
      if (m_items[i] != ((ItemSet) itemSet).itemAt(i))
        return false;
    return true;
  }

  /**
   * Return a hashtable filled with the given item sets.
   *
   * @param itemSets the set of item sets to be used for filling the hash table
   * @param initialSize the initial size of the hashtable
   * @return the generated hashtable, mapping each labeled item set to its
   *         class label. Sets with identical items but different labels are
   *         distinct keys because equals() includes the label.
   */
  public static Hashtable getHashtable(FastVector itemSets, int initialSize) {

    Hashtable hashtable = new Hashtable(initialSize);

    for (int i = 0; i < itemSets.size(); i++) {
      LabeledItemSet current = (LabeledItemSet) itemSets.elementAt(i);
      hashtable.put(current, new Integer(current.m_classLabel));
    }
    return hashtable;
  }

  /**
   * Merges all item sets in the set of (k-1)-item sets to create the (k)-item
   * sets and updates the counters. Only item sets with the same class label
   * are merged.
   *
   * @return the generated (k)-item sets, with both counters reset to 0
   * @param totalTrans the total number of transactions
   * @param itemSets the set of (k-1)-item sets
   * @param size the value of (k-1)
   */
  public static FastVector mergeAllItemSets(FastVector itemSets, int size,
    int totalTrans) {

    FastVector newVector = new FastVector();
    LabeledItemSet result;
    int numFound, k;

    for (int i = 0; i < itemSets.size(); i++) {
      LabeledItemSet first = (LabeledItemSet) itemSets.elementAt(i);
      out: for (int j = i + 1; j < itemSets.size(); j++) {
        LabeledItemSet second = (LabeledItemSet) itemSets.elementAt(j);

        // Skip forward until a partner with the same class label is found;
        // give up on 'first' entirely when the list is exhausted.
        while (first.m_classLabel != second.m_classLabel) {
          j++;
          if (j == itemSets.size())
            break out;
          second = (LabeledItemSet) itemSets.elementAt(j);
        }
        result = new LabeledItemSet(totalTrans, first.m_classLabel);
        result.m_items = new int[first.m_items.length];

        // Find and copy common prefix of size 'size' (-1 = attribute absent).
        numFound = 0;
        k = 0;
        while (numFound < size) {
          if (first.m_items[k] == second.m_items[k]) {
            if (first.m_items[k] != -1)
              numFound++;
            result.m_items[k] = first.m_items[k];
          } else
            break out;
          k++;
        }

        // Check difference: beyond the prefix the sets must not both define a
        // value at the same position; copy whichever value is present.
        while (k < first.m_items.length) {
          if ((first.m_items[k] != -1) && (second.m_items[k] != -1))
            break;
          else {
            if (first.m_items[k] != -1)
              result.m_items[k] = first.m_items[k];
            else
              result.m_items[k] = second.m_items[k];
          }
          k++;
        }
        // Keep the candidate only if the scan reached the end (no conflict).
        if (k == first.m_items.length) {
          result.m_ruleSupCounter = 0;
          result.m_counter = 0;
          newVector.addElement(result);
        }
      }
    }
    return newVector;
  }

  /**
   * Splits the class attribute away. Depending on the invert flag, the
   * instances without class attribute or only the class attribute of all
   * instances is returned.
   *
   * @param instances the instances
   * @param invert flag; if true only the class attribute remains, otherwise
   *          the class attribute is the only attribute that is deleted.
   * @throws Exception exception if instances cannot be split
   * @return Instances without the class attribute or instances with only the
   *         class attribute
   */
  public static Instances divide(Instances instances, boolean invert)
    throws Exception {

    Instances newInstances = new Instances(instances);
    if (instances.classIndex() < 0)
      throw new Exception(
        "For class association rule mining a class attribute has to be specified.");
    if (invert) {
      // Delete everything except the class attribute; i-- compensates for the
      // left shift caused by each deletion.
      for (int i = 0; i < newInstances.numAttributes(); i++) {
        if (i != newInstances.classIndex()) {
          newInstances.deleteAttributeAt(i);
          i--;
        }
      }
      return newInstances;
    } else {
      // Class index must be cleared before the class attribute is deleted.
      newInstances.setClassIndex(-1);
      newInstances.deleteAttributeAt(instances.classIndex());
      return newInstances;
    }
  }

  /**
   * Converts the header info of the given set of instances into a set of item
   * sets (singletons). The ordering of values in the header file determines the
   * lexicographic order. Each item set knows its class label; one singleton is
   * created per (attribute value, class label) combination.
   *
   * @return a set of item sets, each containing a single item
   * @param instancesNoClass instances without the class attribute
   * @param classes the values of the class attribute sorted according to
   *          instances
   * @exception Exception if singletons can't be generated successfully (any
   *              numeric attribute is present)
   */
  public static FastVector singletons(Instances instancesNoClass,
    Instances classes) throws Exception {

    // NOTE(review): cSet is declared but never used.
    FastVector cSet, setOfItemSets = new FastVector();
    LabeledItemSet current;

    // make singletons
    for (int i = 0; i < instancesNoClass.numAttributes(); i++) {
      if (instancesNoClass.attribute(i).isNumeric())
        throw new Exception("Can't handle numeric attributes!");
      for (int j = 0; j < instancesNoClass.attribute(i).numValues(); j++) {
        for (int k = 0; k < (classes.attribute(0)).numValues(); k++) {
          current = new LabeledItemSet(instancesNoClass.numInstances(), k);
          current.m_items = new int[instancesNoClass.numAttributes()];
          for (int l = 0; l < instancesNoClass.numAttributes(); l++)
            current.m_items[l] = -1;
          current.m_items[i] = j;
          setOfItemSets.addElement(current);
        }
      }
    }
    return setOfItemSets;
  }

  /**
   * Prunes a set of (k)-item sets using the given (k-1)-item sets (every
   * (k-1)-subset of a surviving candidate must be frequent with the same
   * class label).
   *
   * @param toPrune the set of (k)-item sets to be pruned
   * @param kMinusOne the (k-1)-item sets to be used for pruning
   * @return the pruned set of item sets
   */
  public static FastVector pruneItemSets(FastVector toPrune, Hashtable kMinusOne) {

    FastVector newVector = new FastVector(toPrune.size());
    int help, j;

    for (int i = 0; i < toPrune.size(); i++) {
      LabeledItemSet current = (LabeledItemSet) toPrune.elementAt(i);

      for (j = 0; j < current.m_items.length; j++) {
        if (current.m_items[j] != -1) {
          // Temporarily blank item j to form a (k-1)-subset, look it up, then
          // restore. The extra label comparison is redundant (equals() used by
          // the hashtable already includes the label) but harmless.
          help = current.m_items[j];
          current.m_items[j] = -1;
          if (kMinusOne.get(current) != null
            && (current.m_classLabel == (((Integer) kMinusOne.get(current))
              .intValue())))
            current.m_items[j] = help;
          else {
            current.m_items[j] = help;
            break;
          }
        }
      }
      // j only reaches the array length if every subset lookup succeeded.
      if (j == current.m_items.length)
        newVector.addElement(current);
    }
    return newVector;
  }

  /**
   * Outputs the support for an item set. For a labeled item set this is the
   * rule support (items AND class label), not the plain item support.
   *
   * @return the support
   */
  @Override
  public final int support() {
    return m_ruleSupCounter;
  }

  /**
   * Updates counter of item set with respect to given transaction: m_counter
   * counts item matches, m_ruleSupCounter additionally requires the class
   * label to match.
   *
   * @param instanceNoClass instances without the class attribute
   * @param instanceClass the values of the class attribute sorted according to
   *          instances
   */
  public final void upDateCounter(Instance instanceNoClass,
    Instance instanceClass) {

    if (containedBy(instanceNoClass)) {
      m_counter++;
      if (this.m_classLabel == instanceClass.value(0))
        m_ruleSupCounter++;
    }
  }

  /**
   * Updates counter of item set with respect to given transaction, treating
   * zero values as missing.
   *
   * @param instanceNoClass instances without the class attribute
   * @param instanceClass the values of the class attribute sorted according to
   *          instances
   */
  public final void upDateCounterTreatZeroAsMissing(Instance instanceNoClass,
    Instance instanceClass) {
    if (containedByTreatZeroAsMissing(instanceNoClass)) {
      m_counter++;
      if (this.m_classLabel == instanceClass.value(0))
        m_ruleSupCounter++;
    }
  }

  /**
   * Updates counters of all given item sets over all given transactions.
   *
   * @param itemSets an item sets
   * @param instancesNoClass instances without the class attribute
   * @param instancesClass the values of the class attribute sorted according to
   *          instances
   */
  public static void upDateCounters(FastVector itemSets,
    Instances instancesNoClass, Instances instancesClass) {

    for (int i = 0; i < instancesNoClass.numInstances(); i++) {
      Enumeration enu = itemSets.elements();
      while (enu.hasMoreElements())
        ((LabeledItemSet) enu.nextElement()).upDateCounter(
          instancesNoClass.instance(i), instancesClass.instance(i));
    }
  }

  /**
   * Updates counters of all given item sets over all given transactions,
   * treating zero values as missing.
   *
   * @param itemSets an item sets
   * @param instancesNoClass instances without the class attribute
   * @param instancesClass the values of the class attribute sorted according to
   *          instances
   */
  public static void upDateCountersTreatZeroAsMissing(FastVector itemSets,
    Instances instancesNoClass, Instances instancesClass) {
    for (int i = 0; i < instancesNoClass.numInstances(); i++) {
      Enumeration enu = itemSets.elements();
      while (enu.hasMoreElements())
        ((LabeledItemSet) enu.nextElement()).upDateCounterTreatZeroAsMissing(
          instancesNoClass.instance(i), instancesClass.instance(i));
    }
  }

  /**
   * Generates rules out of item sets. Exactly one rule is produced: the items
   * as premise and the class label as consequence, with confidence
   * ruleSupport / itemSupport.
   *
   * @param minConfidence the minimum confidence
   * @param noPrune flag indicating whether the rules are pruned according to
   *          the minimum confidence value
   * @return a set of rules (premises, consequences, confidences)
   */
  public final FastVector[] generateRules(double minConfidence, boolean noPrune) {

    FastVector premises = new FastVector(), consequences = new FastVector(), conf =
      new FastVector();
    FastVector[] rules = new FastVector[3];
    ItemSet premise, consequence;

    // Generate all rules with class in the consequence.
    premise = new ItemSet(m_totalTransactions);
    consequence = new ItemSet(m_totalTransactions);
    int[] premiseItems = new int[m_items.length];
    int[] consequenceItems = new int[1];
    System.arraycopy(m_items, 0, premiseItems, 0, m_items.length);
    consequence.setItem(consequenceItems);
    premise.setItem(premiseItems);
    consequence.setItemAt(m_classLabel, 0);
    consequence.setCounter(this.m_ruleSupCounter);
    premise.setCounter(this.m_counter);
    premises.addElement(premise);
    consequences.addElement(consequence);
    conf.addElement(new Double((double) this.m_ruleSupCounter
      / (double) this.m_counter));

    rules[0] = premises;
    rules[1] = consequences;
    rules[2] = conf;
    if (!noPrune)
      pruneRules(rules, minConfidence);

    return rules;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 9722 $");
  }
}
14,514
31.399554
101
java
tsml-java
tsml-java-master/src/main/java/weka/associations/NominalItem.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * NominalItem.java * Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand * */ package weka.associations; import java.io.Serializable; import weka.core.Attribute; /** * Class that encapsulates a nominal item. * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision: 8034 $ */ public class NominalItem extends Item implements Serializable { /** For serialization */ private static final long serialVersionUID = 2182122099990462066L; /** The index of the value considered to be positive */ protected int m_valueIndex; /** * Constructs a new NominalItem. * * @param att the attribute that backs the item. * @param valueIndex the index of the value for this item. * @throws Exception if the NominalItem can't be constructed. */ public NominalItem(Attribute att, int valueIndex) throws Exception { super(att); if (att.isNumeric()) { throw new Exception("NominalItem must be constructed using a nominal attribute"); } m_attribute = att; if (m_attribute.numValues() == 1) { m_valueIndex = 0; // unary attribute (? used to indicate absence from a basket) } else { m_valueIndex = valueIndex; } } /** * Get the value index for this item. * * @return the value index. */ public int getValueIndex() { return m_valueIndex; } /** * Get this item's value as a String. * * @return this item's value as a String. 
*/ public String getItemValueAsString() { return m_attribute.value(m_valueIndex); } /** * Get this item's comparison operator as a String. * * @return this item's comparison operator as a String. */ public String getComparisonAsString() { return "="; } /** * A string representation of this item, (i.e. * <attribute name> <comparison operator> <item value>). * This default implementation just prints the attribute * name and (optionally) frequency information. * * @param freq true if the frequency should be included. * @return a string representation of this item. */ public String toString(boolean freq) { String result = m_attribute.name() + "=" + m_attribute.value(m_valueIndex); if (freq) { result += ":" + m_frequency; } return result; } /** * Equals. Just compares attribute and valueIndex. * @return true if this NominalItem is equal to the argument. */ public boolean equals(Object compareTo) { if (!(compareTo instanceof NominalItem)) { return false; } NominalItem b = (NominalItem)compareTo; if (m_attribute.equals(b.getAttribute()) && // m_frequency == b.getFrequency() && m_valueIndex == b.getValueIndex()) { return true; } return false; } }
3,499
26.34375
87
java
tsml-java
tsml-java-master/src/main/java/weka/associations/NumericItem.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * NumericItem.java
 * Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.associations;

import java.io.Serializable;

import weka.core.Attribute;
import weka.core.Utils;

/**
 * Class that encapsulates a numeric item: a numeric attribute together with a
 * comparison operator and a split-point value.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision: 8034 $
 */
public class NumericItem extends Item implements Serializable {

  /** For serialization */
  private static final long serialVersionUID = -7869433770765864800L;

  /** The comparison operators a numeric item can carry. */
  public static enum Comparison {
    NONE, EQUAL, LESS_THAN_OR_EQUAL_TO, GREATER_THAN;
  }

  /** The numeric test (split-point value) */
  protected double m_splitPoint;

  /** The comparison operator */
  protected Comparison m_comparison;

  /**
   * Constructs a new <code>NumericItem</code>.
   *
   * @param att the attribute that backs the item; must be numeric.
   * @param splitPoint the numeric test value.
   * @param comp the comparison operator.
   * @throws Exception if the supplied attribute is not numeric.
   */
  public NumericItem(Attribute att, double splitPoint, Comparison comp)
      throws Exception {
    super(att);

    if (!att.isNumeric()) {
      throw new Exception("NumericItem must be constructed using a numeric attribute");
    }
    m_comparison = comp;
    m_splitPoint = splitPoint;
  }

  /**
   * Gets the numeric test.
   *
   * @return the numeric test value for this item.
   */
  public double getSplitPoint() {
    return m_splitPoint;
  }

  /**
   * Gets the comparison operator for this item.
   *
   * @return the comparison operator for this item.
   */
  public Comparison getComparison() {
    return m_comparison;
  }

  /**
   * Get this item's value as a String (split point rounded to 3 decimal
   * places).
   * NOTE(review): toString() formats the same value with 4 decimal places —
   * confirm whether the mismatch is intentional.
   *
   * @return this item's value as a String.
   */
  public String getItemValueAsString() {
    return Utils.doubleToString(m_splitPoint, 3);
  }

  /**
   * Get this item's comparison operator as a String.
   *
   * @return this item's comparison operator as a String, or null when the
   *         comparison is {@link Comparison#NONE} (callers historically rely
   *         on this; do not change to a non-null default).
   */
  public String getComparisonAsString() {
    String result = null;

    switch (m_comparison) {
    case EQUAL:
      result = "=";
      break;
    case LESS_THAN_OR_EQUAL_TO:
      result = "<=";
      break;
    case GREATER_THAN:
      result = ">";
      break;
    }

    return result;
  }

  /**
   * A string representation of this item, i.e.
   * &lt;attribute name&gt; &lt;comparison operator&gt; &lt;item value&gt;,
   * optionally followed by ":" and the frequency.
   *
   * @param freq true if the frequency should be included.
   * @return a string representation of this item.
   */
  public String toString(boolean freq) {
    StringBuffer result = new StringBuffer();

    result.append(m_attribute.name() + " ");
    switch (m_comparison) {
    case EQUAL:
      result.append("=");
      break;
    case LESS_THAN_OR_EQUAL_TO:
      result.append("<=");
      break;
    case GREATER_THAN:
      result.append(">");
      break;
    }
    result.append(" " + Utils.doubleToString(m_splitPoint, 4));

    if (freq) {
      result.append(":" + m_frequency);
    }

    return result.toString();
  }

  /**
   * Equals. Compares the attribute, numeric test and comparison operator.
   * The split points are compared via {@link Double#doubleToLongBits(double)},
   * which matches the semantics of the previous
   * <code>new Double(..).equals(..)</code> implementation (NaN equals NaN,
   * +0.0 differs from -0.0) without the deprecated boxing.
   * NOTE(review): hashCode() is not overridden; hashing relies on whatever
   * the Item superclass provides — confirm before using NumericItem as a
   * hash key.
   *
   * @return true if this NumericItem is equal to the argument.
   */
  public boolean equals(Object compareTo) {
    if (!(compareTo instanceof NumericItem)) {
      return false;
    }

    NumericItem b = (NumericItem) compareTo;
    return m_attribute.equals(b.getAttribute())
        && m_comparison == b.getComparison()
        && Double.doubleToLongBits(m_splitPoint) == Double
            .doubleToLongBits(b.getSplitPoint());
  }
}
4,429
24.170455
87
java
tsml-java
tsml-java-master/src/main/java/weka/associations/PredictiveApriori.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * PredictiveApriori.java * Copyright (C) 2004 University of Waikato, Hamilton, New Zealand * */ package weka.associations; import weka.core.Capabilities; import weka.core.FastVector; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.util.Enumeration; import java.util.Hashtable; import java.util.TreeSet; import java.util.Vector; /** <!-- globalinfo-start --> * Class implementing the predictive apriori algorithm to mine association rules.<br/> * It searches with an increasing support threshold for the best 'n' rules concerning a support-based corrected confidence value.<br/> * <br/> * For more information see:<br/> * <br/> * Tobias Scheffer: Finding Association Rules That Trade Support Optimally against Confidence. In: 5th European Conference on Principles of Data Mining and Knowledge Discovery, 424-435, 2001.<br/> * <br/> * The implementation follows the paper expect for adding a rule to the output of the 'n' best rules. 
A rule is added if:<br/> * the expected predictive accuracy of this rule is among the 'n' best and it is not subsumed by a rule with at least the same expected predictive accuracy (out of an unpublished manuscript from T. Scheffer). * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Scheffer2001, * author = {Tobias Scheffer}, * booktitle = {5th European Conference on Principles of Data Mining and Knowledge Discovery}, * pages = {424-435}, * publisher = {Springer}, * title = {Finding Association Rules That Trade Support Optimally against Confidence}, * year = {2001} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -N &lt;required number of rules output&gt; * The required number of rules. (default = 100)</pre> * * <pre> -A * If set class association rules are mined. (default = no)</pre> * * <pre> -c &lt;the class index&gt; * The class index. (default = last)</pre> * <!-- options-end --> * * @author Stefan Mutter (mutter@cs.waikato.ac.nz) * @version $Revision: 6365 $ */ public class PredictiveApriori extends AbstractAssociator implements OptionHandler, CARuleMiner, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 8109088846865075341L; /** The minimum support. */ protected int m_premiseCount; /** The maximum number of rules that are output. */ protected int m_numRules; /** The number of rules created for the prior estimation. */ protected static final int m_numRandRules = 1000; /** The number of intervals used for the prior estimation. */ protected static final int m_numIntervals = 100; /** The set of all sets of itemsets. */ protected FastVector m_Ls; /** The same information stored in hash tables. */ protected FastVector m_hashtables; /** The list of all generated rules. */ protected FastVector[] m_allTheRules; /** The instances (transactions) to be used for generating the association rules. 
*/
  protected Instances m_instances;

  /** The hashtable containing the prior probabilities. */
  protected Hashtable m_priors;

  /** The mid points of the intervals used for the prior estimation. */
  protected double[] m_midPoints;

  /** The expected predictive accuracy a rule needs to be a candidate for the output. */
  protected double m_expectation;

  /** The n best rules. */
  protected TreeSet m_best;

  /** Flag keeping track if the list of the n best rules has changed. */
  protected boolean m_bestChanged;

  /** Counter for the time of generation for an association rule. */
  protected int m_count;

  /** The prior estimator. */
  protected PriorEstimation m_priorEstimator;

  /** The class index. */
  protected int m_classIndex;

  /** Flag indicating whether class association rules are mined. */
  protected boolean m_car;

  /**
   * Returns a string describing this associator.
   *
   * @return a description of the evaluator suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return
        "Class implementing the predictive apriori algorithm to mine "
      + "association rules.\n"
      + "It searches with an increasing support threshold for the best 'n' "
      + "rules concerning a support-based corrected confidence value.\n\n"
      + "For more information see:\n\n"
      + getTechnicalInformation().toString() + "\n\n"
      + "The implementation follows the paper expect for adding a rule to the "
      + "output of the 'n' best rules. A rule is added if:\n"
      + "the expected predictive accuracy of this rule is among the 'n' best "
      + "and it is not subsumed by a rule with at least the same expected "
      + "predictive accuracy (out of an unpublished manuscript from T. "
      + "Scheffer).";
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.INPROCEEDINGS);
    result.setValue(Field.AUTHOR, "Tobias Scheffer");
    result.setValue(Field.TITLE, "Finding Association Rules That Trade Support Optimally against Confidence");
    result.setValue(Field.BOOKTITLE, "5th European Conference on Principles of Data Mining and Knowledge Discovery");
    result.setValue(Field.YEAR, "2001");
    result.setValue(Field.PAGES, "424-435");
    result.setValue(Field.PUBLISHER, "Springer");

    return result;
  }

  /**
   * Constructor that allows to sets default values for the
   * minimum confidence and the maximum number of rules
   * the minimum confidence.
   */
  public PredictiveApriori() {
    resetOptions();
  }

  /**
   * Resets the options to the default values.
   * Note: {@code m_numRules} internally stores the user-requested rule count
   * plus 5 (see {@code setNumRules}/{@code getNumRules}), hence the default
   * of 105 corresponds to 100 requested rules.
   */
  public void resetOptions() {
    m_numRules = 105;
    m_premiseCount = 1;
    m_best = new TreeSet();
    m_bestChanged = false;
    m_expectation = 0;
    m_count = 1;
    m_car = false;
    m_classIndex = -1;
    m_priors = new Hashtable();
  }

  /**
   * Returns default capabilities of the classifier.
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes
    result.enable(Capability.NOMINAL_ATTRIBUTES);
    result.enable(Capability.MISSING_VALUES);

    // class
    result.enable(Capability.NO_CLASS);
    result.enable(Capability.NOMINAL_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    return result;
  }

  /**
   * Method that generates all large itemsets with a minimum support, and from
   * these all association rules.
   *
   * @param instances the instances to be used for generating the associations
   * @throws Exception if rules can't be built successfully
   */
  public void buildAssociations(Instances instances) throws Exception {
    // exactNumber is the user-visible rule count (internal m_numRules = requested + 5)
    int temp = m_premiseCount, exactNumber = m_numRules-5;

    m_premiseCount = 1;
    m_best = new TreeSet();
    m_bestChanged = false;
    m_expectation = 0;
    m_count = 1;
    m_instances = new Instances(instances);

    // -1 means "use the last attribute as the class"
    if (m_classIndex == -1)
      m_instances.setClassIndex(m_instances.numAttributes()-1);
    else if (m_classIndex < m_instances.numAttributes() && m_classIndex >= 0)
      m_instances.setClassIndex(m_classIndex);
    else
      throw new Exception("Invalid class index.");

    // can associator handle the data?
    getCapabilities().testWithFail(m_instances);

    // prior estimation
    m_priorEstimator = new PriorEstimation(m_instances,m_numRandRules,m_numIntervals,m_car);
    m_priors = m_priorEstimator.estimatePrior();
    m_midPoints = m_priorEstimator.getMidPoints();

    m_Ls = new FastVector();
    m_hashtables = new FastVector();

    // grow item sets one attribute at a time, raising the support
    // threshold (m_premiseCount) as better rules are found
    for(int i =1; i < m_instances.numAttributes();i++){
      m_bestChanged = false;

      if(!m_car){
        // find large item sets
        findLargeItemSets(i);
        //find association rules (rule generation procedure)
        findRulesQuickly();
      }
      else{
        findLargeCarItemSets(i);
        findCaRulesQuickly();
      }

      if(m_bestChanged){
        temp =m_premiseCount;
        // raise the support threshold until a rule at that support could
        // still beat the current minimum expected predictive accuracy
        while(RuleGeneration.expectation(m_premiseCount, m_premiseCount,m_midPoints,m_priors) <= m_expectation){
          m_premiseCount++;
          if(m_premiseCount > m_instances.numInstances())
            break;
        }
      }

      // support threshold exceeded the number of instances: no further rule
      // can qualify, so emit the best rules found so far and stop
      if(m_premiseCount > m_instances.numInstances()){

        // Reserve space for variables
        m_allTheRules = new FastVector[3];
        m_allTheRules[0] = new FastVector();
        m_allTheRules[1] = new FastVector();
        m_allTheRules[2] = new FastVector();

        // drain m_best from best (last) to worst
        int k = 0;
        while(m_best.size()>0 && exactNumber > 0){
          m_allTheRules[0].insertElementAt((ItemSet)((RuleItem)m_best.last()).premise(),k);
          m_allTheRules[1].insertElementAt((ItemSet)((RuleItem)m_best.last()).consequence(),k);
          m_allTheRules[2].insertElementAt(new Double(((RuleItem)m_best.last()).accuracy()),k);
          m_best.remove(m_best.last());
          k++;
          exactNumber--;
        }
        return;
      }

      // the support threshold was raised: prune item sets that no longer
      // reach the new minimum support
      if(temp != m_premiseCount && m_Ls.size() > 0){
        FastVector kSets = (FastVector)m_Ls.lastElement();
        m_Ls.removeElementAt(m_Ls.size()-1);
        kSets = ItemSet.deleteItemSets(kSets, m_premiseCount,Integer.MAX_VALUE);
        m_Ls.addElement(kSets);
      }
    }

    // Reserve space for variables
    m_allTheRules = new FastVector[3];
    m_allTheRules[0] = new FastVector();
    m_allTheRules[1] = new FastVector();
    m_allTheRules[2] = new FastVector();

    // drain m_best from best (last) to worst
    int k = 0;
    while(m_best.size()>0 && exactNumber > 0){
      m_allTheRules[0].insertElementAt((ItemSet)((RuleItem)m_best.last()).premise(),k);
      m_allTheRules[1].insertElementAt((ItemSet)((RuleItem)m_best.last()).consequence(),k);
      m_allTheRules[2].insertElementAt(new Double(((RuleItem)m_best.last()).accuracy()),k);
      m_best.remove(m_best.last());
      k++;
      exactNumber--;
    }
  }

  /**
   * Method that mines the n best class association rules.
   *
   * @return a sorted array of FastVector (depending on the expected predictive accuracy) containing the rules and metric information
   * @param data the instances for which class association rules should be mined
   * @throws Exception if rules can't be built successfully
   */
  public FastVector[] mineCARs(Instances data) throws Exception{
    m_car = true;
    m_best = new TreeSet();
    m_premiseCount = 1;
    m_bestChanged = false;
    m_expectation = 0;
    m_count = 1;
    buildAssociations(data);

    FastVector[] allCARRules = new FastVector[3];
    allCARRules[0] = new FastVector();
    allCARRules[1] = new FastVector();
    allCARRules[2] = new FastVector();

    // rebuild each premise without the class attribute column and each
    // consequence as a single-item set holding only the class value
    for(int k =0; k < m_allTheRules[0].size();k++){
      int[] newPremiseArray = new int[m_instances.numAttributes()-1];
      int help = 0;
      for(int j = 0;j < m_instances.numAttributes();j++){
        if(j != m_instances.classIndex()){
          newPremiseArray[help] = ((ItemSet)m_allTheRules[0].elementAt(k)).itemAt(j);
          help++;
        }
      }
      ItemSet newPremise = new ItemSet(m_instances.numInstances(), newPremiseArray);
      newPremise.setCounter(((ItemSet)m_allTheRules[0].elementAt(k)).counter());
      allCARRules[0].addElement(newPremise);

      int[] newConsArray = new int[1];
      newConsArray[0] =((ItemSet)m_allTheRules[1].elementAt(k)).itemAt(m_instances.classIndex());
      ItemSet newCons = new ItemSet(m_instances.numInstances(), newConsArray);
      newCons.setCounter(((ItemSet)m_allTheRules[1].elementAt(k)).counter());
      allCARRules[1].addElement(newCons);

      allCARRules[2].addElement(m_allTheRules[2].elementAt(k));
    }

    return allCARRules;
  }

  /**
   * Gets the instances without the class attribute.
   *
   * @return instances without class attribute, or null if the split fails
   */
  public Instances getInstancesNoClass() {
    Instances noClass = null;
    try{
      noClass = LabeledItemSet.divide(m_instances,false);
    }
    catch(Exception e){
      e.printStackTrace();
      System.out.println("\n"+e.getMessage());
    }
    //System.out.println(noClass);
    return noClass;
  }

  /**
   * Gets the class attribute of all instances.
   *
   * @return Instances containing only the class attribute, or null if the split fails
   */
  public Instances getInstancesOnlyClass() {
    Instances onlyClass = null;
    try{
      onlyClass = LabeledItemSet.divide(m_instances,true);
    }
    catch(Exception e){
      e.printStackTrace();
      System.out.println("\n"+e.getMessage());
    }
    return onlyClass;
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    String string1 = "\tThe required number of rules. (default = " + (m_numRules-5) + ")",
      string2 = "\tIf set class association rules are mined. (default = no)",
      string3 = "\tThe class index. (default = last)";
    FastVector newVector = new FastVector(3);

    newVector.addElement(new Option(string1, "N", 1, "-N <required number of rules output>"));
    newVector.addElement(new Option(string2, "A", 0, "-A"));
    newVector.addElement(new Option(string3, "c", 1, "-c <the class index>"));
    return newVector.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   * <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -N &lt;required number of rules output&gt;
   *  The required number of rules. (default = 100)</pre>
   *
   * <pre> -A
   *  If set class association rules are mined. (default = no)</pre>
   *
   * <pre> -c &lt;the class index&gt;
   *  The class index. (default = last)</pre>
   * <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    resetOptions();
    String numRulesString = Utils.getOption('N', options);
    // user-facing count is stored internally with a +5 offset
    if (numRulesString.length() != 0)
      m_numRules = Integer.parseInt(numRulesString)+5;
    else
      m_numRules = Integer.MAX_VALUE;

    String classIndexString = Utils.getOption('c',options);
    if (classIndexString.length() != 0)
      m_classIndex = Integer.parseInt(classIndexString);
    m_car = Utils.getFlag('A', options);
  }

  /**
   * Gets the current settings of the PredictiveApriori object.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String [] getOptions() {
    Vector result;

    result = new Vector();

    result.add("-N");
    result.add("" + (m_numRules-5));

    if (m_car)
      result.add("-A");

    result.add("-c");
    result.add("" + m_classIndex);

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * Outputs the association rules.
   *
   * @return a string representation of the model
   */
  public String toString() {
    StringBuffer text = new StringBuffer();

    if (m_allTheRules[0].size() == 0)
      return "\nNo large itemsets and rules found!\n";
    text.append("\nPredictiveApriori\n===================\n\n");
    text.append("\nBest rules found:\n\n");

    for (int i = 0; i < m_allTheRules[0].size(); i++) {
      text.append(Utils.doubleToString((double)i+1, (int)(Math.log(m_numRules)/Math.log(10)+1),0)+ ". " + ((ItemSet)m_allTheRules[0].elementAt(i)).toString(m_instances) + " ==> " + ((ItemSet)m_allTheRules[1].elementAt(i)).toString(m_instances) +" acc:("+ Utils.doubleToString(((Double)m_allTheRules[2].elementAt(i)).doubleValue(),5)+")");
      text.append('\n');
    }
    return text.toString();
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String numRulesTipText() {
    return "Number of rules to find.";
  }

  /**
   * Get the value of the number of required rules.
   *
   * @return Value of the number of required rules.
   */
  public int getNumRules() {
    return m_numRules-5;
  }

  /**
   * Set the value of required rules.
   *
   * @param v Value to assign to number of required rules.
   */
  public void setNumRules(int v) {
    m_numRules = v+5;
  }

  /**
   * Sets the class index
   * @param index the index of the class attribute
   */
  public void setClassIndex(int index){
    m_classIndex = index;
  }

  /**
   * Gets the index of the class attribute
   * @return the index of the class attribute
   */
  public int getClassIndex(){
    return m_classIndex;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String classIndexTipText() {
    return "Index of the class attribute.\n If set to -1, the last attribute will be taken as the class attribute.";
  }

  /**
   * Sets class association rule mining
   * @param flag if class association rules are mined, false otherwise
   */
  public void setCar(boolean flag){
    m_car = flag;
  }

  /**
   * Gets whether class association rules are mined
   * @return true if class association rules are mined, false otherwise
   */
  public boolean getCar(){
    return m_car;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String carTipText() {
    return "If enabled class association rules are mined instead of (general) association rules.";
  }

  /**
   * Returns the metric string for the chosen metric type.
   * Predictive apriori uses the estimated predictive accuracy.
   * Therefore the metric string is "acc".
   * @return string "acc"
   */
  public String metricString() {
    return "acc";
  }

  /**
   * Method that finds all large itemsets for the given set of instances.
   *
   * @param index the instances to be used
   * @throws Exception if an attribute is numeric
   */
  private void findLargeItemSets(int index) throws Exception {
    FastVector kMinusOneSets, kSets = new FastVector();
    Hashtable hashtable;
    int i = 0;

    // Find large itemsets
    //of length 1
    if(index == 1){
      kSets = ItemSet.singletons(m_instances);
      ItemSet.upDateCounters(kSets, m_instances);
      kSets = ItemSet.deleteItemSets(kSets, m_premiseCount,Integer.MAX_VALUE);
      if (kSets.size() == 0)
        return;
      m_Ls.addElement(kSets);
    }

    //of length > 1
    if(index >1){
      if(m_Ls.size() > 0)
        kSets = (FastVector)m_Ls.lastElement();
      m_Ls.removeAllElements();
      i = index-2;
      kMinusOneSets = kSets;
      kSets = ItemSet.mergeAllItemSets(kMinusOneSets, i, m_instances.numInstances());
      hashtable = ItemSet.getHashtable(kMinusOneSets, kMinusOneSets.size());
      m_hashtables.addElement(hashtable);
      kSets = ItemSet.pruneItemSets(kSets, hashtable);
      ItemSet.upDateCounters(kSets, m_instances);
      kSets = ItemSet.deleteItemSets(kSets, m_premiseCount,Integer.MAX_VALUE);
      if(kSets.size() == 0)
        return;
      m_Ls.addElement(kSets);
    }
  }

  /**
   * Method that finds all association rules.
   *
   * @throws Exception if an attribute is numeric
   */
  private void findRulesQuickly() throws Exception {
    RuleGeneration currentItemSet;

    // Build rules
    for (int j = 0; j < m_Ls.size(); j++) {
      FastVector currentItemSets = (FastVector)m_Ls.elementAt(j);
      Enumeration enumItemSets = currentItemSets.elements();
      while (enumItemSets.hasMoreElements()) {
        currentItemSet = new RuleGeneration((ItemSet)enumItemSets.nextElement());
        m_best = currentItemSet.generateRules(m_numRules-5, m_midPoints,m_priors,m_expectation, m_instances,m_best,m_count);

        m_count = currentItemSet.m_count;
        if(!m_bestChanged && currentItemSet.m_change)
          m_bestChanged = true;
        //update minimum expected predictive accuracy to get into the n best
        if(m_best.size() >= m_numRules-5)
          m_expectation = ((RuleItem)m_best.first()).accuracy();
        else
          m_expectation =0;
      }
    }
  }

  /**
   * Method that finds all large itemsets for class association rule mining for the given set of instances.
   *
   * @param index the size of the large item sets
   * @throws Exception if an attribute is numeric
   */
  private void findLargeCarItemSets(int index) throws Exception {
    FastVector kMinusOneSets, kSets = new FastVector();
    Hashtable hashtable;
    int i = 0;

    // Find large itemsets
    if(index == 1){
      kSets = CaRuleGeneration.singletons(m_instances);
      ItemSet.upDateCounters(kSets, m_instances);
      kSets = ItemSet.deleteItemSets(kSets, m_premiseCount,Integer.MAX_VALUE);
      if (kSets.size() == 0)
        return;
      m_Ls.addElement(kSets);
    }

    if(index >1){
      if(m_Ls.size() > 0)
        kSets = (FastVector)m_Ls.lastElement();
      m_Ls.removeAllElements();
      i = index-2;
      kMinusOneSets = kSets;
      kSets = ItemSet.mergeAllItemSets(kMinusOneSets, i, m_instances.numInstances());
      hashtable = ItemSet.getHashtable(kMinusOneSets, kMinusOneSets.size());
      m_hashtables.addElement(hashtable);
      kSets = ItemSet.pruneItemSets(kSets, hashtable);
      ItemSet.upDateCounters(kSets, m_instances);
      kSets = ItemSet.deleteItemSets(kSets, m_premiseCount,Integer.MAX_VALUE);
      if(kSets.size() == 0)
        return;
      m_Ls.addElement(kSets);
    }
  }

  /**
   * Method that finds all class association rules.
   *
   * @throws Exception if an attribute is numeric
   */
  private void findCaRulesQuickly() throws Exception {
    CaRuleGeneration currentLItemSet;

    // Build rules
    for (int j = 0; j < m_Ls.size(); j++) {
      FastVector currentItemSets = (FastVector)m_Ls.elementAt(j);
      Enumeration enumItemSets = currentItemSets.elements();
      while (enumItemSets.hasMoreElements()) {
        currentLItemSet = new CaRuleGeneration((ItemSet)enumItemSets.nextElement());
        m_best = currentLItemSet.generateRules(m_numRules-5, m_midPoints,m_priors,m_expectation, m_instances,m_best,m_count);
        m_count = currentLItemSet.count();
        if(!m_bestChanged && currentLItemSet.change())
          m_bestChanged = true;
        // NOTE(review): uses == here where findRulesQuickly uses >= — presumably
        // equivalent in practice since m_best never exceeds m_numRules-5; confirm.
        if(m_best.size() == m_numRules-5)
          m_expectation = ((RuleItem)m_best.first()).accuracy();
        else
          m_expectation = 0;
      }
    }
  }

  /**
   * returns all the rules
   *
   * @return all the rules
   * @see #m_allTheRules
   */
  public FastVector[] getAllTheRules() {
    return m_allTheRules;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 6365 $");
  }

  /**
   * Main method.
   *
   * @param args the commandline parameters
   */
  public static void main(String[] args) {
    runAssociator(new PredictiveApriori(), args);
  }
}
25,198
30.617315
208
java
tsml-java
tsml-java-master/src/main/java/weka/associations/PriorEstimation.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * PriorEstimation.java * Copyright (C) 2004 University of Waikato, Hamilton, New Zealand * */ package weka.associations; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.SpecialFunctions; import weka.core.Utils; import java.io.Serializable; import java.util.Hashtable; import java.util.Random; /** * Class implementing the prior estimattion of the predictive apriori algorithm * for mining association rules. * * Reference: T. Scheffer (2001). <i>Finding Association Rules That Trade Support * Optimally against Confidence</i>. Proc of the 5th European Conf. * on Principles and Practice of Knowledge Discovery in Databases (PKDD'01), * pp. 424-435. Freiburg, Germany: Springer-Verlag. <p> * * @author Stefan Mutter (mutter@cs.waikato.ac.nz) * @version $Revision: 1.7 $ */ public class PriorEstimation implements Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = 5570863216522496271L; /** The number of rnadom rules. */ protected int m_numRandRules; /** The number of intervals. */ protected int m_numIntervals; /** The random seed used for the random rule generation step. */ protected static final int SEED = 0; /** The maximum number of attributes for which a prior can be estimated. 
*/ protected static final int MAX_N = 1024; /** The random number generator. */ protected Random m_randNum; /** The instances for which association rules are mined. */ protected Instances m_instances; /** Flag indicating whether standard association rules or class association rules are mined. */ protected boolean m_CARs; /** Hashtable to store the confidence values of randomly generated rules. */ protected Hashtable m_distribution; /** Hashtable containing the estimated prior probabilities. */ protected Hashtable m_priors; /** Sums up the confidences of all rules with a certain length. */ protected double m_sum; /** The mid points of the discrete intervals in which the interval [0,1] is divided. */ protected double[] m_midPoints; /** * Constructor * * @param instances the instances to be used for generating the associations * @param numRules the number of random rules used for generating the prior * @param numIntervals the number of intervals to discretise [0,1] * @param car flag indicating whether standard or class association rules are mined */ public PriorEstimation(Instances instances,int numRules,int numIntervals,boolean car) { m_instances = instances; m_CARs = car; m_numRandRules = numRules; m_numIntervals = numIntervals; m_randNum = m_instances.getRandomNumberGenerator(SEED); } /** * Calculates the prior distribution. 
* * @exception Exception if prior can't be estimated successfully */ public final void generateDistribution() throws Exception{ boolean jump; int i,maxLength = m_instances.numAttributes(), count =0,count1=0, ruleCounter; int [] itemArray; m_distribution = new Hashtable(maxLength*m_numIntervals); RuleItem current; ItemSet generate; if(m_instances.numAttributes() == 0) throw new Exception("Dataset has no attributes!"); if(m_instances.numAttributes() >= MAX_N) throw new Exception("Dataset has to many attributes for prior estimation!"); if(m_instances.numInstances() == 0) throw new Exception("Dataset has no instances!"); for (int h = 0; h < maxLength; h++) { if (m_instances.attribute(h).isNumeric()) throw new Exception("Can't handle numeric attributes!"); } if(m_numIntervals == 0 || m_numRandRules == 0) throw new Exception("Prior initialisation impossible"); //calculate mid points for the intervals midPoints(); //create random rules of length i and measure their support and if support >0 their confidence for(i = 1;i <= maxLength; i++){ m_sum = 0; int j = 0; count = 0; count1 = 0; while(j < m_numRandRules){ count++; jump =false; if(!m_CARs){ itemArray = randomRule(maxLength,i,m_randNum); current = splitItemSet(m_randNum.nextInt(i), itemArray); } else{ itemArray = randomCARule(maxLength,i,m_randNum); current = addCons(itemArray); } int [] ruleItem = new int[maxLength]; for(int k =0; k < itemArray.length;k++){ if(current.m_premise.m_items[k] != -1) ruleItem[k] = current.m_premise.m_items[k]; else if(current.m_consequence.m_items[k] != -1) ruleItem[k] = current.m_consequence.m_items[k]; else ruleItem[k] = -1; } ItemSet rule = new ItemSet(ruleItem); updateCounters(rule); ruleCounter = rule.m_counter; if(ruleCounter > 0) jump =true; updateCounters(current.m_premise); j++; if(jump){ buildDistribution((double)ruleCounter/(double)current.m_premise.m_counter, (double)i); } } //normalize if(m_sum > 0){ for(int w = 0; w < m_midPoints.length;w++){ String key = 
(String.valueOf(m_midPoints[w])).concat(String.valueOf((double)i)); Double oldValue = (Double)m_distribution.remove(key); if(oldValue == null){ m_distribution.put(key,new Double(1.0/m_numIntervals)); m_sum += 1.0/m_numIntervals; } else m_distribution.put(key,oldValue); } for(int w = 0; w < m_midPoints.length;w++){ double conf =0; String key = (String.valueOf(m_midPoints[w])).concat(String.valueOf((double)i)); Double oldValue = (Double)m_distribution.remove(key); if(oldValue != null){ conf = oldValue.doubleValue() / m_sum; m_distribution.put(key,new Double(conf)); } } } else{ for(int w = 0; w < m_midPoints.length;w++){ String key = (String.valueOf(m_midPoints[w])).concat(String.valueOf((double)i)); m_distribution.put(key,new Double(1.0/m_numIntervals)); } } } } /** * Constructs an item set of certain length randomly. * This method is used for standard association rule mining. * @param maxLength the number of attributes of the instances * @param actualLength the number of attributes that should be present in the item set * @param randNum the random number generator * @return a randomly constructed item set in form of an int array */ public final int[] randomRule(int maxLength, int actualLength, Random randNum){ int[] itemArray = new int[maxLength]; for(int k =0;k < itemArray.length;k++) itemArray[k] = -1; int help =actualLength; if(help == maxLength){ help = 0; for(int h = 0; h < itemArray.length; h++){ itemArray[h] = m_randNum.nextInt((m_instances.attribute(h)).numValues()); } } while(help > 0){ int mark = randNum.nextInt(maxLength); if(itemArray[mark] == -1){ help--; itemArray[mark] = m_randNum.nextInt((m_instances.attribute(mark)).numValues()); } } return itemArray; } /** * Constructs an item set of certain length randomly. * This method is used for class association rule mining. 
* @param maxLength the number of attributes of the instances * @param actualLength the number of attributes that should be present in the item set * @param randNum the random number generator * @return a randomly constructed item set in form of an int array */ public final int[] randomCARule(int maxLength, int actualLength, Random randNum){ int[] itemArray = new int[maxLength]; for(int k =0;k < itemArray.length;k++) itemArray[k] = -1; if(actualLength == 1) return itemArray; int help =actualLength-1; if(help == maxLength-1){ help = 0; for(int h = 0; h < itemArray.length; h++){ if(h != m_instances.classIndex()){ itemArray[h] = m_randNum.nextInt((m_instances.attribute(h)).numValues()); } } } while(help > 0){ int mark = randNum.nextInt(maxLength); if(itemArray[mark] == -1 && mark != m_instances.classIndex()){ help--; itemArray[mark] = m_randNum.nextInt((m_instances.attribute(mark)).numValues()); } } return itemArray; } /** * updates the distribution of the confidence values. * For every confidence value the interval to which it belongs is searched * and the confidence is added to the confidence already found in this * interval. 
* @param conf the confidence of the randomly created rule * @param length the legnth of the randomly created rule */ public final void buildDistribution(double conf, double length){ double mPoint = findIntervall(conf); String key = (String.valueOf(mPoint)).concat(String.valueOf(length)); m_sum += conf; Double oldValue = (Double)m_distribution.remove(key); if(oldValue != null) conf = conf + oldValue.doubleValue(); m_distribution.put(key,new Double(conf)); } /** * searches the mid point of the interval a given confidence value falls into * @param conf the confidence of a rule * @return the mid point of the interval the confidence belongs to */ public final double findIntervall(double conf){ if(conf == 1.0) return m_midPoints[m_midPoints.length-1]; int end = m_midPoints.length-1; int start = 0; while (Math.abs(end-start) > 1) { int mid = (start + end) / 2; if (conf > m_midPoints[mid]) start = mid+1; if (conf < m_midPoints[mid]) end = mid-1; if(conf == m_midPoints[mid]) return m_midPoints[mid]; } if(Math.abs(conf-m_midPoints[start]) <= Math.abs(conf-m_midPoints[end])) return m_midPoints[start]; else return m_midPoints[end]; } /** * calculates the numerator and the denominator of the prior equation * @param weighted indicates whether the numerator or the denominator is calculated * @param mPoint the mid Point of an interval * @return the numerator or denominator of the prior equation */ public final double calculatePriorSum(boolean weighted, double mPoint){ double distr, sum =0, max = logbinomialCoefficient(m_instances.numAttributes(),(int)m_instances.numAttributes()/2); for(int i = 1; i <= m_instances.numAttributes(); i++){ if(weighted){ String key = (String.valueOf(mPoint)).concat(String.valueOf((double)i)); Double hashValue = (Double)m_distribution.get(key); if(hashValue !=null) distr = hashValue.doubleValue(); else distr = 0; //distr = 1.0/m_numIntervals; if(distr != 0){ double addend = Utils.log2(distr) - max + Utils.log2((Math.pow(2,i)-1)) + 
logbinomialCoefficient(m_instances.numAttributes(),i); sum = sum + Math.pow(2,addend); } } else{ double addend = Utils.log2((Math.pow(2,i)-1)) - max + logbinomialCoefficient(m_instances.numAttributes(),i); sum = sum + Math.pow(2,addend); } } return sum; } /** * Method that calculates the base 2 logarithm of a binomial coefficient * @param upperIndex upper Inedx of the binomial coefficient * @param lowerIndex lower index of the binomial coefficient * @return the base 2 logarithm of the binomial coefficient */ public static final double logbinomialCoefficient(int upperIndex, int lowerIndex){ double result =1.0; if(upperIndex == lowerIndex || lowerIndex == 0) return result; result = SpecialFunctions.log2Binomial((double)upperIndex, (double)lowerIndex); return result; } /** * Method to estimate the prior probabilities * @throws Exception throws exception if the prior cannot be calculated * @return a hashtable containing the prior probabilities */ public final Hashtable estimatePrior() throws Exception{ double distr, prior, denominator, mPoint; Hashtable m_priors = new Hashtable(m_numIntervals); denominator = calculatePriorSum(false,1.0); generateDistribution(); for(int i = 0; i < m_numIntervals; i++){ mPoint = m_midPoints[i]; prior = calculatePriorSum(true,mPoint) / denominator; m_priors.put(new Double(mPoint), new Double(prior)); } return m_priors; } /** * split the interval [0,1] into a predefined number of intervals and calculates their mid points */ public final void midPoints(){ m_midPoints = new double[m_numIntervals]; for(int i = 0; i < m_numIntervals; i++) m_midPoints[i] = midPoint(1.0/m_numIntervals, i); } /** * calculates the mid point of an interval * @param size the size of each interval * @param number the number of the interval. * The intervals are numbered from 0 to m_numIntervals. 
* @return the mid point of the interval */ public double midPoint(double size, int number){ return (size * (double)number) + (size / 2.0); } /** * returns an ordered array of all mid points * @return an ordered array of doubles conatining all midpoints */ public final double[] getMidPoints(){ return m_midPoints; } /** * splits an item set into premise and consequence and constructs therefore * an association rule. The length of the premise is given. The attributes * for premise and consequence are chosen randomly. The result is a RuleItem. * @param premiseLength the length of the premise * @param itemArray a (randomly generated) item set * @return a randomly generated association rule stored in a RuleItem */ public final RuleItem splitItemSet (int premiseLength, int[] itemArray){ int[] cons = new int[m_instances.numAttributes()]; System.arraycopy(itemArray, 0, cons, 0, itemArray.length); int help = premiseLength; while(help > 0){ int mark = m_randNum.nextInt(itemArray.length); if(cons[mark] != -1){ help--; cons[mark] =-1; } } if(premiseLength == 0) for(int i =0; i < itemArray.length;i++) itemArray[i] = -1; else for(int i =0; i < itemArray.length;i++) if(cons[i] != -1) itemArray[i] = -1; ItemSet premise = new ItemSet(itemArray); ItemSet consequence = new ItemSet(cons); RuleItem current = new RuleItem(); current.m_premise = premise; current.m_consequence = consequence; return current; } /** * generates a class association rule out of a given premise. * It randomly chooses a class label as consequence. 
* @param itemArray the (randomly constructed) premise of the class association rule * @return a class association rule stored in a RuleItem */ public final RuleItem addCons (int[] itemArray){ ItemSet premise = new ItemSet(itemArray); int[] cons = new int[itemArray.length]; for(int i =0;i < itemArray.length;i++) cons[i] = -1; cons[m_instances.classIndex()] = m_randNum.nextInt((m_instances.attribute(m_instances.classIndex())).numValues()); ItemSet consequence = new ItemSet(cons); RuleItem current = new RuleItem(); current.m_premise = premise; current.m_consequence = consequence; return current; } /** * updates the support count of an item set * @param itemSet the item set */ public final void updateCounters(ItemSet itemSet){ for (int i = 0; i < m_instances.numInstances(); i++) itemSet.upDateCounter(m_instances.instance(i)); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.7 $"); } }
18,538
36.680894
142
java
tsml-java
tsml-java-master/src/main/java/weka/associations/RuleGeneration.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * RuleGeneration.java * Copyright (C) 2004 University of Waikato, Hamilton, New Zealand * */ package weka.associations; import weka.core.FastVector; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Statistics; import weka.core.Utils; import java.io.Serializable; import java.util.Hashtable; import java.util.TreeSet; /** * Class implementing the rule generation procedure of the predictive apriori algorithm. * * Reference: T. Scheffer (2001). <i>Finding Association Rules That Trade Support * Optimally against Confidence</i>. Proc of the 5th European Conf. * on Principles and Practice of Knowledge Discovery in Databases (PKDD'01), * pp. 424-435. Freiburg, Germany: Springer-Verlag. <p> * * The implementation follows the paper expect for adding a rule to the output of the * <i>n</i> best rules. A rule is added if: * the expected predictive accuracy of this rule is among the <i>n</i> best and it is * not subsumed by a rule with at least the same expected predictive accuracy * (out of an unpublished manuscript from T. Scheffer). 
* * @author Stefan Mutter (mutter@cs.waikato.ac.nz) * @version $Revision: 1.4 $ */ public class RuleGeneration implements Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -8927041669872491432L; /** The items stored as an array of of integer. */ protected int[] m_items; /** Counter for how many transactions contain this item set. */ protected int m_counter; /** The total number of transactions */ protected int m_totalTransactions; /** Flag indicating whether the list fo the best rules has changed. */ protected boolean m_change = false; /** The minimum expected predictive accuracy that is needed to be a candidate for the list of the best rules. */ protected double m_expectation; /** Threshold. If the support of the premise is higher the binomial distrubution is approximated by a normal one. */ protected static final int MAX_N = 300; /** The minimum support a rule needs to be a candidate for the list of the best rules. */ protected int m_minRuleCount; /** Sorted array of the mied points of the intervals used for prior estimation. */ protected double[] m_midPoints; /** Hashtable conatining the estimated prior probabilities. */ protected Hashtable m_priors; /** The list of the actual <i>n</i> best rules. */ protected TreeSet m_best; /** Integer indicating the generation time of a rule. */ protected int m_count; /** The instances. */ protected Instances m_instances; /** * Constructor * @param itemSet item set for that rules should be generated. * The item set will form the premise of the rules. */ public RuleGeneration(ItemSet itemSet){ m_totalTransactions = itemSet.m_totalTransactions; m_counter = itemSet.m_counter; m_items = itemSet.m_items; } /** * calculates the probability using a binomial distribution. * If the support of the premise is too large this distribution * is approximated by a normal distribution. 
* @param accuracy the accuracy value * @param ruleCount the support of the whole rule * @param premiseCount the support of the premise * @return the probability value */ public static final double binomialDistribution(double accuracy, double ruleCount, double premiseCount){ double mu, sigma; if(premiseCount < MAX_N) return Math.pow(2,(Utils.log2(Math.pow(accuracy,ruleCount))+Utils.log2(Math.pow((1.0-accuracy),(premiseCount-ruleCount)))+PriorEstimation.logbinomialCoefficient((int)premiseCount,(int)ruleCount))); else{ mu = premiseCount * accuracy; sigma = Math.sqrt((premiseCount * (1.0 - accuracy))*accuracy); return Statistics.normalProbability(((ruleCount+0.5)-mu)/(sigma*Math.sqrt(2))); } } /** * calculates the expected predctive accuracy of a rule * @param ruleCount the support of the rule * @param premiseCount the premise support of the rule * @param midPoints array with all mid points * @param priors hashtable containing the prior probabilities * @return the expected predictive accuracy */ public static final double expectation(double ruleCount, int premiseCount,double[] midPoints, Hashtable priors){ double numerator = 0, denominator = 0; for(int i = 0;i < midPoints.length; i++){ Double actualPrior = (Double)priors.get(new Double(midPoints[i])); if(actualPrior != null){ if(actualPrior.doubleValue() != 0){ double addend = actualPrior.doubleValue() * binomialDistribution(midPoints[i], ruleCount, (double)premiseCount); denominator += addend; numerator += addend*midPoints[i]; } } } if(denominator <= 0 || Double.isNaN(denominator)) System.out.println("RuleItem denominator: "+denominator); if(numerator <= 0 || Double.isNaN(numerator)) System.out.println("RuleItem numerator: "+numerator); return numerator/denominator; } /** * Generates all rules for an item set. The item set is the premise. * @param numRules the number of association rules the use wants to mine. * This number equals the size <i>n</i> of the list of the * best rules. 
* @param midPoints the mid points of the intervals * @param priors Hashtable that contains the prior probabilities * @param expectation the minimum value of the expected predictive accuracy * that is needed to get into the list of the best rules * @param instances the instances for which association rules are generated * @param best the list of the <i>n</i> best rules. * The list is implemented as a TreeSet * @param genTime the maximum time of generation * @return all the rules with minimum confidence for the given item set */ public TreeSet generateRules(int numRules, double[] midPoints, Hashtable priors, double expectation, Instances instances,TreeSet best,int genTime) { boolean redundant = false; FastVector consequences = new FastVector(), consequencesMinusOne = new FastVector(); ItemSet premise; int s = 0; RuleItem current = null, old; Hashtable hashtable; m_change = false; m_midPoints = midPoints; m_priors = priors; m_best = best; m_expectation = expectation; m_count = genTime; m_instances = instances; //create rule body premise =null; premise = new ItemSet(m_totalTransactions); premise.m_items = new int[m_items.length]; System.arraycopy(m_items, 0, premise.m_items, 0, m_items.length); premise.m_counter = m_counter; do{ m_minRuleCount = 1; while(expectation((double)m_minRuleCount,premise.m_counter,m_midPoints,m_priors) <= m_expectation){ m_minRuleCount++; if(m_minRuleCount > premise.m_counter) return m_best; } redundant = false; for(int i = 0; i < instances.numAttributes();i++){ if(i == 0){ for(int j = 0; j < m_items.length;j++) if(m_items[j] == -1) consequences = singleConsequence(instances, j,consequences); if(premise == null || consequences.size() == 0) return m_best; } FastVector allRuleItems = new FastVector(); int index = 0; do { int h = 0; while(h < consequences.size()){ RuleItem dummie = new RuleItem(); current = dummie.generateRuleItem(premise,(ItemSet)consequences.elementAt(h),instances,m_count,m_minRuleCount,m_midPoints,m_priors); if(current != 
null){ allRuleItems.addElement(current); h++; } else consequences.removeElementAt(h); } if(index == i) break; consequencesMinusOne = consequences; consequences = ItemSet.mergeAllItemSets(consequencesMinusOne, index, instances.numInstances()); hashtable = ItemSet.getHashtable(consequencesMinusOne, consequencesMinusOne.size()); consequences = ItemSet.pruneItemSets(consequences, hashtable); index++; } while (consequences.size() > 0); for(int h = 0;h < allRuleItems.size();h++){ current = (RuleItem)allRuleItems.elementAt(h); m_count++; if(m_best.size() < numRules){ m_change =true; redundant = removeRedundant(current); } else{ if(current.accuracy() > m_expectation){ m_expectation = ((RuleItem)(m_best.first())).accuracy(); boolean remove = m_best.remove(m_best.first()); m_change = true; redundant = removeRedundant(current); m_expectation = ((RuleItem)(m_best.first())).accuracy(); while(expectation((double)m_minRuleCount, (current.premise()).m_counter,m_midPoints,m_priors) < m_expectation){ m_minRuleCount++; if(m_minRuleCount > (current.premise()).m_counter) break; } } } } } }while(redundant); return m_best; } /** * Methods that decides whether or not rule a subsumes rule b. * The defintion of subsumption is: * Rule a subsumes rule b, if a subsumes b * AND * a has got least the same expected predictive accuracy as b. * @param a an association rule stored as a RuleItem * @param b an association rule stored as a RuleItem * @return true if rule a subsumes rule b or false otherwise. 
*/ public static boolean aSubsumesB(RuleItem a, RuleItem b){ if(a.m_accuracy < b.m_accuracy) return false; for(int k = 0; k < a.premise().m_items.length;k++){ if(a.premise().m_items[k] != b.premise().m_items[k]){ if((a.premise().m_items[k] != -1 && b.premise().m_items[k] != -1) || b.premise().m_items[k] == -1) return false; } if(a.consequence().m_items[k] != b.consequence().m_items[k]){ if((a.consequence().m_items[k] != -1 && b.consequence().m_items[k] != -1) || a.consequence().m_items[k] == -1) return false; } } return true; } /** * generates a consequence of length 1 for an association rule. * @param instances the instances under consideration * @param attNum an item that does not occur in the premise * @param consequences FastVector that possibly already contains other consequences of length 1 * @return FastVector with consequences of length 1 */ public static FastVector singleConsequence(Instances instances, int attNum, FastVector consequences){ ItemSet consequence; for (int i = 0; i < instances.numAttributes(); i++) { if( i == attNum){ for (int j = 0; j < instances.attribute(i).numValues(); j++) { consequence = new ItemSet(instances.numInstances()); consequence.m_items = new int[instances.numAttributes()]; for (int k = 0; k < instances.numAttributes(); k++) consequence.m_items[k] = -1; consequence.m_items[i] = j; consequences.addElement(consequence); } } } return consequences; } /** * Method that removes redundant rules out of the list of the best rules. 
* A rule is in that list if: * the expected predictive accuracy of this rule is among the best and it is * not subsumed by a rule with at least the same expected predictive accuracy * @param toInsert the rule that should be inserted into the list * @return true if the method has changed the list, false otherwise */ public boolean removeRedundant(RuleItem toInsert){ boolean redundant = false, fSubsumesT = false, tSubsumesF = false; RuleItem first; int subsumes = 0; Object [] best = m_best.toArray(); for(int i=0; i < best.length; i++){ first = (RuleItem)best[i]; fSubsumesT = aSubsumesB(first,toInsert); tSubsumesF = aSubsumesB(toInsert, first); if(fSubsumesT){ subsumes = 1; break; } else{ if(tSubsumesF){ boolean remove = m_best.remove(first); subsumes = 2; redundant =true; } } } if(subsumes == 0 || subsumes == 2) m_best.add(toInsert); return redundant; } /** * Gets the actual maximum value of the generation time * @return the actual maximum value of the generation time */ public int count(){ return m_count; } /** * Gets if the list fo the best rules has been changed * @return whether or not the list fo the best rules has been changed */ public boolean change(){ return m_change; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.4 $"); } }
13,104
33.306283
203
java
tsml-java
tsml-java-master/src/main/java/weka/associations/RuleItem.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * RuleItem.java * Copyright (C) 2004 University of Waikato, Hamilton, New Zealand * */ package weka.associations; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import java.io.Serializable; import java.util.Hashtable; /** * Class for storing an (class) association rule. * The premise and the consequence are stored each as separate item sets. * For every rule their expected predictive accuracy and the time of generation is stored. * These two measures allow to introduce a sort order for rules. * * @author Stefan Mutter * @version $Revision: 1.5 $ */ public class RuleItem implements Comparable, Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -3761299128347476534L; /** The premise of a rule. */ protected ItemSet m_premise; /** The consequence of a rule. */ protected ItemSet m_consequence; /** The expected predictive accuracy of a rule. */ protected double m_accuracy; /** The generation time of a rule. 
*/ protected int m_genTime; /** * Constructor for an empty RuleItem */ public RuleItem(){ } /** * Constructor that generates a RuleItem out of a given one * @param toCopy RuleItem to copy */ public RuleItem(RuleItem toCopy){ m_premise = toCopy.m_premise; m_consequence = toCopy.m_consequence; m_accuracy = toCopy.m_accuracy; m_genTime = toCopy.m_genTime; } /** * Constructor * @param premise the premise of the future RuleItem * @param consequence the consequence of the future RuleItem * @param genTime the time of generation of the future RuleItem * @param ruleSupport support of the rule * @param m_midPoints the mid poitns of the intervals * @param m_priors Hashtable containing the estimated prior probablilities */ public RuleItem(ItemSet premise, ItemSet consequence, int genTime,int ruleSupport,double [] m_midPoints, Hashtable m_priors){ m_premise = premise; m_consequence = consequence; m_accuracy = RuleGeneration.expectation((double)ruleSupport,m_premise.m_counter,m_midPoints,m_priors); //overflow, underflow if(Double.isNaN(m_accuracy) || m_accuracy < 0){ m_accuracy = Double.MIN_VALUE; } m_consequence.m_counter = ruleSupport; m_genTime = genTime; } /** * Constructs a new RuleItem if the support of the given rule is above the support threshold. 
* @param premise the premise * @param consequence the consequence * @param instances the instances * @param genTime the time of generation of the current premise and consequence * @param minRuleCount the support threshold * @param m_midPoints the mid points of the intervals * @param m_priors the estimated priori probabilities (in a hashtable) * @return a RuleItem if its support is above the threshold, null otherwise */ public RuleItem generateRuleItem(ItemSet premise, ItemSet consequence, Instances instances,int genTime, int minRuleCount,double[] m_midPoints, Hashtable m_priors){ ItemSet rule = new ItemSet(instances.numInstances()); rule.m_items = new int[(consequence.m_items).length]; System.arraycopy(premise.m_items, 0, rule.m_items, 0, (premise.m_items).length); for(int k = 0;k < consequence.m_items.length; k++){ if(consequence.m_items[k] != -1) rule.m_items[k] = consequence.m_items[k]; } for (int i = 0; i < instances.numInstances(); i++) rule.upDateCounter(instances.instance(i)); int ruleSupport = rule.support(); if(ruleSupport > minRuleCount){ RuleItem newRule = new RuleItem(premise,consequence,genTime,ruleSupport,m_midPoints,m_priors); return newRule; } return null; } //Note: this class has a natural ordering that is inconsistent with equals /** * compares two RuleItems and allows an ordering concerning * expected predictive accuracy and time of generation * Note: this class has a natural ordering that is inconsistent with equals * @param o RuleItem to compare * @return integer indicating the sort oder of the two RuleItems */ public int compareTo(Object o) { if(this.m_accuracy == ((RuleItem)o).m_accuracy){ if((this.m_genTime == ((RuleItem)o).m_genTime)) return 0; if(this.m_genTime > ((RuleItem)o).m_genTime) return -1; if(this.m_genTime < ((RuleItem)o).m_genTime) return 1; } if(this.m_accuracy < ((RuleItem)o).m_accuracy) return -1; return 1; } /** * returns whether two RuleItems are equal * @param o RuleItem to compare * @return true if the rules are equal, 
false otherwise */ public boolean equals(Object o){ if(o == null) return false; if(m_premise.equals(((RuleItem)o).m_premise) && m_consequence.equals(((RuleItem)o).m_consequence)) return true; return false; } /** * Gets the expected predictive accuracy of a rule * @return the expected predictive accuracy of a rule stored as a RuleItem */ public double accuracy(){ return m_accuracy; } /** * Gets the premise of a rule * @return the premise of a rule stored as a RuleItem */ public ItemSet premise(){ return m_premise; } /** * Gets the consequence of a rule * @return the consequence of a rule stored as a RuleItem */ public ItemSet consequence(){ return m_consequence; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.5 $"); } }
6,358
30.171569
165
java
tsml-java
tsml-java-master/src/main/java/weka/associations/SingleAssociatorEnhancer.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * SingleAssociatorEnhancer.java * Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand * */ package weka.associations; import weka.core.Capabilities; import weka.core.Option; import weka.core.OptionHandler; import weka.core.Utils; import weka.core.Capabilities.Capability; import java.util.Enumeration; import java.util.Vector; /** * Abstract utility class for handling settings common to meta * associators that use a single base associator. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author FracPete (fracpete at waikato dot ac dot nz) * @version $Revision: 8034 $ */ public abstract class SingleAssociatorEnhancer extends AbstractAssociator implements OptionHandler { /** for serialization */ private static final long serialVersionUID = -3665885256363525164L; /** The base associator to use */ protected Associator m_Associator = new Apriori(); /** * String describing default Associator. * * @return default classname */ protected String defaultAssociatorString() { return Apriori.class.getName(); } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option( "\tFull name of base associator.\n" + "\t(default: " + defaultAssociatorString() +")", "W", 1, "-W")); if (m_Associator instanceof OptionHandler) { result.addElement(new Option( "", "", 0, "\nOptions specific to associator " + m_Associator.getClass().getName() + ":")); Enumeration enm = ((OptionHandler) m_Associator).listOptions(); while (enm.hasMoreElements()) result.addElement(enm.nextElement()); } return result.elements(); } /** * Parses a given list of options. Valid options are:<p> * * -W classname <br> * Specify the full class name of the base associator.<p> * * Options after -- are passed to the designated associator.<p> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('W', options); if (tmpStr.length() > 0) { // This is just to set the associator in case the option // parsing fails. setAssociator(AbstractAssociator.forName(tmpStr, null)); setAssociator(AbstractAssociator.forName(tmpStr, Utils.partitionOptions(options))); } else { // This is just to set the associator in case the option // parsing fails. setAssociator(AbstractAssociator.forName(defaultAssociatorString(), null)); setAssociator(AbstractAssociator.forName(defaultAssociatorString(), Utils.partitionOptions(options))); } } /** * Gets the current settings of the associator. 
* * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { int i; Vector<String> result; String[] options; result = new Vector<String>(); result.add("-W"); result.add(getAssociator().getClass().getName()); if (getAssociator() instanceof OptionHandler) { options = ((OptionHandler) getAssociator()).getOptions(); result.add("--"); for (i = 0; i < options.length; i++) result.add(options[i]); } return result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String associatorTipText() { return "The base associator to be used."; } /** * Set the base associator. * * @param value the associator to use. */ public void setAssociator(Associator value) { m_Associator = value; } /** * Get the associator used as the base associator. * * @return the currently used associator */ public Associator getAssociator() { return m_Associator; } /** * Gets the associator specification string, which contains the class name of * the associator and any options to the associator * * @return the associator string */ protected String getAssociatorSpec() { Associator c = getAssociator(); return c.getClass().getName() + " " + Utils.joinOptions(((OptionHandler)c).getOptions()); } /** * Returns default capabilities of the base associator. * * @return the capabilities of the base associator */ public Capabilities getCapabilities() { Capabilities result; if (getAssociator() != null) result = getAssociator().getCapabilities(); else result = new Capabilities(this); // set dependencies for (Capability cap: Capability.values()) result.enableDependency(cap); result.setOwner(this); return result; } }
5,703
27.098522
108
java
tsml-java
tsml-java-master/src/main/java/weka/associations/gsp/Element.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Element.java * Copyright (C) 2007 Sebastian Beer * */ package weka.associations.gsp; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import java.io.Serializable; /** * Class representing an Element, i.e., a set of events/items. * * @author Sebastian Beer * @version $Revision: 1.2 $ */ public class Element implements Cloneable, Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -7900701276019516371L; /** events/items stored as an array of ints */ protected int[] m_Events; /** * Constructor */ public Element() { } /** * Constructor accepting an initial size of the events Array as parameter. * * @param size the size */ public Element(int size) { m_Events = new int[size]; } /** * Returns all events of the given data set as Elements containing a single * event. The order of events is determined by the header information of * the corresponding ARFF file. 
* * @param instances the data set * @return the set of 1-Elements */ public static FastVector getOneElements (Instances instances) { FastVector setOfOneElements = new FastVector(); Element curElement; for (int i = 0; i < instances.numAttributes(); i++) { for (int j = 0; j < instances.attribute(i).numValues(); j++) { curElement = new Element(); curElement.setEvents(new int [instances.numAttributes()]); for (int k = 0; k < instances.numAttributes(); k++) { curElement.getEvents()[k] = -1; } curElement.getEvents()[i] = j; setOfOneElements.addElement(curElement); } } return setOfOneElements; } /** * Merges two Elements into one. * * @param element1 first Element * @param element2 second Element * @return the merged Element */ public static Element merge(Element element1, Element element2) { int[] element1Events = element1.getEvents(); int[] element2Events = element2.getEvents(); Element resultElement = new Element(element1Events.length); int[] resultEvents = resultElement.getEvents(); for (int i = 0; i < element1Events.length; i++) { if (element2Events[i] > -1) { resultEvents[i] = element2Events[i]; } else { resultEvents[i] = element1Events[i]; } } resultElement.setEvents(resultEvents); return resultElement; } /** * Returns a deep clone of an Element. * * @return the cloned Element */ public Element clone() { try { Element clone = (Element) super.clone(); int[] cloneEvents = new int[m_Events.length]; for (int i = 0; i < m_Events.length; i++) { cloneEvents[i] = m_Events[i]; } clone.setEvents(cloneEvents); return clone; } catch (CloneNotSupportedException exc) { exc.printStackTrace(); } return null; } /** * Checks if an Element contains over one event. * * @return true, if the Element contains over one event, else false */ public boolean containsOverOneEvent() { int numEvents = 0; for (int i = 0; i < m_Events.length; i++) { if (m_Events[i] > -1) { numEvents++; } if (numEvents == 2) { return true; } } return false; } /** * Deletes the first or last event of an Element. 
* * @param position the position of the event to be deleted (first or last) */ public void deleteEvent(String position) { if (position.equals("first")) { //delete first event for (int i = 0; i < m_Events.length; i++) { if (m_Events[i] > -1) { m_Events[i] = -1; break; } } } if (position.equals("last")) { //delete last event for (int i = m_Events.length-1; i >= 0; i--) { if (m_Events[i] > -1) { m_Events[i] = -1; break; } } } } /** * Checks if two Elements are equal. * * @return true, if the two Elements are equal, else false */ public boolean equals(Object obj) { Element element2 = (Element) obj; for (int i=0; i < m_Events.length; i++) { if (!(m_Events[i] == element2.getEvents()[i])) { return false; } } return true; } /** * Returns the events Array of an Element. * * @return the events Array */ public int[] getEvents() { return m_Events; } /** * Checks if an Element is contained by a given Instance. * * @param instance the given Instance * @return true, if the Instance contains the Element, else false */ public boolean isContainedBy(Instance instance) { for (int i=0; i < instance.numAttributes(); i++) { if (m_Events[i] > -1) { if (instance.isMissing(i)) { return false; } if (m_Events[i] != (int) instance.value(i)) { return false; } } } return true; } /** * Checks if the Element contains any events. * * @return true, if the Element contains no event, else false */ public boolean isEmpty() { for (int i=0; i < m_Events.length; i++) { if (m_Events[i] > -1) { return false; } } return true; } /** * Sets the events Array of an Element. * * @param events the events Array to set */ protected void setEvents(int[] events) { m_Events = events; } /** * Returns a String representation of an Element where the numeric value * of each event/item is represented by its respective nominal value. 
* * @param dataSet the corresponding data set containing the header information * @return the String representation */ public String toNominalString(Instances dataSet) { StringBuffer result = new StringBuffer(); int addedValues = 0; result.append("{"); for (int i=0; i < m_Events.length; i++) { if (m_Events[i] > -1) { result.append(dataSet.attribute(i).value(m_Events[i]) + ","); addedValues++; } } result.deleteCharAt(result.length()-1); result.append("}"); return result.toString(); } /** * Returns a String representation of an Element. * * @return the String representation */ public String toString() { String result = ""; result += "{"; for (int i=0; i < m_Events.length; i++) { result += m_Events[i]; if (i+1 < m_Events.length) { result += ","; } } result += "}"; return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.2 $"); } }
7,345
23.244224
81
java
tsml-java
tsml-java-master/src/main/java/weka/associations/gsp/Sequence.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Sequence.java * Copyright (C) 2007 Sebastian Beer * */ package weka.associations.gsp; import weka.core.FastVector; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import java.io.Serializable; import java.util.Enumeration; /** * Class representing a sequence of elements/itemsets. * * @author Sebastian Beer * @version $Revision: 1.2 $ */ public class Sequence implements Cloneable, Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -5001018056339156390L; /** the support count of the Sequence */ protected int m_SupportCount; /** ordered list of the comprised elements/itemsets */ protected FastVector m_Elements; /** * Constructor. */ public Sequence() { m_SupportCount = 0; m_Elements = new FastVector(); } /** * Constructor accepting a set of elements as parameter. * * @param elements the Elements of the Sequence */ public Sequence(FastVector elements) { m_SupportCount = 0; m_Elements = elements; } /** * Constructor accepting an int value as parameter to set the support count. 
* * @param supportCount the support count to set */ public Sequence(int supportCount) { m_SupportCount = supportCount; m_Elements = new FastVector(); } /** * Generates all possible candidate k-Sequences and prunes the ones that * contain an infrequent (k-1)-Sequence. * * @param kMinusOneSequences the set of (k-1)-Sequences, used for verification * @return the generated set of k-candidates * @throws CloneNotSupportedException */ public static FastVector aprioriGen(FastVector kMinusOneSequences) throws CloneNotSupportedException { FastVector allCandidates = generateKCandidates(kMinusOneSequences); FastVector prunedCandidates = pruneCadidates(allCandidates, kMinusOneSequences); return prunedCandidates; } /** * Deletes Sequences of a given set which don't meet the minimum support * count threshold. * * @param sequences the set Sequences to be checked * @param minSupportCount the minimum support count * @return the set of Sequences after deleting */ public static FastVector deleteInfrequentSequences(FastVector sequences, long minSupportCount) { FastVector deletedSequences = new FastVector(); Enumeration seqEnum = sequences.elements(); while (seqEnum.hasMoreElements()) { Sequence currentSeq = (Sequence) seqEnum.nextElement(); long curSupportCount = currentSeq.getSupportCount(); if (curSupportCount >= minSupportCount) { deletedSequences.addElement(currentSeq); } } return deletedSequences; } /** * Generates candidate k-Sequences on the basis of a given (k-1)-Sequence set. 
* * @param kMinusOneSequences the set of (k-1)-Sequences * @return the set of candidate k-Sequences * @throws CloneNotSupportedException */ protected static FastVector generateKCandidates(FastVector kMinusOneSequences) throws CloneNotSupportedException { FastVector candidates = new FastVector(); FastVector mergeResult = new FastVector(); for (int i = 0; i < kMinusOneSequences.size(); i++) { for (int j = 0; j < kMinusOneSequences.size(); j++) { Sequence originalSeq1 = (Sequence) kMinusOneSequences.elementAt(i); Sequence seq1 = originalSeq1.clone(); Sequence originalSeq2 = (Sequence) kMinusOneSequences.elementAt(j); Sequence seq2 = originalSeq2.clone(); Sequence subseq1 = seq1.deleteEvent("first"); Sequence subseq2 = seq2.deleteEvent("last"); if (subseq1.equals(subseq2)) { //seq1 and seq2 are 1-sequences if ((subseq1.getElements().size() == 0) && (subseq2.getElements().size() == 0)) { if (i >= j) { mergeResult = merge(seq1, seq2, true, true); } else { mergeResult = merge(seq1, seq2, true, false); } //seq1 and seq2 are k-sequences } else { mergeResult = merge(seq1, seq2, false, false); } candidates.appendElements(mergeResult); } } } return candidates; } /** * Merges two Sequences in the course of candidate generation. Differentiates * between merging 1-Sequences and k-Sequences, k > 1. 
 * * @param seq1 Sequence at first position
 * @param seq2 Sequence at second position
 * @param oneElements true, if 1-Elements should be merged, else false
 * @param mergeElements true, if two 1-Elements were not already merged
 * (regardless of their position), else false
 * @return set of resulting Sequences
 */
protected static FastVector merge(Sequence seq1, Sequence seq2, boolean oneElements, boolean mergeElements) {
  FastVector mergeResult = new FastVector();
  //merge 1-sequences
  if (oneElements) {
    Element element1 = (Element) seq1.getElements().firstElement();
    Element element2 = (Element) seq2.getElements().firstElement();
    // element3 becomes the itemset merge <{x,y}> of the two 1-Elements, or
    // stays null when no itemset merge is requested/possible.
    Element element3 = null;
    if (mergeElements) {
      for (int i = 0; i < element1.getEvents().length; i++) {
        if (element1.getEvents()[i] > -1) {
          // Both Elements set the same attribute slot -> they cannot form a
          // single itemset; stop scanning.
          // NOTE(review): element3 may already be non-null from an earlier
          // slot when this break fires; the break does not reset it — confirm
          // this is intended.
          if (element2.getEvents()[i] > -1) {
            break;
          } else {
            element3 = Element.merge(element1, element2);
          }
        }
      }
    }
    FastVector newElements1 = new FastVector();
    //generate <{x}{y}>
    newElements1.addElement(element1);
    newElements1.addElement(element2);
    mergeResult.addElement(new Sequence(newElements1));
    //generate <{x,y}>
    if (element3 != null) {
      FastVector newElements2 = new FastVector();
      newElements2.addElement(element3);
      mergeResult.addElement(new Sequence(newElements2));
    }
    return mergeResult;
    //merge k-sequences, k > 1
  } else {
    Element lastElementSeq1 = (Element) seq1.getElements().lastElement();
    Element lastElementSeq2 = (Element) seq2.getElements().lastElement();
    Sequence resultSeq = new Sequence();
    FastVector resultSeqElements = resultSeq.getElements();
    //if last two events/items belong to the same element/itemset
    if (lastElementSeq2.containsOverOneEvent()) {
      // Copy all but the last Element of seq1, then append the merge of the
      // two final Elements.
      for (int i = 0; i < (seq1.getElements().size()-1); i++) {
        resultSeqElements.addElement(seq1.getElements().elementAt(i));
      }
      resultSeqElements.addElement(Element.merge(lastElementSeq1, lastElementSeq2));
      mergeResult.addElement(resultSeq);
      return mergeResult;
      //if last two events/items belong to different elements/itemsets
    } else {
      // Copy seq1 completely and append seq2's last Element as a new itemset.
      for (int i = 0; i < (seq1.getElements().size()); i++) {
        resultSeqElements.addElement(seq1.getElements().elementAt(i));
      }
      resultSeqElements.addElement(lastElementSeq2);
      mergeResult.addElement(resultSeq);
      return mergeResult;
    }
  }
}

/**
 * Converts a set of 1-Elements into a set of 1-Sequences.
 *
 * @param elements the set of 1-Elements
 * @return the set of 1-Sequences
 */
public static FastVector oneElementsToSequences(FastVector elements) {
  FastVector sequences = new FastVector();
  Enumeration elementEnum = elements.elements();
  while (elementEnum.hasMoreElements()) {
    // Wrap each 1-Element in its own fresh Sequence.
    Sequence seq = new Sequence();
    FastVector seqElements = seq.getElements();
    seqElements.addElement(elementEnum.nextElement());
    sequences.addElement(seq);
  }
  return sequences;
}

/**
 * Prints a set of Sequences as String output (to System.out).
 *
 * @param setOfSequences the set of sequences
 */
public static void printSetOfSequences(FastVector setOfSequences) {
  Enumeration seqEnum = setOfSequences.elements();
  int i = 1;
  while(seqEnum.hasMoreElements()) {
    Sequence seq = (Sequence) seqEnum.nextElement();
    // Sequence.toString() already ends with newlines, hence print, not println.
    System.out.print("[" + i++ + "]" + " " + seq.toString());
  }
}

/**
 * Prunes a k-Sequence of a given candidate set if one of its (k-1)-Sequences
 * is infrequent.
 * * @param allCandidates the set of all potential k-Sequences
 * @param kMinusOneSequences the set of (k-1)-Sequences for verification
 * @return the set of the pruned candidates
 */
protected static FastVector pruneCadidates(FastVector allCandidates, FastVector kMinusOneSequences) {
  // Apriori prune step: a candidate k-sequence can only be frequent if every
  // (k-1)-subsequence formed by dropping one event is itself frequent.
  FastVector prunedCandidates = new FastVector();
  boolean isFrequent;
  //for each candidate
  for (int i = 0; i < allCandidates.size(); i++) {
    Sequence candidate = (Sequence) allCandidates.elementAt(i);
    isFrequent = true;
    FastVector canElements = candidate.getElements();
    //generate each possible (k-1)-sequence and verify if it's frequent
    for (int j = 0; j < canElements.size(); j++) {
      if(isFrequent) {
        Element origElement = (Element) canElements.elementAt(j);
        int[] origEvents = origElement.getEvents();
        for (int k = 0; k < origEvents.length; k++) {
          if (origEvents[k] > -1) {
            // Temporarily blank out event k to form the (k-1)-subsequence
            // in place; helpEvent remembers the value for restoration.
            int helpEvent = origEvents[k];
            origEvents[k] = -1;
            if (origElement.isEmpty()) {
              // Dropping the event emptied the Element, so remove the whole
              // Element before the containment test (indexOf relies on
              // Sequence.equals).
              canElements.removeElementAt(j);
              //check if the (k-1)-sequence is contained in the set of kMinusOneSequences
              int containedAt = kMinusOneSequences.indexOf(candidate);
              if (containedAt != -1) {
                // Frequent: restore the event and the removed Element.
                origEvents[k] = helpEvent;
                canElements.insertElementAt(origElement, j);
                break;
              } else {
                // NOTE(review): on this path the candidate stays mutated
                // (event blanked, Element removed); it is discarded below, but
                // allCandidates still references the mutated object — confirm
                // callers never reuse allCandidates afterwards.
                isFrequent = false;
                break;
              }
            } else {
              //check if the (k-1)-sequence is contained in the set of kMinusOneSequences
              int containedAt = kMinusOneSequences.indexOf(candidate);
              if (containedAt != -1) {
                origEvents[k] = helpEvent;
                continue;
              } else {
                // NOTE(review): origEvents[k] is not restored here either;
                // see the note above.
                isFrequent = false;
                break;
              }
            }
          }
        }
      } else {
        break;
      }
    }
    if (isFrequent) {
      prunedCandidates.addElement(candidate);
    }
  }
  return prunedCandidates;
}

/**
 * Returns a String representation of a set of Sequences where the numeric
 * value of each event/item is represented by its respective nominal value.
 * * @param setOfSequences the set of Sequences
 * @param dataSet the corresponding data set containing the header
 * information
 * @param filterAttributes the attributes to filter out
 * @return the String representation
 */
public static String setOfSequencesToString(FastVector setOfSequences, Instances dataSet, FastVector filterAttributes) {
  StringBuffer resString = new StringBuffer();
  Enumeration SequencesEnum = setOfSequences.elements();
  int i = 1;
  boolean printSeq;
  while(SequencesEnum.hasMoreElements()) {
    Sequence seq = (Sequence) SequencesEnum.nextElement();
    // A value of -1 in the first filter slot disables filtering entirely.
    // NOTE(review): assumes filterAttributes is non-empty — confirm callers.
    Integer filterAttr = (Integer) filterAttributes.elementAt(0);
    printSeq = true;
    if (filterAttr.intValue() != -1) {
      for (int j=0; j < filterAttributes.size(); j++) {
        filterAttr = (Integer) filterAttributes.elementAt(j);
        FastVector seqElements = seq.getElements();
        if (printSeq) {
          // The Sequence is only printed if every one of its Elements sets
          // each of the filter attributes (event != -1).
          for (int k=0; k < seqElements.size(); k++) {
            Element currentElement = (Element) seqElements.elementAt(k);
            int[] currentEvents = currentElement.getEvents();
            if (currentEvents[filterAttr.intValue()] != -1) {
              continue;
            } else {
              printSeq = false;
              break;
            }
          }
        }
      }
    }
    if (printSeq) {
      resString.append("[" + i++ + "]" + " " + seq.toNominalString(dataSet));
    }
  }
  return resString.toString();
}

/**
 * Updates the support count of a set of Sequence candidates according to a
 * given set of data sequences.
 * * @param candidates the set of candidates
 * @param dataSequences the set of data sequences
 */
public static void updateSupportCount(FastVector candidates, FastVector dataSequences) {
  Enumeration canEnumeration = candidates.elements();
  while(canEnumeration.hasMoreElements()){
    Enumeration dataSeqEnumeration = dataSequences.elements();
    Sequence candidate = (Sequence) canEnumeration.nextElement();
    while(dataSeqEnumeration.hasMoreElements()) {
      Instances dataSequence = (Instances) dataSeqEnumeration.nextElement();
      // Each supporting data sequence contributes exactly one count,
      // regardless of how often the candidate occurs within it.
      if (candidate.isSubsequenceOf(dataSequence)) {
        candidate.setSupportCount(candidate.getSupportCount() + 1);
      }
    }
  }
}

/**
 * Returns a deep clone of a Sequence.
 *
 * @return the cloned Sequence, or null if cloning fails
 */
public Sequence clone() {
  try {
    Sequence clone = (Sequence) super.clone();
    clone.setSupportCount(m_SupportCount);
    // Deep-copy the element list so the clone shares no mutable state with
    // the original.
    FastVector cloneElements = new FastVector(m_Elements.size());
    for (int i = 0; i < m_Elements.size(); i++) {
      Element helpElement = (Element) m_Elements.elementAt(i);
      cloneElements.addElement(helpElement.clone());
    }
    clone.setElements(cloneElements);
    return clone;
  } catch (CloneNotSupportedException exc) {
    // NOTE(review): returning null here will NPE at call sites such as
    // generateKCandidates — presumably Sequence implements Cloneable so this
    // path never triggers; confirm against the class declaration.
    exc.printStackTrace();
  }
  return null;
}

/**
 * Deletes either the first or the last event/item of a Sequence. If the
 * deleted event/item is the only value in the Element, it is removed, as well.
* * @param position the position of the event/item (first or last) * @return the Sequence with either the first or the last * event/item deleted */ protected Sequence deleteEvent(String position) { Sequence cloneSeq = clone(); if (position.equals("first")) { Element element = (Element) cloneSeq.getElements().firstElement(); element.deleteEvent("first"); if (element.isEmpty()) { cloneSeq.getElements().removeElementAt(0); } return cloneSeq; } if (position.equals("last")) { Element element = (Element) cloneSeq.getElements().lastElement(); element.deleteEvent("last"); if (element.isEmpty()) { cloneSeq.getElements().removeElementAt(m_Elements.size()-1); } return cloneSeq; } return null; } /** * Checks if two Sequences are equal. * * @return true, if the two Sequences are equal, else false */ public boolean equals(Object obj) { Sequence seq2 = (Sequence) obj; FastVector seq2Elements = seq2.getElements(); for (int i = 0; i < m_Elements.size(); i++) { Element thisElement = (Element) m_Elements.elementAt(i); Element seq2Element = (Element) seq2Elements.elementAt(i); if (!thisElement.equals(seq2Element)) { return false; } } return true; } /** * Returns the Elements of the Sequence. * * @return the Elements */ protected FastVector getElements() { return m_Elements; } /** * Returns the support count of the Sequence. * * @return the support count */ protected int getSupportCount() { return m_SupportCount; } /** * Checks if the Sequence is subsequence of a given data sequence. 
* * @param dataSequence the data sequence to verify against * @return true, if the Sequnce is subsequence of the data * sequence, else false */ protected boolean isSubsequenceOf(Instances dataSequence) { FastVector elements = getElements(); Enumeration elementEnum = elements.elements(); Element curElement = (Element) elementEnum.nextElement(); for (int i = 0; i < dataSequence.numInstances(); i++) { if (curElement.isContainedBy(dataSequence.instance(i))) { if (!elementEnum.hasMoreElements()) { return true; } else { curElement = (Element) elementEnum.nextElement(); continue; } } } return false; } /** * Sets the Elements of the Sequence. * * @param elements the Elements to set */ protected void setElements(FastVector elements) { m_Elements = elements; } /** * Sets the support count of the Sequence. * * @param supportCount the support count to set */ protected void setSupportCount(int supportCount) { m_SupportCount = supportCount; } /** * Returns a String representation of a Sequences where the numeric value * of each event/item is represented by its respective nominal value. * * @param dataSet the corresponding data set containing the header * information * @return the String representation */ public String toNominalString(Instances dataSet) { String result = ""; result += "<"; for (int i = 0; i < m_Elements.size(); i++) { Element element = (Element) m_Elements.elementAt(i); result += element.toNominalString(dataSet); } result += "> (" + getSupportCount() + ")\n"; return result; } /** * Returns a String representation of a Sequence. 
* * @return the String representation */ public String toString() { String result = ""; result += "Sequence Output\n"; result += "------------------------------\n"; result += "Support Count: " + getSupportCount() + "\n"; result += "contained elements/itemsets:\n"; for (int i = 0; i < m_Elements.size(); i++) { Element element = (Element) m_Elements.elementAt(i); result += element.toString(); } result += "\n\n"; return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.2 $"); } }
17,747
29.338462
122
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/ASEvaluation.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * ASEvaluation.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import java.io.Serializable; import weka.core.Capabilities; import weka.core.CapabilitiesHandler; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.SerializedObject; import weka.core.Utils; /** * Abstract attribute selection evaluation class * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public abstract class ASEvaluation implements Serializable, CapabilitiesHandler, RevisionHandler { /** for serialization */ private static final long serialVersionUID = 2091705669885950849L; // =============== // Public methods. // =============== /** * Generates a attribute evaluator. Has to initialize all fields of the * evaluator that are not being set via options. * * @param data set of instances serving as training data * @exception Exception if the evaluator has not been * generated successfully */ public abstract void buildEvaluator(Instances data) throws Exception; /** * Provides a chance for a attribute evaluator to do any special * post processing of the selected attribute set. 
* * @param attributeSet the set of attributes found by the search * @return a possibly ranked list of postprocessed attributes * @exception Exception if postprocessing fails for some reason */ public int [] postProcess(int [] attributeSet) throws Exception { return attributeSet; } /** * Creates a new instance of an attribute/subset evaluator * given it's class name and * (optional) arguments to pass to it's setOptions method. If the * evaluator implements OptionHandler and the options parameter is * non-null, the evaluator will have it's options set. * * @param evaluatorName the fully qualified class name of the evaluator * @param options an array of options suitable for passing to setOptions. May * be null. * @return the newly created evaluator, ready for use. * @exception Exception if the evaluator name is invalid, or the options * supplied are not acceptable to the evaluator */ public static ASEvaluation forName(String evaluatorName, String [] options) throws Exception { return (ASEvaluation)Utils.forName(ASEvaluation.class, evaluatorName, options); } /** * Creates copies of the current evaluator. Note that this method * now uses Serialization to perform a deep copy, so the evaluator * object must be fully Serializable. Any currently built model will * now be copied as well. * * @param model an example evaluator to copy * @param num the number of evaluator copies to create. * @return an array of evaluators. * @exception Exception if an error occurs */ public static ASEvaluation [] makeCopies(ASEvaluation model, int num) throws Exception { if (model == null) { throw new Exception("No model evaluator set"); } ASEvaluation [] evaluators = new ASEvaluation [num]; SerializedObject so = new SerializedObject(model); for(int i = 0; i < evaluators.length; i++) { evaluators[i] = (ASEvaluation) so.getObject(); } return evaluators; } /** * Returns the capabilities of this evaluator. 
* * @return the capabilities of this evaluator * @see Capabilities */ public Capabilities getCapabilities() { Capabilities result = new Capabilities(this); result.enableAll(); return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * runs the evaluator with the given commandline options * * @param evaluator the evaluator to run * @param options the commandline options */ public static void runEvaluator(ASEvaluation evaluator, String[] options) { try { System.out.println( AttributeSelection.SelectAttributes(evaluator, options)); } catch (Exception e) { String msg = e.toString().toLowerCase(); if ( (msg.indexOf("help requested") == -1) && (msg.indexOf("no training file given") == -1) ) e.printStackTrace(); System.err.println(e.getMessage()); } } }
5,110
30.549383
79
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/ASSearch.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * ASSearch.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import java.io.Serializable; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.SerializedObject; import weka.core.Utils; /** * Abstract attribute selection search class. * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public abstract class ASSearch implements Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = 7591673350342236548L; // =============== // Public methods. // =============== /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Searches the attribute subset/ranking space. * * @param ASEvaluator the attribute evaluator to guide the search * @param data the training instances. * @return an array (not necessarily ordered) of selected attribute indexes * @throws Exception if the search can't be completed */ public abstract int [] search(ASEvaluation ASEvaluator, Instances data) throws Exception; /** * Creates a new instance of a search class given it's class name and * (optional) arguments to pass to it's setOptions method. 
If the * search method implements OptionHandler and the options parameter is * non-null, the search method will have it's options set. * * @param searchName the fully qualified class name of the search class * @param options an array of options suitable for passing to setOptions. May * be null. * @return the newly created search object, ready for use. * @throws Exception if the search class name is invalid, or the options * supplied are not acceptable to the search class. */ public static ASSearch forName(String searchName, String [] options) throws Exception { return (ASSearch)Utils.forName(ASSearch.class, searchName, options); } /** * Creates copies of the current search scheme. Note that this method * now uses Serialization to perform a deep copy, so the search * object must be fully Serializable. Any currently built model will * now be copied as well. * * @param model an example search scheme to copy * @param num the number of search scheme copies to create. * @return an array of search schemes. * @throws Exception if an error occurs */ public static ASSearch[] makeCopies(ASSearch model, int num) throws Exception { if (model == null) throw new Exception("No model search scheme set"); ASSearch[] result = new ASSearch[num]; SerializedObject so = new SerializedObject(model); for (int i = 0; i < result.length; i++) result[i] = (ASSearch) so.getObject(); return result; } }
3,624
31.366071
81
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/AttributeEvaluator.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * AttributeEvaluator.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; /** * Interface for classes that evaluate attributes individually. * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public interface AttributeEvaluator { /** * evaluates an individual attribute * * @param attribute the index of the attribute to be evaluated * @return the "merit" of the attribute * @exception Exception if the attribute could not be evaluated */ public abstract double evaluateAttribute(int attribute) throws Exception; }
1,345
28.26087
77
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/AttributeSelection.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * AttributeSelection.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import java.beans.BeanInfo; import java.beans.IntrospectionException; import java.beans.Introspector; import java.beans.MethodDescriptor; import java.beans.PropertyDescriptor; import java.io.Serializable; import java.lang.reflect.Method; import java.util.Enumeration; import java.util.Random; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.converters.ConverterUtils.DataSource; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Remove; /** * Attribute selection class. Takes the name of a search class and * an evaluation class on the command line. <p/> * * Valid options are: <p/> * * -h <br/> * Display help. <p/> * * -i &lt;name of input file&gt; <br/> * Specify the training data file. <p/> * * -c &lt;class index&gt; <br/> * The index of the attribute to use as the class. <p/> * * -s &lt;search method&gt; <br/> * The full class name of the search method followed by search method options * (if any).<br/> * Eg. 
-s "weka.attributeSelection.BestFirst -N 10" <p/> * * -x &lt;number of folds&gt; <br/> * Perform a cross validation. <p/> * * -n &lt;random number seed&gt; <br/> * Specify a random number seed. Use in conjuction with -x. (Default = 1). <p/> * * ------------------------------------------------------------------------ <p/> * * Example usage as the main of an attribute evaluator (called FunkyEvaluator): * <pre> * public static void main(String [] args) { * runEvaluator(new FunkyEvaluator(), args); * } * </pre> * <p/> * * ------------------------------------------------------------------------ <p/> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class AttributeSelection implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = 4170171824147584330L; /** the instances to select attributes from */ private Instances m_trainInstances; /** the attribute/subset evaluator */ private ASEvaluation m_ASEvaluator; /** the search method */ private ASSearch m_searchMethod; /** the number of folds to use for cross validation */ private int m_numFolds; /** holds a string describing the results of the attribute selection */ private StringBuffer m_selectionResults; /** rank features (if allowed by the search method) */ private boolean m_doRank; /** do cross validation */ private boolean m_doXval; /** seed used to randomly shuffle instances for cross validation */ private int m_seed; /** number of attributes requested from ranked results */ private int m_numToSelect; /** the selected attributes */ private int [] m_selectedAttributeSet; /** the attribute indexes and associated merits if a ranking is produced */ private double [][] m_attributeRanking; /** if a feature selection run involves an attribute transformer */ private AttributeTransformer m_transformer = null; /** the attribute filter for processing instances with respect to the most recent feature selection run */ private Remove m_attributeFilter 
= null; /** hold statistics for repeated feature selection, such as under cross validation */ private double [][] m_rankResults = null; private double [] m_subsetResults = null; private int m_trials = 0; /** * Return the number of attributes selected from the most recent * run of attribute selection * @return the number of attributes selected */ public int numberAttributesSelected() throws Exception { int [] att = selectedAttributes(); return att.length-1; } /** * get the final selected set of attributes. * @return an array of attribute indexes * @exception Exception if attribute selection has not been performed yet */ public int [] selectedAttributes () throws Exception { if (m_selectedAttributeSet == null) { throw new Exception("Attribute selection has not been performed yet!"); } return m_selectedAttributeSet; } /** * get the final ranking of the attributes. * @return a two dimensional array of ranked attribute indexes and their * associated merit scores as doubles. * @exception Exception if a ranking has not been produced */ public double [][] rankedAttributes () throws Exception { if (m_attributeRanking == null) { throw new Exception("Ranking has not been performed"); } return m_attributeRanking; } /** * set the attribute/subset evaluator * @param evaluator the evaluator to use */ public void setEvaluator (ASEvaluation evaluator) { m_ASEvaluator = evaluator; } /** * set the search method * @param search the search method to use */ public void setSearch (ASSearch search) { m_searchMethod = search; if (m_searchMethod instanceof RankedOutputSearch) { setRanking(((RankedOutputSearch)m_searchMethod).getGenerateRanking()); } } /** * set the number of folds for cross validation * @param folds the number of folds */ public void setFolds (int folds) { m_numFolds = folds; } /** * produce a ranking (if possible with the set search and evaluator) * @param r true if a ranking is to be produced */ public void setRanking (boolean r) { m_doRank = r; } /** * do a cross 
validation
 * @param x true if a cross validation is to be performed
 */
public void setXval (boolean x) {
  m_doXval = x;
}

/**
 * set the seed for use in cross validation
 *
 * @param s the seed
 */
public void setSeed (int s) {
  m_seed = s;
}

/**
 * get a description of the attribute selection
 *
 * @return a String describing the results of attribute selection
 */
public String toResultsString() {
  return m_selectionResults.toString();
}

/**
 * reduce the dimensionality of a set of instances to include only those
 * attributes chosen by the last run of attribute selection.
 *
 * @param in the instances to be reduced
 * @return a dimensionality reduced set of instances
 * @exception Exception if the instances can't be reduced
 */
public Instances reduceDimensionality(Instances in) throws Exception {
  if (m_attributeFilter == null) {
    throw new Exception("No feature selection has been performed yet!");
  }
  if (m_transformer != null) {
    // The last selection run involved an attribute transformer: convert
    // every instance into the transformed space before applying the
    // attribute filter.
    Instances transformed = new Instances(m_transformer.transformedHeader(),
                                          in.numInstances());
    for (int i=0;i<in.numInstances();i++) {
      transformed.add(m_transformer.convertInstance(in.instance(i)));
    }
    return Filter.useFilter(transformed, m_attributeFilter);
  }
  return Filter.useFilter(in, m_attributeFilter);
}

/**
 * reduce the dimensionality of a single instance to include only those
 * attributes chosen by the last run of attribute selection.
 *
 * @param in the instance to be reduced
 * @return a dimensionality reduced instance
 * @exception Exception if the instance can't be reduced
 */
public Instance reduceDimensionality(Instance in) throws Exception {
  if (m_attributeFilter == null) {
    throw new Exception("No feature selection has been performed yet!");
  }
  if (m_transformer != null) {
    // Transform first when the last run used an attribute transformer.
    in = m_transformer.convertInstance(in);
  }
  m_attributeFilter.input(in);
  m_attributeFilter.batchFinished();
  Instance result = m_attributeFilter.output();
  return result;
}

/**
 * constructor. Sets defaults for each member variable.
Default * attribute evaluator is CfsSubsetEval; default search method is * BestFirst. */ public AttributeSelection () { setFolds(10); setRanking(false); setXval(false); setSeed(1); setEvaluator(new CfsSubsetEval()); setSearch(new GreedyStepwise()); m_selectionResults = new StringBuffer(); m_selectedAttributeSet = null; m_attributeRanking = null; } /** * Perform attribute selection with a particular evaluator and * a set of options specifying search method and input file etc. * * @param ASEvaluator an evaluator object * @param options an array of options, not only for the evaluator * but also the search method (if any) and an input data file * @return the results of attribute selection as a String * @exception Exception if no training file is set */ public static String SelectAttributes (ASEvaluation ASEvaluator, String[] options) throws Exception { String trainFileName, searchName; Instances train = null; ASSearch searchMethod = null; String[] optionsTmp = (String[]) options.clone(); boolean helpRequested = false; try { // get basic options (options the same for all attribute selectors trainFileName = Utils.getOption('i', options); helpRequested = Utils.getFlag('h', optionsTmp); if (helpRequested || (trainFileName.length() == 0)) { searchName = Utils.getOption('s', optionsTmp); if (searchName.length() != 0) { String[] searchOptions = Utils.splitOptions(searchName); searchMethod = (ASSearch)Class.forName(searchOptions[0]).newInstance(); } if (helpRequested) throw new Exception("Help requested."); else throw new Exception("No training file given."); } } catch (Exception e) { throw new Exception('\n' + e.getMessage() + makeOptionString(ASEvaluator, searchMethod)); } DataSource source = new DataSource(trainFileName); train = source.getDataSet(); return SelectAttributes(ASEvaluator, options, train); } /** * returns a string summarizing the results of repeated attribute * selection runs on splits of a dataset. 
* @return a summary of attribute selection results * @exception Exception if no attribute selection has been performed. */ public String CVResultsString () throws Exception { StringBuffer CvString = new StringBuffer(); if ((m_subsetResults == null && m_rankResults == null) || ( m_trainInstances == null)) { throw new Exception("Attribute selection has not been performed yet!"); } int fieldWidth = (int)(Math.log(m_trainInstances.numAttributes()) +1.0); CvString.append("\n\n=== Attribute selection " + m_numFolds + " fold cross-validation "); if (!(m_ASEvaluator instanceof UnsupervisedSubsetEvaluator) && !(m_ASEvaluator instanceof UnsupervisedAttributeEvaluator) && (m_trainInstances.classAttribute().isNominal())) { CvString.append("(stratified), seed: "); CvString.append(m_seed+" ===\n\n"); } else { CvString.append("seed: "+m_seed+" ===\n\n"); } if ((m_searchMethod instanceof RankedOutputSearch) && (m_doRank == true)) { CvString.append("average merit average rank attribute\n"); // calcualte means and std devs for (int i = 0; i < m_rankResults[0].length; i++) { m_rankResults[0][i] /= m_numFolds; // mean merit double var = m_rankResults[0][i]*m_rankResults[0][i]*m_numFolds; var = (m_rankResults[2][i] - var); var /= m_numFolds; if (var <= 0.0) { var = 0.0; m_rankResults[2][i] = 0; } else { m_rankResults[2][i] = Math.sqrt(var); } m_rankResults[1][i] /= m_numFolds; // mean rank var = m_rankResults[1][i]*m_rankResults[1][i]*m_numFolds; var = (m_rankResults[3][i] - var); var /= m_numFolds; if (var <= 0.0) { var = 0.0; m_rankResults[3][i] = 0; } else { m_rankResults[3][i] = Math.sqrt(var); } } // now sort them by mean rank int[] s = Utils.sort(m_rankResults[1]); for (int i=0; i<s.length; i++) { if (m_rankResults[1][s[i]] > 0) { CvString.append(Utils.doubleToString(/*Math. 
abs(*/m_rankResults[0][s[i]]/*)*/, 6, 3) + " +-" + Utils.doubleToString(m_rankResults[2][s[i]], 6, 3) + " " + Utils.doubleToString(m_rankResults[1][s[i]], fieldWidth+2, 1) + " +-" + Utils.doubleToString(m_rankResults[3][s[i]], 5, 2) +" " + Utils.doubleToString(((double)(s[i] + 1)), fieldWidth, 0) + " " + m_trainInstances.attribute(s[i]).name() + "\n"); } } } else { CvString.append("number of folds (%) attribute\n"); for (int i = 0; i < m_subsetResults.length; i++) { if ((m_ASEvaluator instanceof UnsupervisedSubsetEvaluator) || (i != m_trainInstances.classIndex())) { CvString.append(Utils.doubleToString(m_subsetResults[i], 12, 0) + "(" + Utils.doubleToString((m_subsetResults[i] / m_numFolds * 100.0) , 3, 0) + " %) " + Utils.doubleToString(((double)(i + 1)), fieldWidth, 0) + " " + m_trainInstances.attribute(i).name() + "\n"); } } } return CvString.toString(); } /** * Select attributes for a split of the data. Calling this function * updates the statistics on attribute selection. CVResultsString() * returns a string summarizing the results of repeated calls to * this function. Assumes that splits are from the same dataset--- * ie. have the same number and types of attributes as previous * splits. * * @param split the instances to select attributes from * @exception Exception if an error occurs */ public void selectAttributesCVSplit(Instances split) throws Exception { double[][] attributeRanking = null; // if the train instances are null then set equal to this split. // If this is the case then this function is more than likely being // called from outside this class in order to obtain CV statistics // and all we need m_trainIstances for is to get at attribute names // and types etc. 
if (m_trainInstances == null) { m_trainInstances = split; } // create space to hold statistics if (m_rankResults == null && m_subsetResults == null) { m_subsetResults = new double[split.numAttributes()]; m_rankResults = new double[4][split.numAttributes()]; } m_ASEvaluator.buildEvaluator(split); // Do the search int[] attributeSet = m_searchMethod.search(m_ASEvaluator, split); // Do any postprocessing that a attribute selection method might // require attributeSet = m_ASEvaluator.postProcess(attributeSet); if ((m_searchMethod instanceof RankedOutputSearch) && (m_doRank == true)) { attributeRanking = ((RankedOutputSearch)m_searchMethod). rankedAttributes(); // System.out.println(attributeRanking[0][1]); for (int j = 0; j < attributeRanking.length; j++) { // merit m_rankResults[0][(int)attributeRanking[j][0]] += attributeRanking[j][1]; // squared merit m_rankResults[2][(int)attributeRanking[j][0]] += (attributeRanking[j][1]*attributeRanking[j][1]); // rank m_rankResults[1][(int)attributeRanking[j][0]] += (j + 1); // squared rank m_rankResults[3][(int)attributeRanking[j][0]] += (j + 1)*(j + 1); // += (attributeRanking[j][0] * attributeRanking[j][0]); } } else { for (int j = 0; j < attributeSet.length; j++) { m_subsetResults[attributeSet[j]]++; } } m_trials++; } /** * Perform a cross validation for attribute selection. With subset * evaluators the number of times each attribute is selected over * the cross validation is reported. For attribute evaluators, the * average merit and average ranking + std deviation is reported for * each attribute. 
* * @return the results of cross validation as a String * @exception Exception if an error occurs during cross validation */ public String CrossValidateAttributes () throws Exception { Instances cvData = new Instances(m_trainInstances); Instances train; Random random = new Random(m_seed); cvData.randomize(random); if (!(m_ASEvaluator instanceof UnsupervisedSubsetEvaluator) && !(m_ASEvaluator instanceof UnsupervisedAttributeEvaluator)) { if (cvData.classAttribute().isNominal()) { cvData.stratify(m_numFolds); } } for (int i = 0; i < m_numFolds; i++) { // Perform attribute selection train = cvData.trainCV(m_numFolds, i, random); selectAttributesCVSplit(train); } return CVResultsString(); } /** * Perform attribute selection on the supplied training instances. * * @param data the instances to select attributes from * @exception Exception if there is a problem during selection */ public void SelectAttributes (Instances data) throws Exception { int [] attributeSet; m_transformer = null; m_attributeFilter = null; m_trainInstances = data; if (m_doXval == true && (m_ASEvaluator instanceof AttributeTransformer)) { throw new Exception("Can't cross validate an attribute transformer."); } if (m_ASEvaluator instanceof SubsetEvaluator && m_searchMethod instanceof Ranker) { throw new Exception(m_ASEvaluator.getClass().getName() +" must use a search method other than Ranker"); } if (m_ASEvaluator instanceof AttributeEvaluator && !(m_searchMethod instanceof Ranker)) { // System.err.println("AttributeEvaluators must use a Ranker search " // +"method. 
Switching to Ranker..."); // m_searchMethod = new Ranker(); throw new Exception("AttributeEvaluators must use the Ranker search " + "method"); } if (m_searchMethod instanceof RankedOutputSearch) { m_doRank = ((RankedOutputSearch)m_searchMethod).getGenerateRanking(); } if (m_ASEvaluator instanceof UnsupervisedAttributeEvaluator || m_ASEvaluator instanceof UnsupervisedSubsetEvaluator) { // unset the class index // m_trainInstances.setClassIndex(-1); } else { // check that a class index has been set if (m_trainInstances.classIndex() < 0) { m_trainInstances.setClassIndex(m_trainInstances.numAttributes()-1); } } // Initialize the attribute evaluator m_ASEvaluator.buildEvaluator(m_trainInstances); if (m_ASEvaluator instanceof AttributeTransformer) { m_trainInstances = ((AttributeTransformer)m_ASEvaluator).transformedHeader(); m_transformer = (AttributeTransformer)m_ASEvaluator; } int fieldWidth = (int)(Math.log(m_trainInstances.numAttributes()) +1.0); // Do the search attributeSet = m_searchMethod.search(m_ASEvaluator, m_trainInstances); // try and determine if the search method uses an attribute transformer--- // this is a bit of a hack to make things work properly with RankSearch // using PrincipalComponents as its attribute ranker try { BeanInfo bi = Introspector.getBeanInfo(m_searchMethod.getClass()); PropertyDescriptor properties[]; MethodDescriptor methods[]; // methods = bi.getMethodDescriptors(); properties = bi.getPropertyDescriptors(); for (int i=0;i<properties.length;i++) { String name = properties[i].getDisplayName(); Method meth = properties[i].getReadMethod(); Object retType = meth.getReturnType(); if (retType.equals(ASEvaluation.class)) { Class args [] = { }; ASEvaluation tempEval = (ASEvaluation)(meth.invoke(m_searchMethod, (Object[])args)); if (tempEval instanceof AttributeTransformer) { // grab the transformed data header m_trainInstances = ((AttributeTransformer)tempEval).transformedHeader(); m_transformer = (AttributeTransformer)tempEval; } } } } 
catch (IntrospectionException ex) { System.err.println("AttributeSelection: Couldn't " +"introspect"); } // Do any postprocessing that a attribute selection method might require attributeSet = m_ASEvaluator.postProcess(attributeSet); if (!m_doRank) { m_selectionResults.append(printSelectionResults()); } if ((m_searchMethod instanceof RankedOutputSearch) && m_doRank == true) { m_attributeRanking = ((RankedOutputSearch)m_searchMethod).rankedAttributes(); m_selectionResults.append(printSelectionResults()); m_selectionResults.append("Ranked attributes:\n"); // retrieve the number of attributes to retain m_numToSelect = ((RankedOutputSearch)m_searchMethod).getCalculatedNumToSelect(); // determine fieldwidth for merit int f_p=0; int w_p=0; for (int i = 0; i < m_numToSelect; i++) { double precision = (Math.abs(m_attributeRanking[i][1]) - (int)(Math.abs(m_attributeRanking[i][1]))); double intPart = (int)(Math.abs(m_attributeRanking[i][1])); if (precision > 0) { precision = Math.abs((Math.log(Math.abs(precision)) / Math.log(10)))+3; } if (precision > f_p) { f_p = (int)precision; } if (intPart == 0) { if (w_p < 2) { w_p = 2; } } else if ((Math.abs((Math.log(Math.abs(m_attributeRanking[i][1])) / Math.log(10)))+1) > w_p) { if (m_attributeRanking[i][1] > 0) { w_p = (int)Math.abs((Math.log(Math.abs(m_attributeRanking[i][1])) / Math.log(10)))+1; } } } for (int i = 0; i < m_numToSelect; i++) { m_selectionResults. append(Utils.doubleToString(m_attributeRanking[i][1], f_p+w_p+1,f_p) + Utils.doubleToString((m_attributeRanking[i][0] + 1), fieldWidth+1,0) + " " + m_trainInstances. 
attribute((int)m_attributeRanking[i][0]).name() + "\n"); } // set up the selected attributes array - usable by a filter or // whatever if (m_trainInstances.classIndex() >= 0) { if ((!(m_ASEvaluator instanceof UnsupervisedSubsetEvaluator) && !(m_ASEvaluator instanceof UnsupervisedAttributeEvaluator)) || m_ASEvaluator instanceof AttributeTransformer) { // one more for the class m_selectedAttributeSet = new int[m_numToSelect + 1]; m_selectedAttributeSet[m_numToSelect] = m_trainInstances.classIndex(); } else { m_selectedAttributeSet = new int[m_numToSelect]; } } else { m_selectedAttributeSet = new int[m_numToSelect]; } m_selectionResults.append("\nSelected attributes: "); for (int i = 0; i < m_numToSelect; i++) { m_selectedAttributeSet[i] = (int)m_attributeRanking[i][0]; if (i == m_numToSelect - 1) { m_selectionResults.append(((int)m_attributeRanking[i][0] + 1) + " : " + (i + 1) + "\n"); } else { m_selectionResults.append(((int)m_attributeRanking[i][0] + 1)); m_selectionResults.append(","); } } } else { // set up the selected attributes array - usable by a filter or // whatever if ((!(m_ASEvaluator instanceof UnsupervisedSubsetEvaluator) && !(m_ASEvaluator instanceof UnsupervisedAttributeEvaluator)) || m_trainInstances.classIndex() >= 0) // one more for the class { m_selectedAttributeSet = new int[attributeSet.length + 1]; m_selectedAttributeSet[attributeSet.length] = m_trainInstances.classIndex(); } else { m_selectedAttributeSet = new int[attributeSet.length]; } for (int i = 0; i < attributeSet.length; i++) { m_selectedAttributeSet[i] = attributeSet[i]; } m_selectionResults.append("Selected attributes: "); for (int i = 0; i < attributeSet.length; i++) { if (i == (attributeSet.length - 1)) { m_selectionResults.append((attributeSet[i] + 1) + " : " + attributeSet.length + "\n"); } else { m_selectionResults.append((attributeSet[i] + 1) + ","); } } for (int i=0;i<attributeSet.length;i++) { m_selectionResults.append(" " +m_trainInstances .attribute(attributeSet[i]).name() 
+"\n"); } } // Cross validation should be called from here if (m_doXval == true) { m_selectionResults.append(CrossValidateAttributes()); } // set up the attribute filter with the selected attributes if (m_selectedAttributeSet != null && !m_doXval) { m_attributeFilter = new Remove(); m_attributeFilter.setAttributeIndicesArray(m_selectedAttributeSet); m_attributeFilter.setInvertSelection(true); m_attributeFilter.setInputFormat(m_trainInstances); } // Save space m_trainInstances = new Instances(m_trainInstances, 0); } /** * Perform attribute selection with a particular evaluator and * a set of options specifying search method and options for the * search method and evaluator. * * @param ASEvaluator an evaluator object * @param options an array of options, not only for the evaluator * but also the search method (if any) and an input data file * @param train the input instances * @return the results of attribute selection as a String * @exception Exception if incorrect options are supplied */ public static String SelectAttributes (ASEvaluation ASEvaluator, String[] options, Instances train) throws Exception { int seed = 1, folds = 10; String foldsString, seedString, searchName; String classString; String searchClassName; String[] searchOptions = null; //new String [1]; ASSearch searchMethod = null; boolean doCrossVal = false; int classIndex = -1; boolean helpRequested = false; AttributeSelection trainSelector = new AttributeSelection(); try { if (Utils.getFlag('h', options)) { helpRequested = true; } // does data already have a class attribute set? 
if (train.classIndex() != -1) classIndex = train.classIndex() + 1; // get basic options (options the same for all attribute selectors classString = Utils.getOption('c', options); if (classString.length() != 0) { if (classString.equals("first")) { classIndex = 1; } else if (classString.equals("last")) { classIndex = train.numAttributes(); } else { classIndex = Integer.parseInt(classString); } } if ((classIndex != -1) && ((classIndex == 0) || (classIndex > train.numAttributes()))) { throw new Exception("Class index out of range."); } if (classIndex != -1) { train.setClassIndex(classIndex - 1); } else { // classIndex = train.numAttributes(); // train.setClassIndex(classIndex - 1); } foldsString = Utils.getOption('x', options); if (foldsString.length() != 0) { folds = Integer.parseInt(foldsString); doCrossVal = true; } trainSelector.setFolds(folds); trainSelector.setXval(doCrossVal); seedString = Utils.getOption('n', options); if (seedString.length() != 0) { seed = Integer.parseInt(seedString); } trainSelector.setSeed(seed); searchName = Utils.getOption('s', options); if ((searchName.length() == 0) && (!(ASEvaluator instanceof AttributeEvaluator))) { throw new Exception("No search method given."); } if (searchName.length() != 0) { searchName = searchName.trim(); // split off any search options int breakLoc = searchName.indexOf(' '); searchClassName = searchName; String searchOptionsString = ""; if (breakLoc != -1) { searchClassName = searchName.substring(0, breakLoc); searchOptionsString = searchName.substring(breakLoc).trim(); searchOptions = Utils.splitOptions(searchOptionsString); } } else { try { searchClassName = new String("weka.attributeSelection.Ranker"); searchMethod = (ASSearch)Class. 
forName(searchClassName).newInstance(); } catch (Exception e) { throw new Exception("Can't create Ranker object"); } } // if evaluator is a subset evaluator // create search method and set its options (if any) if (searchMethod == null) { searchMethod = ASSearch.forName(searchClassName, searchOptions); } // set the search method trainSelector.setSearch(searchMethod); } catch (Exception e) { throw new Exception('\n' + e.getMessage() + makeOptionString(ASEvaluator, searchMethod)); } try { // Set options for ASEvaluator if (ASEvaluator instanceof OptionHandler) { ((OptionHandler)ASEvaluator).setOptions(options); } /* // Set options for Search method if (searchMethod instanceof OptionHandler) { if (searchOptions != null) { ((OptionHandler)searchMethod).setOptions(searchOptions); } } Utils.checkForRemainingOptions(searchOptions); */ } catch (Exception e) { throw new Exception("\n" + e.getMessage() + makeOptionString(ASEvaluator, searchMethod)); } try { Utils.checkForRemainingOptions(options); } catch (Exception e) { throw new Exception('\n' + e.getMessage() + makeOptionString(ASEvaluator, searchMethod)); } if (helpRequested) { System.out.println(makeOptionString(ASEvaluator, searchMethod)); System.exit(0); } // set the attribute evaluator trainSelector.setEvaluator(ASEvaluator); // do the attribute selection trainSelector.SelectAttributes(train); // return the results string return trainSelector.toResultsString(); } /** * Assembles a text description of the attribute selection results. * * @return a string describing the results of attribute selection. 
*/ private String printSelectionResults () { StringBuffer text = new StringBuffer(); text.append("\n\n=== Attribute Selection on all input data ===\n\n" + "Search Method:\n"); text.append(m_searchMethod.toString()); text.append("\nAttribute "); if (m_ASEvaluator instanceof SubsetEvaluator) { text.append("Subset Evaluator ("); } else { text.append("Evaluator ("); } if (!(m_ASEvaluator instanceof UnsupervisedSubsetEvaluator) && !(m_ASEvaluator instanceof UnsupervisedAttributeEvaluator)) { text.append("supervised, "); text.append("Class ("); if (m_trainInstances.attribute(m_trainInstances.classIndex()) .isNumeric()) { text.append("numeric): "); } else { text.append("nominal): "); } text.append((m_trainInstances.classIndex() + 1) + " " + m_trainInstances.attribute(m_trainInstances .classIndex()).name() + "):\n"); } else { text.append("unsupervised):\n"); } text.append(m_ASEvaluator.toString() + "\n"); return text.toString(); } /** * Make up the help string giving all the command line options * * @param ASEvaluator the attribute evaluator to include options for * @param searchMethod the search method to include options for * @return a string detailing the valid command line options * @throws Exception if something goes wrong */ private static String makeOptionString (ASEvaluation ASEvaluator, ASSearch searchMethod) throws Exception { StringBuffer optionsText = new StringBuffer(""); // General options optionsText.append("\n\nGeneral options:\n\n"); optionsText.append("-h\n\tdisplay this help\n"); optionsText.append("-i <name of input file>\n"); optionsText.append("\tSets training file.\n"); optionsText.append("-c <class index>\n"); optionsText.append("\tSets the class index for supervised attribute\n"); optionsText.append("\tselection. 
Default=last column.\n"); optionsText.append("-s <class name>\n"); optionsText.append("\tSets search method for subset evaluators.\n"); optionsText.append("-x <number of folds>\n"); optionsText.append("\tPerform a cross validation.\n"); optionsText.append("-n <random number seed>\n"); optionsText.append("\tUse in conjunction with -x.\n"); // Get attribute evaluator-specific options if (ASEvaluator instanceof OptionHandler) { optionsText.append("\nOptions specific to " + ASEvaluator.getClass().getName() + ":\n\n"); Enumeration enu = ((OptionHandler)ASEvaluator).listOptions(); while (enu.hasMoreElements()) { Option option = (Option)enu.nextElement(); optionsText.append(option.synopsis() + '\n'); optionsText.append(option.description() + "\n"); } } if (searchMethod != null) { if (searchMethod instanceof OptionHandler) { optionsText.append("\nOptions specific to " + searchMethod.getClass().getName() + ":\n\n"); Enumeration enu = ((OptionHandler)searchMethod).listOptions(); while (enu.hasMoreElements()) { Option option = (Option)enu.nextElement(); optionsText.append(option.synopsis() + '\n'); optionsText.append(option.description() + "\n"); } } } else { if (ASEvaluator instanceof SubsetEvaluator) { System.out.println("No search method given."); } } return optionsText.toString(); } /** * Main method for testing this class. * * @param args the options */ public static void main (String[] args) { try { if (args.length == 0) { throw new Exception("The first argument must be the name of an " + "attribute/subset evaluator"); } String EvaluatorName = args[0]; args[0] = ""; ASEvaluation newEval = ASEvaluation.forName(EvaluatorName, null); System.out.println(SelectAttributes(newEval, args)); } catch (Exception e) { System.out.println(e.getMessage()); } } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
34,439
30.223935
81
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/AttributeSetEvaluator.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RELEASE INFORMATION (December 27, 2004) * * FCBF algorithm: * Template obtained from Weka * Developped for Weka by Zheng Alan Zhao * December 27, 2004 * * FCBF algorithm is a feature selection method based on Symmetrical Uncertainty * Measurement for relevance redundancy analysis. The details of FCBF algorithm are * in L. Yu and H. Liu. Feature selection for high-dimensional data: a fast * correlation-based filter solution. In Proceedings of the twentieth International * Conference on Machine Learning, pages 856--863, 2003. * * * CONTACT INFORMATION * * For algorithm implementation: * Zheng Zhao: zhaozheng at asu.edu * * For the algorithm: * Lei Yu: leiyu at asu.edu * Huan Liu: hliu at asu.edu * * Data Mining and Machine Learning Lab * Computer Science and Engineering Department * Fulton School of Engineering * Arizona State University * Tempe, AZ 85287 * * AttributeSetEvaluator.java * * Copyright (C) 2004 Data Mining and Machine Learning Lab, * Computer Science and Engineering Department, * Fulton School of Engineering, * Arizona State University * */ package weka.attributeSelection; /** * Abstract attribute set evaluator. 
* * @author Zheng Zhao: zhaozheng at asu.edu * @version $Revision: 8034 $ */ public abstract class AttributeSetEvaluator extends ASEvaluation { /** for serialization */ private static final long serialVersionUID = -5744881009422257389L; // =============== // Public methods. // =============== /** * evaluates an individual attribute * * @param attribute the index of the attribute to be evaluated * @return the "merit" of the attribute * @exception Exception if the attribute could not be evaluated */ public abstract double evaluateAttribute(int attribute) throws Exception; /** * Evaluates a set of attributes * * @param attributes an <code>int[]</code> value * @param classAttributes an <code>int[]</code> value * @return a <code>double</code> value * @exception Exception if an error occurs */ public abstract double evaluateAttribute(int[] attributes, int[] classAttributes) throws Exception; }
3,029
31.580645
87
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/AttributeTransformer.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * AttributeTransformer.java * Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import weka.core.Instance; import weka.core.Instances; /** * Abstract attribute transformer. Transforms the dataset. * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public interface AttributeTransformer { // =============== // Public methods. // =============== /** * Returns just the header for the transformed data (ie. an empty * set of instances. This is so that AttributeSelection can * determine the structure of the transformed data without actually * having to get all the transformed data through getTransformedData(). * @return the header of the transformed data. * @exception Exception if the header of the transformed data can't * be determined. 
*/ Instances transformedHeader() throws Exception; /** * Transform the supplied data set (assumed to be the same format * as the training data) * @return A set of instances representing the transformed data * @exception Exception if the attribute could not be evaluated */ Instances transformedData(Instances data) throws Exception; /** * Transforms an instance in the format of the original data to the * transformed space * @return a transformed instance * @exception Exception if the instance could not be transformed */ Instance convertInstance(Instance instance) throws Exception; }
2,191
32.723077
74
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/BestFirst.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * BestFirst.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import java.io.Serializable; import java.util.BitSet; import java.util.Enumeration; import java.util.Hashtable; import java.util.Vector; import weka.core.FastVector; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.Range; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.Utils; /** <!-- globalinfo-start --> * BestFirst:<br/> * <br/> * Searches the space of attribute subsets by greedy hillclimbing augmented with a backtracking facility. Setting the number of consecutive non-improving nodes allowed controls the level of backtracking done. Best first may start with the empty set of attributes and search forward, or start with the full set of attributes and search backward, or start at any point and search in both directions (by considering all possible single attribute additions and deletions at a given point).<br/> * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P &lt;start set&gt; * Specify a starting set of attributes. * Eg. 1,3,5-7.</pre> * * <pre> -D &lt;0 = backward | 1 = forward | 2 = bi-directional&gt; * Direction of search. 
(default = 1).</pre> * * <pre> -N &lt;num&gt; * Number of non-improving nodes to * consider before terminating search.</pre> * * <pre> -S &lt;num&gt; * Size of lookup cache for evaluated subsets. * Expressed as a multiple of the number of * attributes in the data set. (default = 1)</pre> * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * Martin Guetlein (cashing merit of expanded nodes) * @version $Revision: 8034 $ */ public class BestFirst extends ASSearch implements OptionHandler, StartSetHandler { /** for serialization */ static final long serialVersionUID = 7841338689536821867L; // Inner classes /** * Class for a node in a linked list. Used in best first search. * @author Mark Hall (mhall@cs.waikato.ac.nz) **/ public class Link2 implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = -8236598311516351420L; /* BitSet group; */ Object [] m_data; double m_merit; /** * Constructor */ public Link2 (Object [] data, double mer) { // group = (BitSet)gr.clone(); m_data = data; m_merit = mer; } /** Get a group */ public Object [] getData () { return m_data; } public String toString () { return ("Node: " + m_data.toString() + " " + m_merit); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } /** * Class for handling a linked list. Used in best first search. * Extends the Vector class. * @author Mark Hall (mhall@cs.waikato.ac.nz) **/ public class LinkedList2 extends FastVector { /** for serialization */ static final long serialVersionUID = 3250538292330398929L; /** Max number of elements in the list */ int m_MaxSize; // ================ // Public methods // ================ public LinkedList2 (int sz) { super(); m_MaxSize = sz; } /** * removes an element (Link) at a specific index from the list. * @param index the index of the element to be removed. 
**/ public void removeLinkAt (int index) throws Exception { if ((index >= 0) && (index < size())) { removeElementAt(index); } else { throw new Exception("index out of range (removeLinkAt)"); } } /** * returns the element (Link) at a specific index from the list. * @param index the index of the element to be returned. **/ public Link2 getLinkAt (int index) throws Exception { if (size() == 0) { throw new Exception("List is empty (getLinkAt)"); } else {if ((index >= 0) && (index < size())) { return ((Link2)(elementAt(index))); } else { throw new Exception("index out of range (getLinkAt)"); } } } /** * adds an element (Link) to the list. * @param data the attribute set specification * @param mer the "merit" of this attribute set **/ public void addToList (Object [] data, double mer) throws Exception { Link2 newL = new Link2(data, mer); if (size() == 0) { addElement(newL); } else {if (mer > ((Link2)(firstElement())).m_merit) { if (size() == m_MaxSize) { removeLinkAt(m_MaxSize - 1); } //---------- insertElementAt(newL, 0); } else { int i = 0; int size = size(); boolean done = false; //------------ // don't insert if list contains max elements an this // is worst than the last if ((size == m_MaxSize) && (mer <= ((Link2)(lastElement())).m_merit)) { } //--------------- else { while ((!done) && (i < size)) { if (mer > ((Link2)(elementAt(i))).m_merit) { if (size == m_MaxSize) { removeLinkAt(m_MaxSize - 1); } // --------------------- insertElementAt(newL, i); done = true; } else {if (i == size - 1) { addElement(newL); done = true; } else { i++; } } } } } } } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } // member variables /** maximum number of stale nodes before terminating search */ protected int m_maxStale; /** 0 == backward search, 1 == forward search, 2 == bidirectional */ protected int m_searchDirection; /** search direction: backward */ protected static final int SELECTION_BACKWARD = 0; /** search direction: forward */ protected static final int SELECTION_FORWARD = 1; /** search direction: bidirectional */ protected static final int SELECTION_BIDIRECTIONAL = 2; /** search directions */ public static final Tag [] TAGS_SELECTION = { new Tag(SELECTION_BACKWARD, "Backward"), new Tag(SELECTION_FORWARD, "Forward"), new Tag(SELECTION_BIDIRECTIONAL, "Bi-directional"), }; /** holds an array of starting attributes */ protected int[] m_starting; /** holds the start set for the search as a Range */ protected Range m_startRange; /** does the data have a class */ protected boolean m_hasClass; /** holds the class index */ protected int m_classIndex; /** number of attributes in the data */ protected int m_numAttribs; /** total number of subsets evaluated during a search */ protected int m_totalEvals; /** for debugging */ protected boolean m_debug; /** holds the merit of the best subset found */ protected double m_bestMerit; /** holds the maximum size of the lookup cache for evaluated subsets */ protected int m_cacheSize; /** * Returns a string describing this search method * @return a description of the search method suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "BestFirst:\n\n" +"Searches the space of attribute subsets by greedy hillclimbing " +"augmented with a backtracking facility. Setting the number of " +"consecutive non-improving nodes allowed controls the level of " +"backtracking done. 
Best first may start with the empty set of " +"attributes and search forward, or start with the full set of " +"attributes and search backward, or start at any point and search " +"in both directions (by considering all possible single attribute " +"additions and deletions at a given point).\n"; } /** *Constructor */ public BestFirst () { resetOptions(); } /** * Returns an enumeration describing the available options. * @return an enumeration of all the available options. * **/ public Enumeration listOptions () { Vector newVector = new Vector(4); newVector.addElement(new Option("\tSpecify a starting set of attributes." + "\n\tEg. 1,3,5-7." ,"P",1 , "-P <start set>")); newVector.addElement(new Option("\tDirection of search. (default = 1)." , "D", 1 , "-D <0 = backward | 1 = forward " + "| 2 = bi-directional>")); newVector.addElement(new Option("\tNumber of non-improving nodes to" + "\n\tconsider before terminating search." , "N", 1, "-N <num>")); newVector.addElement(new Option("\tSize of lookup cache for evaluated subsets." +"\n\tExpressed as a multiple of the number of" +"\n\tattributes in the data set. (default = 1)", "S", 1, "-S <num>")); return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P &lt;start set&gt; * Specify a starting set of attributes. * Eg. 1,3,5-7.</pre> * * <pre> -D &lt;0 = backward | 1 = forward | 2 = bi-directional&gt; * Direction of search. (default = 1).</pre> * * <pre> -N &lt;num&gt; * Number of non-improving nodes to * consider before terminating search.</pre> * * <pre> -S &lt;num&gt; * Size of lookup cache for evaluated subsets. * Expressed as a multiple of the number of * attributes in the data set. 
(default = 1)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported * **/ public void setOptions (String[] options) throws Exception { String optionString; resetOptions(); optionString = Utils.getOption('P', options); if (optionString.length() != 0) { setStartSet(optionString); } optionString = Utils.getOption('D', options); if (optionString.length() != 0) { setDirection(new SelectedTag(Integer.parseInt(optionString), TAGS_SELECTION)); } else { setDirection(new SelectedTag(SELECTION_FORWARD, TAGS_SELECTION)); } optionString = Utils.getOption('N', options); if (optionString.length() != 0) { setSearchTermination(Integer.parseInt(optionString)); } optionString = Utils.getOption('S', options); if (optionString.length() != 0) { setLookupCacheSize(Integer.parseInt(optionString)); } m_debug = Utils.getFlag('Z', options); } /** * Set the maximum size of the evaluated subset cache (hashtable). This is * expressed as a multiplier for the number of attributes in the data set. * (default = 1). * * @param size the maximum size of the hashtable */ public void setLookupCacheSize(int size) { if (size >= 0) { m_cacheSize = size; } } /** * Return the maximum size of the evaluated subset cache (expressed as a multiplier * for the number of attributes in a data set. * * @return the maximum size of the hashtable. */ public int getLookupCacheSize() { return m_cacheSize; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String lookupCacheSizeTipText() { return "Set the maximum size of the lookup cache of evaluated subsets. This is " +"expressed as a multiplier of the number of attributes in the data set. 
" +"(default = 1)."; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String startSetTipText() { return "Set the start point for the search. This is specified as a comma " +"seperated list off attribute indexes starting at 1. It can include " +"ranges. Eg. 1,2,5-9,17."; } /** * Sets a starting set of attributes for the search. It is the * search method's responsibility to report this start set (if any) * in its toString() method. * @param startSet a string containing a list of attributes (and or ranges), * eg. 1,2,6,10-15. * @throws Exception if start set can't be set. */ public void setStartSet (String startSet) throws Exception { m_startRange.setRanges(startSet); } /** * Returns a list of attributes (and or attribute ranges) as a String * @return a list of attributes (and or attribute ranges) */ public String getStartSet () { return m_startRange.getRanges(); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String searchTerminationTipText() { return "Set the amount of backtracking. Specify the number of "; } /** * Set the numnber of non-improving nodes to consider before terminating * search. * * @param t the number of non-improving nodes * @throws Exception if t is less than 1 */ public void setSearchTermination (int t) throws Exception { if (t < 1) { throw new Exception("Value of -N must be > 0."); } m_maxStale = t; } /** * Get the termination criterion (number of non-improving nodes). 
* * @return the number of non-improving nodes */ public int getSearchTermination () { return m_maxStale; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String directionTipText() { return "Set the direction of the search."; } /** * Set the search direction * * @param d the direction of the search */ public void setDirection (SelectedTag d) { if (d.getTags() == TAGS_SELECTION) { m_searchDirection = d.getSelectedTag().getID(); } } /** * Get the search direction * * @return the direction of the search */ public SelectedTag getDirection () { return new SelectedTag(m_searchDirection, TAGS_SELECTION); } /** * Gets the current settings of BestFirst. * @return an array of strings suitable for passing to setOptions() */ public String[] getOptions () { String[] options = new String[6]; int current = 0; if (!(getStartSet().equals(""))) { options[current++] = "-P"; options[current++] = ""+startSetToString(); } options[current++] = "-D"; options[current++] = "" + m_searchDirection; options[current++] = "-N"; options[current++] = "" + m_maxStale; while (current < options.length) { options[current++] = ""; } return options; } /** * converts the array of starting attributes to a string. This is * used by getOptions to return the actual attributes specified * as the starting set. This is better than using m_startRanges.getRanges() * as the same start set can be specified in different ways from the * command line---eg 1,2,3 == 1-3. This is to ensure that stuff that * is stored in a database is comparable. 
 * @return a comma separated list of individual attribute numbers as a String
 */
private String startSetToString() {
  StringBuffer FString = new StringBuffer();
  boolean didPrint;

  // No resolved start set yet: fall back to the raw user-supplied range
  // string (which may itself be empty).
  if (m_starting == null) {
    return getStartSet();
  }

  for (int i = 0; i < m_starting.length; i++) {
    didPrint = false;

    // Report every starting attribute except the class attribute (when a
    // class is present). Indexes are printed 1-based for the user.
    if ((m_hasClass == false) || (m_hasClass == true && i != m_classIndex)) {
      FString.append((m_starting[i] + 1));
      didPrint = true;
    }

    if (i == (m_starting.length - 1)) {
      FString.append(""); // last entry: no trailing separator
    } else {
      if (didPrint) {
        FString.append(",");
      }
    }
  }

  return FString.toString();
}

/**
 * returns a description of the search as a String
 * @return a description of the search
 */
public String toString () {
  StringBuffer BfString = new StringBuffer();
  BfString.append("\tBest first.\n\tStart set: ");

  if (m_starting == null) {
    BfString.append("no attributes\n");
  }
  else {
    BfString.append(startSetToString()+"\n");
  }

  BfString.append("\tSearch direction: ");

  if (m_searchDirection == SELECTION_BACKWARD) {
    BfString.append("backward\n");
  }
  else {
    if (m_searchDirection == SELECTION_FORWARD) {
      BfString.append("forward\n");
    }
    else {
      BfString.append("bi-directional\n");
    }
  }

  BfString.append("\tStale search after " + m_maxStale + " node expansions\n");
  BfString.append("\tTotal number of subsets evaluated: " + m_totalEvals + "\n");
  BfString.append("\tMerit of best subset found: "
                  +Utils.doubleToString(Math.abs(m_bestMerit),8,3)+"\n");
  return BfString.toString();
}

/**
 * Prints (to stdout) the 1-based indexes of the attributes whose bits are
 * set in the given subset. Debugging aid.
 * @param tt the bitset representing an attribute subset
 * @param numAttribs the number of attribute positions (bits) to examine
 */
protected void printGroup (BitSet tt, int numAttribs) {
  int i;

  for (i = 0; i < numAttribs; i++) {
    if (tt.get(i) == true) {
      System.out.print((i + 1) + " ");
    }
  }

  System.out.println();
}

/**
 * Searches the attribute subset space by best first search
 *
 * @param ASEval the attribute evaluator to guide the search
 * @param data the training instances.
* @return an array (not necessarily ordered) of selected attribute indexes * @throws Exception if the search can't be completed */ public int[] search (ASEvaluation ASEval, Instances data) throws Exception { m_totalEvals = 0; if (!(ASEval instanceof SubsetEvaluator)) { throw new Exception(ASEval.getClass().getName() + " is not a " + "Subset evaluator!"); } if (ASEval instanceof UnsupervisedSubsetEvaluator) { m_hasClass = false; } else { m_hasClass = true; m_classIndex = data.classIndex(); } SubsetEvaluator ASEvaluator = (SubsetEvaluator)ASEval; m_numAttribs = data.numAttributes(); int i, j; int best_size = 0; int size = 0; int done; int sd = m_searchDirection; BitSet best_group, temp_group; int stale; double best_merit; double merit; boolean z; boolean added; Link2 tl; Hashtable lookup = new Hashtable(m_cacheSize * m_numAttribs); int insertCount = 0; int cacheHits = 0; LinkedList2 bfList = new LinkedList2(m_maxStale); best_merit = -Double.MAX_VALUE; stale = 0; best_group = new BitSet(m_numAttribs); m_startRange.setUpper(m_numAttribs-1); if (!(getStartSet().equals(""))) { m_starting = m_startRange.getSelection(); } // If a starting subset has been supplied, then initialise the bitset if (m_starting != null) { for (i = 0; i < m_starting.length; i++) { if ((m_starting[i]) != m_classIndex) { best_group.set(m_starting[i]); } } best_size = m_starting.length; m_totalEvals++; } else { if (m_searchDirection == SELECTION_BACKWARD) { setStartSet("1-last"); m_starting = new int[m_numAttribs]; // init initial subset to all attributes for (i = 0, j = 0; i < m_numAttribs; i++) { if (i != m_classIndex) { best_group.set(i); m_starting[j++] = i; } } best_size = m_numAttribs - 1; m_totalEvals++; } } // evaluate the initial subset best_merit = ASEvaluator.evaluateSubset(best_group); // add the initial group to the list and the hash table Object [] best = new Object[1]; best[0] = best_group.clone(); bfList.addToList(best, best_merit); BitSet tt = (BitSet)best_group.clone(); String 
hashC = tt.toString(); lookup.put(hashC, new Double(best_merit)); while (stale < m_maxStale) { added = false; if (m_searchDirection == SELECTION_BIDIRECTIONAL) { // bi-directional search done = 2; sd = SELECTION_FORWARD; } else { done = 1; } // finished search? if (bfList.size() == 0) { stale = m_maxStale; break; } // copy the attribute set at the head of the list tl = bfList.getLinkAt(0); temp_group = (BitSet)(tl.getData()[0]); temp_group = (BitSet)temp_group.clone(); // remove the head of the list bfList.removeLinkAt(0); // count the number of bits set (attributes) int kk; for (kk = 0, size = 0; kk < m_numAttribs; kk++) { if (temp_group.get(kk)) { size++; } } do { for (i = 0; i < m_numAttribs; i++) { if (sd == SELECTION_FORWARD) { z = ((i != m_classIndex) && (!temp_group.get(i))); } else { z = ((i != m_classIndex) && (temp_group.get(i))); } if (z) { // set the bit (attribute to add/delete) if (sd == SELECTION_FORWARD) { temp_group.set(i); size++; } else { temp_group.clear(i); size--; } /* if this subset has been seen before, then it is already in the list (or has been fully expanded) */ tt = (BitSet)temp_group.clone(); hashC = tt.toString(); if (lookup.containsKey(hashC) == false) { merit = ASEvaluator.evaluateSubset(temp_group); m_totalEvals++; // insert this one in the hashtable if (insertCount > m_cacheSize * m_numAttribs) { lookup = new Hashtable(m_cacheSize * m_numAttribs); insertCount = 0; } hashC = tt.toString(); lookup.put(hashC, new Double(merit)); insertCount++; } else { merit = ((Double)lookup.get(hashC)).doubleValue(); cacheHits++; } // insert this one in the list Object[] add = new Object[1]; add[0] = tt.clone(); bfList.addToList(add, merit); if (m_debug) { System.out.print("Group: "); printGroup(tt, m_numAttribs); System.out.println("Merit: " + merit); } // is this better than the best? 
if (sd == SELECTION_FORWARD) { z = ((merit - best_merit) > 0.00001); } else { if (merit == best_merit) { z = (size < best_size); } else { z = (merit > best_merit); } } if (z) { added = true; stale = 0; best_merit = merit; // best_size = (size + best_size); best_size = size; best_group = (BitSet)(temp_group.clone()); } // unset this addition(deletion) if (sd == SELECTION_FORWARD) { temp_group.clear(i); size--; } else { temp_group.set(i); size++; } } } if (done == 2) { sd = SELECTION_BACKWARD; } done--; } while (done > 0); /* if we haven't added a new attribute subset then full expansion of this node hasen't resulted in anything better */ if (!added) { stale++; } } m_bestMerit = best_merit; return attributeList(best_group); } /** * Reset options to default values */ protected void resetOptions () { m_maxStale = 5; m_searchDirection = SELECTION_FORWARD; m_starting = null; m_startRange = new Range(); m_classIndex = -1; m_totalEvals = 0; m_cacheSize = 1; m_debug = false; } /** * converts a BitSet into a list of attribute indexes * @param group the BitSet to convert * @return an array of attribute indexes **/ protected int[] attributeList (BitSet group) { int count = 0; // count how many were selected for (int i = 0; i < m_numAttribs; i++) { if (group.get(i)) { count++; } } int[] list = new int[count]; count = 0; for (int i = 0; i < m_numAttribs; i++) { if (group.get(i)) { list[count++] = i; } } return list; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
24,657
25.457082
490
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/CfsSubsetEval.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * CfsSubsetEval.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import java.util.BitSet; import java.util.Enumeration; import java.util.HashSet; import java.util.Set; import java.util.Vector; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicInteger; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.ContingencyTables; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.ThreadSafe; import weka.core.Utils; import weka.filters.Filter; import weka.filters.supervised.attribute.Discretize; /** <!-- globalinfo-start --> * CfsSubsetEval :<br/> * <br/> * Evaluates the worth of a subset of attributes by considering the individual predictive ability of each feature along with the degree of redundancy between them.<br/> * <br/> * Subsets of features that are highly correlated with the class while having low 
intercorrelation are preferred.<br/> * <br/> * For more information see:<br/> * <br/> * M. A. Hall (1998). Correlation-based Feature Subset Selection for Machine Learning. Hamilton, New Zealand. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;phdthesis{Hall1998, * address = {Hamilton, New Zealand}, * author = {M. A. Hall}, * school = {University of Waikato}, * title = {Correlation-based Feature Subset Selection for Machine Learning}, * year = {1998} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -M * Treat missing values as a separate value.</pre> * * <pre> -L * Don't include locally predictive attributes.</pre> * * <pre> -Z * Precompute the full correlation matrix at the outset, rather than compute correlations lazily (as needed) during the search. Use this in conjuction with parallel processing in order to speed up a backward search.</pre> * * <pre> -P &lt;int&gt; * The size of the thread pool, for example, the number of cores in the CPU. (default 1) * </pre> * * <pre> -E &lt;int&gt; * The number of threads to use, which should be &gt;= size of thread pool. 
(default 1) * </pre> * * <pre> -D * Output debugging info.</pre> * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 9883 $ * @see Discretize */ public class CfsSubsetEval extends ASEvaluation implements SubsetEvaluator, ThreadSafe, OptionHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 747878400813276317L; /** The training instances */ private Instances m_trainInstances; /** Discretise attributes when class in nominal */ private Discretize m_disTransform; /** The class index */ private int m_classIndex; /** Is the class numeric */ private boolean m_isNumeric; /** Number of attributes in the training data */ private int m_numAttribs; /** Number of instances in the training data */ private int m_numInstances; /** Treat missing values as separate values */ private boolean m_missingSeparate; /** Include locally predictive attributes */ private boolean m_locallyPredictive; /** Holds the matrix of attribute correlations */ // private Matrix m_corr_matrix; private float[][] m_corr_matrix; /** Standard deviations of attributes (when using pearsons correlation) */ private double[] m_std_devs; /** Threshold for admitting locally predictive features */ private double m_c_Threshold; /** Output debugging info */ protected boolean m_debug; /** Number of entries in the correlation matrix */ protected int m_numEntries; /** Number of correlations actually computed */ protected AtomicInteger m_numFilled; protected boolean m_preComputeCorrelationMatrix; /** * The number of threads used to compute the correlation matrix. Used when * correlation matrix is precomputed */ protected int m_numThreads = 1; /** * The size of the thread pool. 
Usually set equal to the number of CPUs or CPU * cores available */ protected int m_poolSize = 1; /** Thread pool */ protected transient ExecutorService m_pool = null; /** * Returns a string describing this attribute evaluator * * @return a description of the evaluator suitable for displaying in the * explorer/experimenter gui */ public String globalInfo() { return "CfsSubsetEval :\n\nEvaluates the worth of a subset of attributes " + "by considering the individual predictive ability of each feature " + "along with the degree of redundancy between them.\n\n" + "Subsets of features that are highly correlated with the class " + "while having low intercorrelation are preferred.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing detailed * information about the technical background of this class, e.g., paper * reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.PHDTHESIS); result.setValue(Field.AUTHOR, "M. A. Hall"); result.setValue(Field.YEAR, "1998"); result.setValue(Field.TITLE, "Correlation-based Feature Subset Selection for Machine Learning"); result.setValue(Field.SCHOOL, "University of Waikato"); result.setValue(Field.ADDRESS, "Hamilton, New Zealand"); return result; } /** * Constructor */ public CfsSubsetEval() { resetOptions(); } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
* **/ @Override public Enumeration listOptions() { Vector newVector = new Vector(3); newVector.addElement(new Option("\tTreat missing values as a separate " + "value.", "M", 0, "-M")); newVector.addElement(new Option( "\tDon't include locally predictive attributes" + ".", "L", 0, "-L")); newVector.addElement(new Option( "\t" + preComputeCorrelationMatrixTipText(), "Z", 0, "-Z")); newVector.addElement(new Option( "\t" + poolSizeTipText() + " (default 1)\n", "P", 1, "-P <int>")); newVector.addElement(new Option("\t" + numThreadsTipText() + " (default 1)\n", "E", 1, "-E <int>")); newVector.addElement(new Option("\tOutput debugging info" + ".", "D", 0, "-D")); return newVector.elements(); } /** * Parses and sets a given list of options. * <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -M * Treat missing values as a separate value.</pre> * * <pre> -L * Don't include locally predictive attributes.</pre> * * <pre> -Z * Precompute the full correlation matrix at the outset, rather than compute correlations lazily (as needed) during the search. Use this in conjuction with parallel processing in order to speed up a backward search.</pre> * * <pre> -P &lt;int&gt; * The size of the thread pool, for example, the number of cores in the CPU. (default 1) * </pre> * * <pre> -E &lt;int&gt; * The number of threads to use, which should be &gt;= size of thread pool. 
(default 1) * </pre> * * <pre> -D * Output debugging info.</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported * **/ @Override public void setOptions(String[] options) throws Exception { resetOptions(); setMissingSeparate(Utils.getFlag('M', options)); setLocallyPredictive(!Utils.getFlag('L', options)); setPreComputeCorrelationMatrix(Utils.getFlag('Z', options)); String PoolSize = Utils.getOption('P', options); if (PoolSize.length() != 0) { setPoolSize(Integer.parseInt(PoolSize)); } else { setPoolSize(1); } String NumThreads = Utils.getOption('E', options); if (NumThreads.length() != 0) { setNumThreads(Integer.parseInt(NumThreads)); } else { setNumThreads(1); } setDebug(Utils.getFlag('D', options)); } /** * @return a string to describe the option */ public String preComputeCorrelationMatrixTipText() { return "Precompute the full correlation matrix at the outset, " + "rather than compute correlations lazily (as needed) " + "during the search. Use this in conjuction with " + "parallel processing in order to speed up a backward " + "search."; } /** * Set whether to pre-compute the full correlation matrix at the outset, * rather than computing individual correlations lazily (as needed) during the * search. * * @param p true if the correlation matrix is to be pre-computed at the outset */ public void setPreComputeCorrelationMatrix(boolean p) { m_preComputeCorrelationMatrix = p; } /** * Get whether to pre-compute the full correlation matrix at the outset, * rather than computing individual correlations lazily (as needed) during the * search. 
* * @return true if the correlation matrix is to be pre-computed at the outset */ public boolean getPreComputeCorrelationMatrix() { return m_preComputeCorrelationMatrix; } /** * @return a string to describe the option */ public String numThreadsTipText() { return "The number of threads to use, which should be >= size of thread pool."; } /** * Gets the number of threads. */ public int getNumThreads() { return m_numThreads; } /** * Sets the number of threads */ public void setNumThreads(int nT) { m_numThreads = nT; } /** * @return a string to describe the option */ public String poolSizeTipText() { return "The size of the thread pool, for example, the number of cores in the CPU."; } /** * Gets the number of threads. */ public int getPoolSize() { return m_poolSize; } /** * Sets the number of threads */ public void setPoolSize(int nT) { m_poolSize = nT; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String locallyPredictiveTipText() { return "Identify locally predictive attributes. Iteratively adds " + "attributes with the highest correlation with the class as long " + "as there is not already an attribute in the subset that has a " + "higher correlation with the attribute in question"; } /** * Include locally predictive attributes * * @param b true or false */ public void setLocallyPredictive(boolean b) { m_locallyPredictive = b; } /** * Return true if including locally predictive attributes * * @return true if locally predictive attributes are to be used */ public boolean getLocallyPredictive() { return m_locallyPredictive; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String missingSeparateTipText() { return "Treat missing as a separate value. 
Otherwise, counts for missing " + "values are distributed across other values in proportion to their " + "frequency."; } /** * Treat missing as a separate value * * @param b true or false */ public void setMissingSeparate(boolean b) { m_missingSeparate = b; } /** * Return true is missing is treated as a separate value * * @return true if missing is to be treated as a separate value */ public boolean getMissingSeparate() { return m_missingSeparate; } /** * Set whether to output debugging info * * @param d true if debugging info is to be output */ public void setDebug(boolean d) { m_debug = d; } /** * Set whether to output debugging info * * @return true if debugging info is to be output */ public boolean getDebug() { return m_debug; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String debugTipText() { return "Output debugging info"; } /** * Gets the current settings of CfsSubsetEval * * @return an array of strings suitable for passing to setOptions() */ @Override public String[] getOptions() { String[] options = new String[8]; int current = 0; if (getMissingSeparate()) { options[current++] = "-M"; } if (!getLocallyPredictive()) { options[current++] = "-L"; } if (getPreComputeCorrelationMatrix()) { options[current++] = "-Z"; } options[current++] = "-P"; options[current++] = "" + getPoolSize(); options[current++] = "-E"; options[current++] = "" + getNumThreads(); if (getDebug()) { options[current++] = "-D"; } while (current < options.length) { options[current++] = ""; } return options; } /** * Returns the capabilities of this evaluator. 
* * @return the capabilities of this evaluator * @see Capabilities */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Generates a attribute evaluator. Has to initialize all fields of the * evaluator that are not being set via options. * * CFS also discretises attributes (if necessary) and initializes the * correlation matrix. * * @param data set of instances serving as training data * @throws Exception if the evaluator has not been generated successfully */ @Override public void buildEvaluator(Instances data) throws Exception { // can evaluator handle data? 
getCapabilities().testWithFail(data); m_numEntries = 0; m_numFilled = new AtomicInteger(); m_trainInstances = new Instances(data); m_trainInstances.deleteWithMissingClass(); m_classIndex = m_trainInstances.classIndex(); m_numAttribs = m_trainInstances.numAttributes(); m_numInstances = m_trainInstances.numInstances(); m_isNumeric = m_trainInstances.attribute(m_classIndex).isNumeric(); if (!m_isNumeric) { m_disTransform = new Discretize(); m_disTransform.setUseBetterEncoding(true); m_disTransform.setInputFormat(m_trainInstances); m_trainInstances = Filter.useFilter(m_trainInstances, m_disTransform); if (m_debug) { System.err.println("Finished discretizing input data"); } } m_std_devs = new double[m_numAttribs]; m_corr_matrix = new float[m_numAttribs][]; for (int i = 0; i < m_numAttribs; i++) { m_corr_matrix[i] = new float[i + 1]; m_numEntries += (i + 1); } m_numEntries -= m_numAttribs; for (int i = 0; i < m_corr_matrix.length; i++) { m_corr_matrix[i][i] = 1.0f; m_std_devs[i] = 1.0; } for (int i = 0; i < m_numAttribs; i++) { for (int j = 0; j < m_corr_matrix[i].length - 1; j++) { m_corr_matrix[i][j] = -999; } } if (m_preComputeCorrelationMatrix && m_poolSize > 1) { m_pool = Executors.newFixedThreadPool(m_poolSize); Set<Future<Void>> results = new HashSet<Future<Void>>(); int numEntriesPerThread = (m_numEntries + m_numAttribs) / m_numThreads; numEntriesPerThread = numEntriesPerThread < 1 ? 
1 : numEntriesPerThread; int startRow = 0; int startCol = 0; int count = 0; for (int i = 0; i < m_corr_matrix.length; i++) { for (int j = 0; j < m_corr_matrix[i].length; j++) { count++; if (count == numEntriesPerThread || (i == m_corr_matrix.length - 1 && j == m_corr_matrix[i].length - 1)) { final int sR = startRow; final int sC = startCol; final int eR = i; final int eC = j; startRow = i; startCol = j; count = 0; Future<Void> future = m_pool.submit(new Callable<Void>() { @Override public Void call() throws Exception { if (m_debug) { System.err .println("Starting correlation computation task..."); } for (int i = sR; i <= eR; i++) { for (int j = (i == sR ? sC : 0); j < (i == eR ? eC : m_corr_matrix[i].length); j++) { if (m_corr_matrix[i][j] == -999) { float corr = correlate(i, j); m_corr_matrix[i][j] = corr; } } } if (m_debug) { System.err .println("Percentage of correlation matrix computed: " + Utils.doubleToString(((double) m_numFilled.get() / m_numEntries * 100.0), 2) + "%"); } return null; } }); results.add(future); } } } for (Future<Void> f : results) { f.get(); } // shut down the thread pool m_pool.shutdown(); } } /** * evaluates a subset of attributes * * @param subset a bitset representing the attribute subset to be evaluated * @return the merit * @throws Exception if the subset could not be evaluated */ @Override public double evaluateSubset(BitSet subset) throws Exception { double num = 0.0; double denom = 0.0; float corr; int larger, smaller; // do numerator for (int i = 0; i < m_numAttribs; i++) { if (i != m_classIndex) { if (subset.get(i)) { if (i > m_classIndex) { larger = i; smaller = m_classIndex; } else { smaller = i; larger = m_classIndex; } /* * int larger = (i > m_classIndex ? i : m_classIndex); int smaller = * (i > m_classIndex ? 
m_classIndex : i); */ if (m_corr_matrix[larger][smaller] == -999) { corr = correlate(i, m_classIndex); m_corr_matrix[larger][smaller] = corr; num += (m_std_devs[i] * corr); } else { num += (m_std_devs[i] * m_corr_matrix[larger][smaller]); } } } } // do denominator for (int i = 0; i < m_numAttribs; i++) { if (i != m_classIndex) { if (subset.get(i)) { denom += (1.0 * m_std_devs[i] * m_std_devs[i]); for (int j = 0; j < m_corr_matrix[i].length - 1; j++) { if (subset.get(j)) { if (m_corr_matrix[i][j] == -999) { corr = correlate(i, j); m_corr_matrix[i][j] = corr; denom += (2.0 * m_std_devs[i] * m_std_devs[j] * corr); } else { denom += (2.0 * m_std_devs[i] * m_std_devs[j] * m_corr_matrix[i][j]); } } } } } } if (denom < 0.0) { denom *= -1.0; } if (denom == 0.0) { return (0.0); } double merit = (num / Math.sqrt(denom)); if (merit < 0.0) { merit *= -1.0; } return merit; } private float correlate(int att1, int att2) { m_numFilled.addAndGet(1); if (!m_isNumeric) { return (float) symmUncertCorr(att1, att2); } boolean att1_is_num = (m_trainInstances.attribute(att1).isNumeric()); boolean att2_is_num = (m_trainInstances.attribute(att2).isNumeric()); if (att1_is_num && att2_is_num) { return (float) num_num(att1, att2); } else { if (att2_is_num) { return (float) num_nom2(att1, att2); } else { if (att1_is_num) { return (float) num_nom2(att2, att1); } } } return (float) nom_nom(att1, att2); } private double symmUncertCorr(int att1, int att2) { int i, j, k, ii, jj; int ni, nj; double sum = 0.0; double sumi[], sumj[]; double counts[][]; Instance inst; double corr_measure; boolean flag = false; double temp = 0.0; if (att1 == m_classIndex || att2 == m_classIndex) { flag = true; } ni = m_trainInstances.attribute(att1).numValues() + 1; nj = m_trainInstances.attribute(att2).numValues() + 1; counts = new double[ni][nj]; sumi = new double[ni]; sumj = new double[nj]; for (i = 0; i < ni; i++) { sumi[i] = 0.0; for (j = 0; j < nj; j++) { sumj[j] = 0.0; counts[i][j] = 0.0; } } // Fill the 
contingency table for (i = 0; i < m_numInstances; i++) { inst = m_trainInstances.instance(i); if (inst.isMissing(att1)) { ii = ni - 1; } else { ii = (int) inst.value(att1); } if (inst.isMissing(att2)) { jj = nj - 1; } else { jj = (int) inst.value(att2); } counts[ii][jj]++; } // get the row totals for (i = 0; i < ni; i++) { sumi[i] = 0.0; for (j = 0; j < nj; j++) { sumi[i] += counts[i][j]; sum += counts[i][j]; } } // get the column totals for (j = 0; j < nj; j++) { sumj[j] = 0.0; for (i = 0; i < ni; i++) { sumj[j] += counts[i][j]; } } // distribute missing counts if (!m_missingSeparate && (sumi[ni - 1] < m_numInstances) && (sumj[nj - 1] < m_numInstances)) { double[] i_copy = new double[sumi.length]; double[] j_copy = new double[sumj.length]; double[][] counts_copy = new double[sumi.length][sumj.length]; for (i = 0; i < ni; i++) { System.arraycopy(counts[i], 0, counts_copy[i], 0, sumj.length); } System.arraycopy(sumi, 0, i_copy, 0, sumi.length); System.arraycopy(sumj, 0, j_copy, 0, sumj.length); double total_missing = (sumi[ni - 1] + sumj[nj - 1] - counts[ni - 1][nj - 1]); // do the missing i's if (sumi[ni - 1] > 0.0) { for (j = 0; j < nj - 1; j++) { if (counts[ni - 1][j] > 0.0) { for (i = 0; i < ni - 1; i++) { temp = ((i_copy[i] / (sum - i_copy[ni - 1])) * counts[ni - 1][j]); counts[i][j] += temp; sumi[i] += temp; } counts[ni - 1][j] = 0.0; } } } sumi[ni - 1] = 0.0; // do the missing j's if (sumj[nj - 1] > 0.0) { for (i = 0; i < ni - 1; i++) { if (counts[i][nj - 1] > 0.0) { for (j = 0; j < nj - 1; j++) { temp = ((j_copy[j] / (sum - j_copy[nj - 1])) * counts[i][nj - 1]); counts[i][j] += temp; sumj[j] += temp; } counts[i][nj - 1] = 0.0; } } } sumj[nj - 1] = 0.0; // do the both missing if (counts[ni - 1][nj - 1] > 0.0 && total_missing != sum) { for (i = 0; i < ni - 1; i++) { for (j = 0; j < nj - 1; j++) { temp = (counts_copy[i][j] / (sum - total_missing)) * counts_copy[ni - 1][nj - 1]; counts[i][j] += temp; sumi[i] += temp; sumj[j] += temp; } } counts[ni - 1][nj - 1] = 
0.0; } } corr_measure = ContingencyTables.symmetricalUncertainty(counts); if (Utils.eq(corr_measure, 0.0)) { if (flag == true) { return (0.0); } else { return (1.0); } } else { return (corr_measure); } } private double num_num(int att1, int att2) { int i; Instance inst; double r, diff1, diff2, num = 0.0, sx = 0.0, sy = 0.0; double mx = m_trainInstances.meanOrMode(m_trainInstances.attribute(att1)); double my = m_trainInstances.meanOrMode(m_trainInstances.attribute(att2)); for (i = 0; i < m_numInstances; i++) { inst = m_trainInstances.instance(i); diff1 = (inst.isMissing(att1)) ? 0.0 : (inst.value(att1) - mx); diff2 = (inst.isMissing(att2)) ? 0.0 : (inst.value(att2) - my); num += (diff1 * diff2); sx += (diff1 * diff1); sy += (diff2 * diff2); } if (sx != 0.0) { if (m_std_devs[att1] == 1.0) { m_std_devs[att1] = Math.sqrt((sx / m_numInstances)); } } if (sy != 0.0) { if (m_std_devs[att2] == 1.0) { m_std_devs[att2] = Math.sqrt((sy / m_numInstances)); } } if ((sx * sy) > 0.0) { r = (num / (Math.sqrt(sx * sy))); return ((r < 0.0) ? -r : r); } else { if (att1 != m_classIndex && att2 != m_classIndex) { return 1.0; } else { return 0.0; } } } private double num_nom2(int att1, int att2) { int i, ii, k; double temp; Instance inst; int mx = (int) m_trainInstances .meanOrMode(m_trainInstances.attribute(att1)); double my = m_trainInstances.meanOrMode(m_trainInstances.attribute(att2)); double stdv_num = 0.0; double diff1, diff2; double r = 0.0, rr; int nx = (!m_missingSeparate) ? 
m_trainInstances.attribute(att1) .numValues() : m_trainInstances.attribute(att1).numValues() + 1; double[] prior_nom = new double[nx]; double[] stdvs_nom = new double[nx]; double[] covs = new double[nx]; for (i = 0; i < nx; i++) { stdvs_nom[i] = covs[i] = prior_nom[i] = 0.0; } // calculate frequencies (and means) of the values of the nominal // attribute for (i = 0; i < m_numInstances; i++) { inst = m_trainInstances.instance(i); if (inst.isMissing(att1)) { if (!m_missingSeparate) { ii = mx; } else { ii = nx - 1; } } else { ii = (int) inst.value(att1); } // increment freq for nominal prior_nom[ii]++; } for (k = 0; k < m_numInstances; k++) { inst = m_trainInstances.instance(k); // std dev of numeric attribute diff2 = (inst.isMissing(att2)) ? 0.0 : (inst.value(att2) - my); stdv_num += (diff2 * diff2); // for (i = 0; i < nx; i++) { if (inst.isMissing(att1)) { if (!m_missingSeparate) { temp = (i == mx) ? 1.0 : 0.0; } else { temp = (i == (nx - 1)) ? 1.0 : 0.0; } } else { temp = (i == inst.value(att1)) ? 1.0 : 0.0; } diff1 = (temp - (prior_nom[i] / m_numInstances)); stdvs_nom[i] += (diff1 * diff1); covs[i] += (diff1 * diff2); } } // calculate weighted correlation for (i = 0, temp = 0.0; i < nx; i++) { // calculate the weighted variance of the nominal temp += ((prior_nom[i] / m_numInstances) * (stdvs_nom[i] / m_numInstances)); if ((stdvs_nom[i] * stdv_num) > 0.0) { // System.out.println("Stdv :"+stdvs_nom[i]); rr = (covs[i] / (Math.sqrt(stdvs_nom[i] * stdv_num))); if (rr < 0.0) { rr = -rr; } r += ((prior_nom[i] / m_numInstances) * rr); } /* * if there is zero variance for the numeric att at a specific level of * the catergorical att then if neither is the class then make this * correlation at this level maximally bad i.e. 1.0. 
If either is the * class then maximally bad correlation is 0.0 */ else { if (att1 != m_classIndex && att2 != m_classIndex) { r += ((prior_nom[i] / m_numInstances) * 1.0); } } } // set the standard deviations for these attributes if necessary // if ((att1 != classIndex) && (att2 != classIndex)) // ============= if (temp != 0.0) { if (m_std_devs[att1] == 1.0) { m_std_devs[att1] = Math.sqrt(temp); } } if (stdv_num != 0.0) { if (m_std_devs[att2] == 1.0) { m_std_devs[att2] = Math.sqrt((stdv_num / m_numInstances)); } } if (r == 0.0) { if (att1 != m_classIndex && att2 != m_classIndex) { r = 1.0; } } return r; } private double nom_nom(int att1, int att2) { int i, j, ii, jj, z; double temp1, temp2; Instance inst; int mx = (int) m_trainInstances .meanOrMode(m_trainInstances.attribute(att1)); int my = (int) m_trainInstances .meanOrMode(m_trainInstances.attribute(att2)); double diff1, diff2; double r = 0.0, rr; int nx = (!m_missingSeparate) ? m_trainInstances.attribute(att1) .numValues() : m_trainInstances.attribute(att1).numValues() + 1; int ny = (!m_missingSeparate) ? 
m_trainInstances.attribute(att2) .numValues() : m_trainInstances.attribute(att2).numValues() + 1; double[][] prior_nom = new double[nx][ny]; double[] sumx = new double[nx]; double[] sumy = new double[ny]; double[] stdvsx = new double[nx]; double[] stdvsy = new double[ny]; double[][] covs = new double[nx][ny]; for (i = 0; i < nx; i++) { sumx[i] = stdvsx[i] = 0.0; } for (j = 0; j < ny; j++) { sumy[j] = stdvsy[j] = 0.0; } for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { covs[i][j] = prior_nom[i][j] = 0.0; } } // calculate frequencies (and means) of the values of the nominal // attribute for (i = 0; i < m_numInstances; i++) { inst = m_trainInstances.instance(i); if (inst.isMissing(att1)) { if (!m_missingSeparate) { ii = mx; } else { ii = nx - 1; } } else { ii = (int) inst.value(att1); } if (inst.isMissing(att2)) { if (!m_missingSeparate) { jj = my; } else { jj = ny - 1; } } else { jj = (int) inst.value(att2); } // increment freq for nominal prior_nom[ii][jj]++; sumx[ii]++; sumy[jj]++; } for (z = 0; z < m_numInstances; z++) { inst = m_trainInstances.instance(z); for (j = 0; j < ny; j++) { if (inst.isMissing(att2)) { if (!m_missingSeparate) { temp2 = (j == my) ? 1.0 : 0.0; } else { temp2 = (j == (ny - 1)) ? 1.0 : 0.0; } } else { temp2 = (j == inst.value(att2)) ? 1.0 : 0.0; } diff2 = (temp2 - (sumy[j] / m_numInstances)); stdvsy[j] += (diff2 * diff2); } // for (i = 0; i < nx; i++) { if (inst.isMissing(att1)) { if (!m_missingSeparate) { temp1 = (i == mx) ? 1.0 : 0.0; } else { temp1 = (i == (nx - 1)) ? 1.0 : 0.0; } } else { temp1 = (i == inst.value(att1)) ? 1.0 : 0.0; } diff1 = (temp1 - (sumx[i] / m_numInstances)); stdvsx[i] += (diff1 * diff1); for (j = 0; j < ny; j++) { if (inst.isMissing(att2)) { if (!m_missingSeparate) { temp2 = (j == my) ? 1.0 : 0.0; } else { temp2 = (j == (ny - 1)) ? 1.0 : 0.0; } } else { temp2 = (j == inst.value(att2)) ? 
1.0 : 0.0; } diff2 = (temp2 - (sumy[j] / m_numInstances)); covs[i][j] += (diff1 * diff2); } } } // calculate weighted correlation for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { if ((stdvsx[i] * stdvsy[j]) > 0.0) { // System.out.println("Stdv :"+stdvs_nom[i]); rr = (covs[i][j] / (Math.sqrt(stdvsx[i] * stdvsy[j]))); if (rr < 0.0) { rr = -rr; } r += ((prior_nom[i][j] / m_numInstances) * rr); } // if there is zero variance for either of the categorical atts then if // neither is the class then make this // correlation at this level maximally bad i.e. 1.0. If either is // the class then maximally bad correlation is 0.0 else { if (att1 != m_classIndex && att2 != m_classIndex) { r += ((prior_nom[i][j] / m_numInstances) * 1.0); } } } } // calculate weighted standard deviations for these attributes // (if necessary) for (i = 0, temp1 = 0.0; i < nx; i++) { temp1 += ((sumx[i] / m_numInstances) * (stdvsx[i] / m_numInstances)); } if (temp1 != 0.0) { if (m_std_devs[att1] == 1.0) { m_std_devs[att1] = Math.sqrt(temp1); } } for (j = 0, temp2 = 0.0; j < ny; j++) { temp2 += ((sumy[j] / m_numInstances) * (stdvsy[j] / m_numInstances)); } if (temp2 != 0.0) { if (m_std_devs[att2] == 1.0) { m_std_devs[att2] = Math.sqrt(temp2); } } if (r == 0.0) { if (att1 != m_classIndex && att2 != m_classIndex) { r = 1.0; } } return r; } /** * returns a string describing CFS * * @return the description as a string */ @Override public String toString() { StringBuffer text = new StringBuffer(); if (m_trainInstances == null) { text.append("CFS subset evaluator has not been built yet\n"); } else { text.append("\tCFS Subset Evaluator\n"); if (m_missingSeparate) { text.append("\tTreating missing values as a separate value\n"); } if (m_locallyPredictive) { text.append("\tIncluding locally predictive attributes\n"); } } return text.toString(); } private void addLocallyPredictive(BitSet best_group) { int i, j; boolean done = false; boolean ok = true; double temp_best = -1.0; float corr; j = 0; BitSet 
temp_group = (BitSet) best_group.clone(); int larger, smaller; while (!done) { temp_best = -1.0; // find best not already in group for (i = 0; i < m_numAttribs; i++) { if (i > m_classIndex) { larger = i; smaller = m_classIndex; } else { smaller = i; larger = m_classIndex; } /* * int larger = (i > m_classIndex ? i : m_classIndex); int smaller = (i * > m_classIndex ? m_classIndex : i); */ if ((!temp_group.get(i)) && (i != m_classIndex)) { if (m_corr_matrix[larger][smaller] == -999) { corr = correlate(i, m_classIndex); m_corr_matrix[larger][smaller] = corr; } if (m_corr_matrix[larger][smaller] > temp_best) { temp_best = m_corr_matrix[larger][smaller]; j = i; } } } if (temp_best == -1.0) { done = true; } else { ok = true; temp_group.set(j); // check the best against correlations with others already // in group for (i = 0; i < m_numAttribs; i++) { if (i > j) { larger = i; smaller = j; } else { larger = j; smaller = i; } /* * int larger = (i > j ? i : j); int smaller = (i > j ? j : i); */ if (best_group.get(i)) { if (m_corr_matrix[larger][smaller] == -999) { corr = correlate(i, j); m_corr_matrix[larger][smaller] = corr; } if (m_corr_matrix[larger][smaller] > temp_best - m_c_Threshold) { ok = false; break; } } } // if ok then add to best_group if (ok) { best_group.set(j); } } } } /** * Calls locallyPredictive in order to include locally predictive attributes * (if requested). 
* * @param attributeSet the set of attributes found by the search * @return a possibly ranked list of postprocessed attributes * @throws Exception if postprocessing fails for some reason */ @Override public int[] postProcess(int[] attributeSet) throws Exception { if (m_debug) { System.err.println("Percentage of correlation matrix computed " + "over the search: " + Utils.doubleToString( ((double) m_numFilled.get() / m_numEntries * 100.0), 2) + "%"); } int j = 0; if (!m_locallyPredictive) { // m_trainInstances = new Instances(m_trainInstances,0); return attributeSet; } BitSet bestGroup = new BitSet(m_numAttribs); for (int i = 0; i < attributeSet.length; i++) { bestGroup.set(attributeSet[i]); } addLocallyPredictive(bestGroup); // count how many are set for (int i = 0; i < m_numAttribs; i++) { if (bestGroup.get(i)) { j++; } } int[] newSet = new int[j]; j = 0; for (int i = 0; i < m_numAttribs; i++) { if (bestGroup.get(i)) { newSet[j++] = i; } } // m_trainInstances = new Instances(m_trainInstances,0); return newSet; } protected void resetOptions() { m_trainInstances = null; m_missingSeparate = false; m_locallyPredictive = true; m_c_Threshold = 0.0; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9883 $"); } /** * Main method for testing this class. * * @param args the options */ public static void main(String[] args) { runEvaluator(new CfsSubsetEval(), args); } }
39,145
26.743444
224
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/CheckAttributeSelection.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * CheckAttributeSelection.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import weka.core.Attribute; import weka.core.CheckScheme; import weka.core.FastVector; import weka.core.Instances; import weka.core.MultiInstanceCapabilitiesHandler; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SerializationHelper; import weka.core.SerializedObject; import weka.core.TestInstances; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import java.util.Enumeration; import java.util.Random; import java.util.Vector; /** * Class for examining the capabilities and finding problems with * attribute selection schemes. If you implement an attribute selection using * the WEKA.libraries, you should run the checks on it to ensure robustness * and correct operation. Passing all the tests of this object does not mean * bugs in the attribute selection don't exist, but this will help find some * common ones. 
<p/> * * Typical usage: <p/> * <code>java weka.attributeSelection.CheckAttributeSelection -W ASscheme_name * -- ASscheme_options </code><p/> * * CheckAttributeSelection reports on the following: * <ul> * <li> Scheme abilities * <ul> * <li> Possible command line options to the scheme </li> * <li> Whether the scheme can predict nominal, numeric, string, * date or relational class attributes. </li> * <li> Whether the scheme can handle numeric predictor attributes </li> * <li> Whether the scheme can handle nominal predictor attributes </li> * <li> Whether the scheme can handle string predictor attributes </li> * <li> Whether the scheme can handle date predictor attributes </li> * <li> Whether the scheme can handle relational predictor attributes </li> * <li> Whether the scheme can handle multi-instance data </li> * <li> Whether the scheme can handle missing predictor values </li> * <li> Whether the scheme can handle missing class values </li> * <li> Whether a nominal scheme only handles 2 class problems </li> * <li> Whether the scheme can handle instance weights </li> * </ul> * </li> * <li> Correct functioning * <ul> * <li> Correct initialisation during search (i.e. no result * changes when search is performed repeatedly) </li> * <li> Whether the scheme alters the data pased to it * (number of instances, instance order, instance weights, etc) </li> * </ul> * </li> * <li> Degenerate cases * <ul> * <li> building scheme with zero instances </li> * <li> all but one predictor attribute values missing </li> * <li> all predictor attribute values missing </li> * <li> all but one class values missing </li> * <li> all class values missing </li> * </ul> * </li> * </ul> * Running CheckAttributeSelection with the debug option set will output the * training dataset for any failed tests.<p/> * * The <code>weka.attributeSelection.AbstractAttributeSelectionTest</code> * uses this class to test all the schemes. Any changes here, have to be * checked in that abstract test class, too. 
<p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -S * Silent mode - prints nothing to stdout.</pre> * * <pre> -N &lt;num&gt; * The number of instances in the datasets (default 20).</pre> * * <pre> -nominal &lt;num&gt; * The number of nominal attributes (default 2).</pre> * * <pre> -nominal-values &lt;num&gt; * The number of values for nominal attributes (default 1).</pre> * * <pre> -numeric &lt;num&gt; * The number of numeric attributes (default 1).</pre> * * <pre> -string &lt;num&gt; * The number of string attributes (default 1).</pre> * * <pre> -date &lt;num&gt; * The number of date attributes (default 1).</pre> * * <pre> -relational &lt;num&gt; * The number of relational attributes (default 1).</pre> * * <pre> -num-instances-relational &lt;num&gt; * The number of instances in relational/bag attributes (default 10).</pre> * * <pre> -words &lt;comma-separated-list&gt; * The words to use in string attributes.</pre> * * <pre> -word-separators &lt;chars&gt; * The word separators to use in string attributes.</pre> * * <pre> -eval name [options] * Full name and options of the evaluator analyzed. * eg: weka.attributeSelection.CfsSubsetEval</pre> * * <pre> -search name [options] * Full name and options of the search method analyzed. * eg: weka.attributeSelection.Ranker</pre> * * <pre> -test &lt;eval|search&gt; * The scheme to test, either the evaluator or the search method. * (Default: eval)</pre> * * <pre> * Options specific to evaluator weka.attributeSelection.CfsSubsetEval: * </pre> * * <pre> -M * Treat missing values as a seperate value.</pre> * * <pre> -L * Don't include locally predictive attributes.</pre> * * <pre> * Options specific to search method weka.attributeSelection.Ranker: * </pre> * * <pre> -P &lt;start set&gt; * Specify a starting set of attributes. * Eg. 1,3,5-7. 
* Any starting attributes specified are * ignored during the ranking.</pre> * * <pre> -T &lt;threshold&gt; * Specify a theshold by which attributes * may be discarded from the ranking.</pre> * * <pre> -N &lt;num to select&gt; * Specify number of attributes to select</pre> * <!-- options-end --> * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @author FracPete (fracpete at waikato dot ac dot nz) * @version $Revision: 8034 $ * @see TestInstances */ public class CheckAttributeSelection extends CheckScheme { /* * Note about test methods: * - methods return array of booleans * - first index: success or not * - second index: acceptable or not (e.g., Exception is OK) * * FracPete (fracpete at waikato dot ac dot nz) */ /*** The evaluator to be examined */ protected ASEvaluation m_Evaluator = new CfsSubsetEval(); /*** The search method to be used */ protected ASSearch m_Search = new Ranker(); /** whether to test the evaluator (default) or the search method */ protected boolean m_TestEvaluator = true; /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ public Enumeration listOptions() { Vector result = new Vector(); Enumeration en = super.listOptions(); while (en.hasMoreElements()) result.addElement(en.nextElement()); result.addElement(new Option( "\tFull name and options of the evaluator analyzed.\n" +"\teg: weka.attributeSelection.CfsSubsetEval", "eval", 1, "-eval name [options]")); result.addElement(new Option( "\tFull name and options of the search method analyzed.\n" +"\teg: weka.attributeSelection.Ranker", "search", 1, "-search name [options]")); result.addElement(new Option( "\tThe scheme to test, either the evaluator or the search method.\n" +"\t(Default: eval)", "test", 1, "-test <eval|search>")); if ((m_Evaluator != null) && (m_Evaluator instanceof OptionHandler)) { result.addElement(new Option("", "", 0, "\nOptions specific to evaluator " + m_Evaluator.getClass().getName() + ":")); Enumeration enm = ((OptionHandler) m_Evaluator).listOptions(); while (enm.hasMoreElements()) result.addElement(enm.nextElement()); } if ((m_Search != null) && (m_Search instanceof OptionHandler)) { result.addElement(new Option("", "", 0, "\nOptions specific to search method " + m_Search.getClass().getName() + ":")); Enumeration enm = ((OptionHandler) m_Search).listOptions(); while (enm.hasMoreElements()) result.addElement(enm.nextElement()); } return result.elements(); } /** * Parses a given list of options. 
<p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -S * Silent mode - prints nothing to stdout.</pre> * * <pre> -N &lt;num&gt; * The number of instances in the datasets (default 20).</pre> * * <pre> -nominal &lt;num&gt; * The number of nominal attributes (default 2).</pre> * * <pre> -nominal-values &lt;num&gt; * The number of values for nominal attributes (default 1).</pre> * * <pre> -numeric &lt;num&gt; * The number of numeric attributes (default 1).</pre> * * <pre> -string &lt;num&gt; * The number of string attributes (default 1).</pre> * * <pre> -date &lt;num&gt; * The number of date attributes (default 1).</pre> * * <pre> -relational &lt;num&gt; * The number of relational attributes (default 1).</pre> * * <pre> -num-instances-relational &lt;num&gt; * The number of instances in relational/bag attributes (default 10).</pre> * * <pre> -words &lt;comma-separated-list&gt; * The words to use in string attributes.</pre> * * <pre> -word-separators &lt;chars&gt; * The word separators to use in string attributes.</pre> * * <pre> -eval name [options] * Full name and options of the evaluator analyzed. * eg: weka.attributeSelection.CfsSubsetEval</pre> * * <pre> -search name [options] * Full name and options of the search method analyzed. * eg: weka.attributeSelection.Ranker</pre> * * <pre> -test &lt;eval|search&gt; * The scheme to test, either the evaluator or the search method. * (Default: eval)</pre> * * <pre> * Options specific to evaluator weka.attributeSelection.CfsSubsetEval: * </pre> * * <pre> -M * Treat missing values as a seperate value.</pre> * * <pre> -L * Don't include locally predictive attributes.</pre> * * <pre> * Options specific to search method weka.attributeSelection.Ranker: * </pre> * * <pre> -P &lt;start set&gt; * Specify a starting set of attributes. * Eg. 1,3,5-7. 
* Any starting attributes specified are * ignored during the ranking.</pre> * * <pre> -T &lt;threshold&gt; * Specify a theshold by which attributes * may be discarded from the ranking.</pre> * * <pre> -N &lt;num to select&gt; * Specify number of attributes to select</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String tmpStr; String[] tmpOptions; super.setOptions(options); tmpStr = Utils.getOption("eval", options); tmpOptions = Utils.splitOptions(tmpStr); if (tmpOptions.length != 0) { tmpStr = tmpOptions[0]; tmpOptions[0] = ""; setEvaluator( (ASEvaluation) forName( "weka.attributeSelection", ASEvaluation.class, tmpStr, tmpOptions)); } tmpStr = Utils.getOption("search", options); tmpOptions = Utils.splitOptions(tmpStr); if (tmpOptions.length != 0) { tmpStr = tmpOptions[0]; tmpOptions[0] = ""; setSearch( (ASSearch) forName( "weka.attributeSelection", ASSearch.class, tmpStr, tmpOptions)); } tmpStr = Utils.getOption("test", options); setTestEvaluator(!tmpStr.equalsIgnoreCase("search")); } /** * Gets the current settings of the CheckAttributeSelection. 
* * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { Vector result; String[] options; int i; result = new Vector(); options = super.getOptions(); for (i = 0; i < options.length; i++) result.add(options[i]); result.add("-eval"); if (getEvaluator() instanceof OptionHandler) result.add( getEvaluator().getClass().getName() + " " + Utils.joinOptions(((OptionHandler) getEvaluator()).getOptions())); else result.add( getEvaluator().getClass().getName()); result.add("-search"); if (getSearch() instanceof OptionHandler) result.add( getSearch().getClass().getName() + " " + Utils.joinOptions(((OptionHandler) getSearch()).getOptions())); else result.add( getSearch().getClass().getName()); result.add("-test"); if (getTestEvaluator()) result.add("eval"); else result.add("search"); return (String[]) result.toArray(new String[result.size()]); } /** * Begin the tests, reporting results to System.out */ public void doTests() { if (getTestObject() == null) { println("\n=== No scheme set ==="); return; } println("\n=== Check on scheme: " + getTestObject().getClass().getName() + " ===\n"); // Start tests m_ClasspathProblems = false; println("--> Checking for interfaces"); canTakeOptions(); boolean weightedInstancesHandler = weightedInstancesHandler()[0]; boolean multiInstanceHandler = multiInstanceHandler()[0]; println("--> Scheme tests"); declaresSerialVersionUID(); testsPerClassType(Attribute.NOMINAL, weightedInstancesHandler, multiInstanceHandler); testsPerClassType(Attribute.NUMERIC, weightedInstancesHandler, multiInstanceHandler); testsPerClassType(Attribute.DATE, weightedInstancesHandler, multiInstanceHandler); testsPerClassType(Attribute.STRING, weightedInstancesHandler, multiInstanceHandler); testsPerClassType(Attribute.RELATIONAL, weightedInstancesHandler, multiInstanceHandler); } /** * Set the evaluator to test. * * @param value the evaluator to use. 
*/ public void setEvaluator(ASEvaluation value) { m_Evaluator = value; } /** * Get the current evaluator * * @return the current evaluator */ public ASEvaluation getEvaluator() { return m_Evaluator; } /** * Set the search method to test. * * @param value the search method to use. */ public void setSearch(ASSearch value) { m_Search = value; } /** * Get the current search method * * @return the current search method */ public ASSearch getSearch() { return m_Search; } /** * Sets whether the evaluator or the search method is being tested. * * @param value if true then the evaluator will be tested */ public void setTestEvaluator(boolean value) { m_TestEvaluator = value; } /** * Gets whether the evaluator is being tested or the search method. * * @return true if the evaluator is being tested */ public boolean getTestEvaluator() { return m_TestEvaluator; } /** * returns either the evaluator or the search method. * * @return the object to be tested * @see #m_TestEvaluator */ protected Object getTestObject() { if (getTestEvaluator()) return getEvaluator(); else return getSearch(); } /** * returns deep copies of the given object * * @param obj the object to copy * @param num the number of copies * @return the deep copies * @throws Exception if copying fails */ protected Object[] makeCopies(Object obj, int num) throws Exception { if (obj == null) throw new Exception("No object set"); Object[] objs = new Object[num]; SerializedObject so = new SerializedObject(obj); for(int i = 0; i < objs.length; i++) { objs[i] = so.getObject(); } return objs; } /** * Performs a attribute selection with the given search and evaluation scheme * on the provided data. The generated AttributeSelection object is returned. 
* * @param search the search scheme to use * @param eval the evaluator to use * @param data the data to work on * @return the used attribute selection object * @throws Exception if the attribute selection fails */ protected AttributeSelection search(ASSearch search, ASEvaluation eval, Instances data) throws Exception { AttributeSelection result; result = new AttributeSelection(); result.setSeed(42); result.setSearch(search); result.setEvaluator(eval); result.SelectAttributes(data); return result; } /** * Run a battery of tests for a given class attribute type * * @param classType true if the class attribute should be numeric * @param weighted true if the scheme says it handles weights * @param multiInstance true if the scheme handles multi-instance data */ protected void testsPerClassType(int classType, boolean weighted, boolean multiInstance) { boolean PNom = canPredict(true, false, false, false, false, multiInstance, classType)[0]; boolean PNum = canPredict(false, true, false, false, false, multiInstance, classType)[0]; boolean PStr = canPredict(false, false, true, false, false, multiInstance, classType)[0]; boolean PDat = canPredict(false, false, false, true, false, multiInstance, classType)[0]; boolean PRel; if (!multiInstance) PRel = canPredict(false, false, false, false, true, multiInstance, classType)[0]; else PRel = false; if (PNom || PNum || PStr || PDat || PRel) { if (weighted) instanceWeights(PNom, PNum, PStr, PDat, PRel, multiInstance, classType); if (classType == Attribute.NOMINAL) canHandleNClasses(PNom, PNum, PStr, PDat, PRel, multiInstance, 4); if (!multiInstance) { canHandleClassAsNthAttribute(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, 0); canHandleClassAsNthAttribute(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, 1); } canHandleZeroTraining(PNom, PNum, PStr, PDat, PRel, multiInstance, classType); boolean handleMissingPredictors = canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, true, false, 20)[0]; if 
(handleMissingPredictors) canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, true, false, 100); boolean handleMissingClass = canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, false, true, 20)[0]; if (handleMissingClass) canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, false, true, 100); correctSearchInitialisation(PNom, PNum, PStr, PDat, PRel, multiInstance, classType); datasetIntegrity(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, handleMissingPredictors, handleMissingClass); } } /** * Checks whether the scheme can take command line options. * * @return index 0 is true if the scheme can take options */ protected boolean[] canTakeOptions() { boolean[] result = new boolean[2]; print("options..."); if (getTestObject() instanceof OptionHandler) { println("yes"); if (m_Debug) { println("\n=== Full report ==="); Enumeration enu = ((OptionHandler) getTestObject()).listOptions(); while (enu.hasMoreElements()) { Option option = (Option) enu.nextElement(); print(option.synopsis() + "\n" + option.description() + "\n"); } println("\n"); } result[0] = true; } else { println("no"); result[0] = false; } return result; } /** * Checks whether the scheme says it can handle instance weights. * * @return true if the scheme handles instance weights */ protected boolean[] weightedInstancesHandler() { boolean[] result = new boolean[2]; print("weighted instances scheme..."); if (getTestObject() instanceof WeightedInstancesHandler) { println("yes"); result[0] = true; } else { println("no"); result[0] = false; } return result; } /** * Checks whether the scheme handles multi-instance data. 
* * @return true if the scheme handles multi-instance data */ protected boolean[] multiInstanceHandler() { boolean[] result = new boolean[2]; print("multi-instance scheme..."); if (getTestObject() instanceof MultiInstanceCapabilitiesHandler) { println("yes"); result[0] = true; } else { println("no"); result[0] = false; } return result; } /** * tests for a serialVersionUID. Fails in case the schemes don't declare * a UID (both must!). * * @return index 0 is true if the scheme declares a UID */ protected boolean[] declaresSerialVersionUID() { boolean[] result = new boolean[2]; boolean eval; boolean search; print("serialVersionUID..."); eval = !SerializationHelper.needsUID(m_Evaluator.getClass()); search = !SerializationHelper.needsUID(m_Search.getClass()); result[0] = eval && search; if (result[0]) println("yes"); else println("no"); return result; } /** * Checks basic prediction of the scheme, for simple non-troublesome * datasets. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NOMINAL, NUMERIC, etc.) 
* @return index 0 is true if the test was passed, index 1 is true if test * was acceptable */ protected boolean[] canPredict( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType) { print("basic predict"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); FastVector accepts = new FastVector(); accepts.addElement("unary"); accepts.addElement("binary"); accepts.addElement("nominal"); accepts.addElement("numeric"); accepts.addElement("string"); accepts.addElement("date"); accepts.addElement("relational"); accepts.addElement("multi-instance"); accepts.addElement("not in classpath"); int numTrain = getNumInstances(), numClasses = 2, missingLevel = 0; boolean predictorMissing = false, classMissing = false; return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType, missingLevel, predictorMissing, classMissing, numTrain, numClasses, accepts); } /** * Checks whether nominal schemes can handle more than two classes. * If a scheme is only designed for two-class problems it should * throw an appropriate exception for multi-class problems. 
* * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param numClasses the number of classes to test * @return index 0 is true if the test was passed, index 1 is true if test * was acceptable */ protected boolean[] canHandleNClasses( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int numClasses) { print("more than two class problems"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, Attribute.NOMINAL); print("..."); FastVector accepts = new FastVector(); accepts.addElement("number"); accepts.addElement("class"); int numTrain = getNumInstances(), missingLevel = 0; boolean predictorMissing = false, classMissing = false; return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, Attribute.NOMINAL, missingLevel, predictorMissing, classMissing, numTrain, numClasses, accepts); } /** * Checks whether the scheme can handle class attributes as Nth attribute. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) 
* @param classIndex the index of the class attribute (0-based, -1 means last attribute) * @return index 0 is true if the test was passed, index 1 is true if test * was acceptable * @see TestInstances#CLASS_IS_LAST */ protected boolean[] canHandleClassAsNthAttribute( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType, int classIndex) { if (classIndex == TestInstances.CLASS_IS_LAST) print("class attribute as last attribute"); else print("class attribute as " + (classIndex + 1) + ". attribute"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); FastVector accepts = new FastVector(); int numTrain = getNumInstances(), numClasses = 2, missingLevel = 0; boolean predictorMissing = false, classMissing = false; return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType, classIndex, missingLevel, predictorMissing, classMissing, numTrain, numClasses, accepts); } /** * Checks whether the scheme can handle zero training instances. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) 
* @return index 0 is true if the test was passed, index 1 is true if test * was acceptable */ protected boolean[] canHandleZeroTraining( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType) { print("handle zero training instances"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); FastVector accepts = new FastVector(); accepts.addElement("train"); accepts.addElement("value"); int numTrain = 0, numClasses = 2, missingLevel = 0; boolean predictorMissing = false, classMissing = false; return runBasicTest( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType, missingLevel, predictorMissing, classMissing, numTrain, numClasses, accepts); } /** * Checks whether the scheme correctly initialises models when * ASSearch.search is called. This test calls search with * one training dataset. ASSearch is then called on a training set with * different structure, and then again with the original training set. * If the equals method of the ASEvaluation class returns false, this is * noted as incorrect search initialisation. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) 
* @return index 0 is true if the test was passed, index 1 is always false */ protected boolean[] correctSearchInitialisation( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType) { boolean[] result = new boolean[2]; print("correct initialisation during search"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); int numTrain = getNumInstances(), numClasses = 2, missingLevel = 0; boolean predictorMissing = false, classMissing = false; Instances train1 = null; Instances train2 = null; ASSearch search = null; ASEvaluation evaluation1A = null; ASEvaluation evaluation1B = null; ASEvaluation evaluation2 = null; AttributeSelection attsel1A = null; AttributeSelection attsel1B = null; int stage = 0; try { // Make two train sets with different numbers of attributes train1 = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal() : 0, numericPredictor ? getNumNumeric() : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? getNumRelational() : 0, numClasses, classType, multiInstance); train2 = makeTestDataset(84, numTrain, nominalPredictor ? getNumNominal() + 1 : 0, numericPredictor ? getNumNumeric() + 1 : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? 
getNumRelational() : 0, numClasses, classType, multiInstance); if (missingLevel > 0) { addMissing(train1, missingLevel, predictorMissing, classMissing); addMissing(train2, missingLevel, predictorMissing, classMissing); } search = ASSearch.makeCopies(getSearch(), 1)[0]; evaluation1A = ASEvaluation.makeCopies(getEvaluator(), 1)[0]; evaluation1B = ASEvaluation.makeCopies(getEvaluator(), 1)[0]; evaluation2 = ASEvaluation.makeCopies(getEvaluator(), 1)[0]; } catch (Exception ex) { throw new Error("Error setting up for tests: " + ex.getMessage()); } try { stage = 0; attsel1A = search(search, evaluation1A, train1); stage = 1; search(search, evaluation2, train2); stage = 2; attsel1B = search(search, evaluation1B, train1); stage = 3; if (!attsel1A.toResultsString().equals(attsel1B.toResultsString())) { if (m_Debug) { println( "\n=== Full report ===\n" + "\nFirst search\n" + attsel1A.toResultsString() + "\n\n"); println( "\nSecond search\n" + attsel1B.toResultsString() + "\n\n"); } throw new Exception("Results differ between search calls"); } println("yes"); result[0] = true; if (false && m_Debug) { println( "\n=== Full report ===\n" + "\nFirst search\n" + evaluation1A.toString() + "\n\n"); println( "\nSecond search\n" + evaluation1B.toString() + "\n\n"); } } catch (Exception ex) { println("no"); result[0] = false; if (m_Debug) { println("\n=== Full Report ==="); print("Problem during training"); switch (stage) { case 0: print(" of dataset 1"); break; case 1: print(" of dataset 2"); break; case 2: print(" of dataset 1 (2nd build)"); break; case 3: print(", comparing results from builds of dataset 1"); break; } println(": " + ex.getMessage() + "\n"); println("here are the datasets:\n"); println("=== Train1 Dataset ===\n" + train1.toString() + "\n"); println("=== Train2 Dataset ===\n" + train2.toString() + "\n"); } } return result; } /** * Checks basic missing value handling of the scheme. 
If the missing * values cause an exception to be thrown by the scheme, this will be * recorded. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) * @param predictorMissing true if the missing values may be in * the predictors * @param classMissing true if the missing values may be in the class * @param missingLevel the percentage of missing values * @return index 0 is true if the test was passed, index 1 is true if test * was acceptable */ protected boolean[] canHandleMissing( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType, boolean predictorMissing, boolean classMissing, int missingLevel) { if (missingLevel == 100) print("100% "); print("missing"); if (predictorMissing) { print(" predictor"); if (classMissing) print(" and"); } if (classMissing) print(" class"); print(" values"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); FastVector accepts = new FastVector(); accepts.addElement("missing"); accepts.addElement("value"); accepts.addElement("train"); accepts.addElement("no attributes"); int numTrain = getNumInstances(), numClasses = 2; return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType, missingLevel, predictorMissing, classMissing, numTrain, numClasses, accepts); } /** * Checks whether the scheme can handle instance weights. 
* This test compares the scheme performance on two datasets * that are identical except for the training weights. If the * results change, then the scheme must be using the weights. It * may be possible to get a false positive from this test if the * weight changes aren't significant enough to induce a change * in scheme performance (but the weights are chosen to minimize * the likelihood of this). * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) * @return index 0 true if the test was passed */ protected boolean[] instanceWeights( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType) { print("scheme uses instance weights"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); int numTrain = 2*getNumInstances(), numClasses = 2, missingLevel = 0; boolean predictorMissing = false, classMissing = false; boolean[] result = new boolean[2]; Instances train = null; ASSearch[] search = null; ASEvaluation evaluationB = null; ASEvaluation evaluationI = null; AttributeSelection attselB = null; AttributeSelection attselI = null; boolean evalFail = false; try { train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal() + 1 : 0, numericPredictor ? getNumNumeric() + 1 : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? 
getNumRelational() : 0, numClasses, classType, multiInstance); if (missingLevel > 0) addMissing(train, missingLevel, predictorMissing, classMissing); search = ASSearch.makeCopies(getSearch(), 2); evaluationB = ASEvaluation.makeCopies(getEvaluator(), 1)[0]; evaluationI = ASEvaluation.makeCopies(getEvaluator(), 1)[0]; attselB = search(search[0], evaluationB, train); } catch (Exception ex) { throw new Error("Error setting up for tests: " + ex.getMessage()); } try { // Now modify instance weights and re-built/test for (int i = 0; i < train.numInstances(); i++) { train.instance(i).setWeight(0); } Random random = new Random(1); for (int i = 0; i < train.numInstances() / 2; i++) { int inst = Math.abs(random.nextInt()) % train.numInstances(); int weight = Math.abs(random.nextInt()) % 10 + 1; train.instance(inst).setWeight(weight); } attselI = search(search[1], evaluationI, train); if (attselB.toResultsString().equals(attselI.toResultsString())) { // println("no"); evalFail = true; throw new Exception("evalFail"); } println("yes"); result[0] = true; } catch (Exception ex) { println("no"); result[0] = false; if (m_Debug) { println("\n=== Full Report ==="); if (evalFail) { println("Results don't differ between non-weighted and " + "weighted instance models."); println("Here are the results:\n"); println("\nboth methods\n"); println(evaluationB.toString()); } else { print("Problem during training"); println(": " + ex.getMessage() + "\n"); } println("Here is the dataset:\n"); println("=== Train Dataset ===\n" + train.toString() + "\n"); println("=== Train Weights ===\n"); for (int i = 0; i < train.numInstances(); i++) { println(" " + (i + 1) + " " + train.instance(i).weight()); } } } return result; } /** * Checks whether the scheme alters the training dataset during * training. If the scheme needs to modify the training * data it should take a copy of the training data. 
Currently checks * for changes to header structure, number of instances, order of * instances, instance weights. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) * @param predictorMissing true if we know the scheme can handle * (at least) moderate missing predictor values * @param classMissing true if we know the scheme can handle * (at least) moderate missing class values * @return index 0 is true if the test was passed */ protected boolean[] datasetIntegrity( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType, boolean predictorMissing, boolean classMissing) { print("scheme doesn't alter original datasets"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); int numTrain = getNumInstances(), numClasses = 2, missingLevel = 20; boolean[] result = new boolean[2]; Instances train = null; Instances trainCopy = null; ASSearch search = null; ASEvaluation evaluation = null; try { train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal() : 0, numericPredictor ? getNumNumeric() : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? 
getNumRelational() : 0, numClasses, classType, multiInstance); if (missingLevel > 0) addMissing(train, missingLevel, predictorMissing, classMissing); search = ASSearch.makeCopies(getSearch(), 1)[0]; evaluation = ASEvaluation.makeCopies(getEvaluator(), 1)[0]; trainCopy = new Instances(train); } catch (Exception ex) { throw new Error("Error setting up for tests: " + ex.getMessage()); } try { search(search, evaluation, trainCopy); compareDatasets(train, trainCopy); println("yes"); result[0] = true; } catch (Exception ex) { println("no"); result[0] = false; if (m_Debug) { println("\n=== Full Report ==="); print("Problem during training"); println(": " + ex.getMessage() + "\n"); println("Here are the datasets:\n"); println("=== Train Dataset (original) ===\n" + trainCopy.toString() + "\n"); println("=== Train Dataset ===\n" + train.toString() + "\n"); } } return result; } /** * Runs a text on the datasets with the given characteristics. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) 
* @param missingLevel the percentage of missing values * @param predictorMissing true if the missing values may be in * the predictors * @param classMissing true if the missing values may be in the class * @param numTrain the number of instances in the training set * @param numClasses the number of classes * @param accepts the acceptable string in an exception * @return index 0 is true if the test was passed, index 1 is true if test * was acceptable */ protected boolean[] runBasicTest(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType, int missingLevel, boolean predictorMissing, boolean classMissing, int numTrain, int numClasses, FastVector accepts) { return runBasicTest( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType, TestInstances.CLASS_IS_LAST, missingLevel, predictorMissing, classMissing, numTrain, numClasses, accepts); } /** * Runs a text on the datasets with the given characteristics. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) 
* @param classIndex the attribute index of the class * @param missingLevel the percentage of missing values * @param predictorMissing true if the missing values may be in * the predictors * @param classMissing true if the missing values may be in the class * @param numTrain the number of instances in the training set * @param numClasses the number of classes * @param accepts the acceptable string in an exception * @return index 0 is true if the test was passed, index 1 is true if test * was acceptable */ protected boolean[] runBasicTest(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType, int classIndex, int missingLevel, boolean predictorMissing, boolean classMissing, int numTrain, int numClasses, FastVector accepts) { boolean[] result = new boolean[2]; Instances train = null; ASSearch search = null; ASEvaluation evaluation = null; try { train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal() : 0, numericPredictor ? getNumNumeric() : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? 
getNumRelational() : 0, numClasses, classType, classIndex, multiInstance); if (missingLevel > 0) addMissing(train, missingLevel, predictorMissing, classMissing); search = ASSearch.makeCopies(getSearch(), 1)[0]; evaluation = ASEvaluation.makeCopies(getEvaluator(), 1)[0]; } catch (Exception ex) { ex.printStackTrace(); throw new Error("Error setting up for tests: " + ex.getMessage()); } try { search(search, evaluation, train); println("yes"); result[0] = true; } catch (Exception ex) { boolean acceptable = false; String msg; if (ex.getMessage() == null) msg = ""; else msg = ex.getMessage().toLowerCase(); if (msg.indexOf("not in classpath") > -1) m_ClasspathProblems = true; for (int i = 0; i < accepts.size(); i++) { if (msg.indexOf((String)accepts.elementAt(i)) >= 0) { acceptable = true; } } println("no" + (acceptable ? " (OK error message)" : "")); result[1] = acceptable; if (m_Debug) { println("\n=== Full Report ==="); print("Problem during training"); println(": " + ex.getMessage() + "\n"); if (!acceptable) { if (accepts.size() > 0) { print("Error message doesn't mention "); for (int i = 0; i < accepts.size(); i++) { if (i != 0) { print(" or "); } print('"' + (String)accepts.elementAt(i) + '"'); } } println("here is the dataset:\n"); println("=== Train Dataset ===\n" + train.toString() + "\n"); } } } return result; } /** * Make a simple set of instances, which can later be modified * for use in specific tests. * * @param seed the random number seed * @param numInstances the number of instances to generate * @param numNominal the number of nominal attributes * @param numNumeric the number of numeric attributes * @param numString the number of string attributes * @param numDate the number of date attributes * @param numRelational the number of relational attributes * @param numClasses the number of classes (if nominal class) * @param classType the class type (NUMERIC, NOMINAL, etc.) 
* @param multiInstance whether the dataset should a multi-instance dataset * @return the test dataset * @throws Exception if the dataset couldn't be generated * @see #process(Instances) */ protected Instances makeTestDataset(int seed, int numInstances, int numNominal, int numNumeric, int numString, int numDate, int numRelational, int numClasses, int classType, boolean multiInstance) throws Exception { return makeTestDataset( seed, numInstances, numNominal, numNumeric, numString, numDate, numRelational, numClasses, classType, TestInstances.CLASS_IS_LAST, multiInstance); } /** * Make a simple set of instances with variable position of the class * attribute, which can later be modified for use in specific tests. * * @param seed the random number seed * @param numInstances the number of instances to generate * @param numNominal the number of nominal attributes * @param numNumeric the number of numeric attributes * @param numString the number of string attributes * @param numDate the number of date attributes * @param numRelational the number of relational attributes * @param numClasses the number of classes (if nominal class) * @param classType the class type (NUMERIC, NOMINAL, etc.) 
* @param classIndex the index of the class (0-based, -1 as last) * @param multiInstance whether the dataset should a multi-instance dataset * @return the test dataset * @throws Exception if the dataset couldn't be generated * @see TestInstances#CLASS_IS_LAST * @see #process(Instances) */ protected Instances makeTestDataset(int seed, int numInstances, int numNominal, int numNumeric, int numString, int numDate, int numRelational, int numClasses, int classType, int classIndex, boolean multiInstance) throws Exception { TestInstances dataset = new TestInstances(); dataset.setSeed(seed); dataset.setNumInstances(numInstances); dataset.setNumNominal(numNominal); dataset.setNumNumeric(numNumeric); dataset.setNumString(numString); dataset.setNumDate(numDate); dataset.setNumRelational(numRelational); dataset.setNumClasses(numClasses); dataset.setClassType(classType); dataset.setClassIndex(classIndex); dataset.setNumClasses(numClasses); dataset.setMultiInstance(multiInstance); dataset.setWords(getWords()); dataset.setWordSeparators(getWordSeparators()); return process(dataset.generate()); } /** * Print out a short summary string for the dataset characteristics * * @param nominalPredictor true if nominal predictor attributes are present * @param numericPredictor true if numeric predictor attributes are present * @param stringPredictor true if string predictor attributes are present * @param datePredictor true if date predictor attributes are present * @param relationalPredictor true if relational predictor attributes are present * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) 
*/ protected void printAttributeSummary(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType) { String str = ""; if (numericPredictor) str += " numeric"; if (nominalPredictor) { if (str.length() > 0) str += " &"; str += " nominal"; } if (stringPredictor) { if (str.length() > 0) str += " &"; str += " string"; } if (datePredictor) { if (str.length() > 0) str += " &"; str += " date"; } if (relationalPredictor) { if (str.length() > 0) str += " &"; str += " relational"; } str += " predictors)"; switch (classType) { case Attribute.NUMERIC: str = " (numeric class," + str; break; case Attribute.NOMINAL: str = " (nominal class," + str; break; case Attribute.STRING: str = " (string class," + str; break; case Attribute.DATE: str = " (date class," + str; break; case Attribute.RELATIONAL: str = " (relational class," + str; break; } print(str); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Test method for this class * * @param args the commandline parameters */ public static void main(String [] args) { runCheck(new CheckAttributeSelection(), args); } }
57,155
33.829982
131
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/ChiSquaredAttributeEval.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * ChiSquaredAttributeEval.java * Copyright (C) 1999 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import weka.core.Capabilities; import weka.core.ContingencyTables; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.filters.Filter; import weka.filters.supervised.attribute.Discretize; import weka.filters.unsupervised.attribute.NumericToBinary; import java.util.Enumeration; import java.util.Vector; /** <!-- globalinfo-start --> * ChiSquaredAttributeEval :<br/> * <br/> * Evaluates the worth of an attribute by computing the value of the chi-squared statistic with respect to the class.<br/> * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -M * treat missing values as a seperate value.</pre> * * <pre> -B * just binarize numeric attributes instead * of properly discretizing them.</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 5511 $ * @see Discretize * @see NumericToBinary */ public class ChiSquaredAttributeEval extends ASEvaluation implements AttributeEvaluator, OptionHandler { /** for 
serialization */ static final long serialVersionUID = -8316857822521717692L; /** Treat missing values as a seperate value */ private boolean m_missing_merge; /** Just binarize numeric attributes */ private boolean m_Binarize; /** The chi-squared value for each attribute */ private double[] m_ChiSquareds; /** * Returns a string describing this attribute evaluator * @return a description of the evaluator suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "ChiSquaredAttributeEval :\n\nEvaluates the worth of an attribute " +"by computing the value of the chi-squared statistic with respect to the class.\n"; } /** * Constructor */ public ChiSquaredAttributeEval () { resetOptions(); } /** * Returns an enumeration describing the available options * @return an enumeration of all the available options **/ public Enumeration listOptions () { Vector newVector = new Vector(2); newVector.addElement(new Option("\ttreat missing values as a seperate " + "value.", "M", 0, "-M")); newVector.addElement(new Option("\tjust binarize numeric attributes instead \n" +"\tof properly discretizing them.", "B", 0, "-B")); return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -M * treat missing values as a seperate value.</pre> * * <pre> -B * just binarize numeric attributes instead * of properly discretizing them.</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions (String[] options) throws Exception { resetOptions(); setMissingMerge(!(Utils.getFlag('M', options))); setBinarizeNumericAttributes(Utils.getFlag('B', options)); } /** * Gets the current settings. 
* * @return an array of strings suitable for passing to setOptions() */ public String[] getOptions () { String[] options = new String[2]; int current = 0; if (!getMissingMerge()) { options[current++] = "-M"; } if (getBinarizeNumericAttributes()) { options[current++] = "-B"; } while (current < options.length) { options[current++] = ""; } return options; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String binarizeNumericAttributesTipText() { return "Just binarize numeric attributes instead of properly discretizing them."; } /** * Binarize numeric attributes. * * @param b true=binarize numeric attributes */ public void setBinarizeNumericAttributes (boolean b) { m_Binarize = b; } /** * get whether numeric attributes are just being binarized. * * @return true if missing values are being distributed. */ public boolean getBinarizeNumericAttributes () { return m_Binarize; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String missingMergeTipText() { return "Distribute counts for missing values. Counts are distributed " +"across other values in proportion to their frequency. Otherwise, " +"missing is treated as a separate value."; } /** * distribute the counts for missing values across observed values * * @param b true=distribute missing values. */ public void setMissingMerge (boolean b) { m_missing_merge = b; } /** * get whether missing values are being distributed or not * * @return true if missing values are being distributed. */ public boolean getMissingMerge () { return m_missing_merge; } /** * Returns the capabilities of this evaluator. 
* * @return the capabilities of this evaluator * @see Capabilities */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Initializes a chi-squared attribute evaluator. * Discretizes all attributes that are numeric. * * @param data set of instances serving as training data * @throws Exception if the evaluator has not been * generated successfully */ public void buildEvaluator (Instances data) throws Exception { // can evaluator handle data? getCapabilities().testWithFail(data); int classIndex = data.classIndex(); int numInstances = data.numInstances(); if (!m_Binarize) { Discretize disTransform = new Discretize(); disTransform.setUseBetterEncoding(true); disTransform.setInputFormat(data); data = Filter.useFilter(data, disTransform); } else { NumericToBinary binTransform = new NumericToBinary(); binTransform.setInputFormat(data); data = Filter.useFilter(data, binTransform); } int numClasses = data.attribute(classIndex).numValues(); // Reserve space and initialize counters double[][][] counts = new double[data.numAttributes()][][]; for (int k = 0; k < data.numAttributes(); k++) { if (k != classIndex) { int numValues = data.attribute(k).numValues(); counts[k] = new double[numValues + 1][numClasses + 1]; } } // Initialize counters double[] temp = new double[numClasses + 1]; for (int k = 0; k < numInstances; k++) { Instance inst = data.instance(k); if (inst.classIsMissing()) { temp[numClasses] += inst.weight(); } else { temp[(int)inst.classValue()] += inst.weight(); } } for (int k = 0; k < counts.length; k++) { if (k != classIndex) { for (int i = 0; i < temp.length; i++) { counts[k][0][i] = temp[i]; } } } // 
Get counts for (int k = 0; k < numInstances; k++) { Instance inst = data.instance(k); for (int i = 0; i < inst.numValues(); i++) { if (inst.index(i) != classIndex) { if (inst.isMissingSparse(i) || inst.classIsMissing()) { if (!inst.isMissingSparse(i)) { counts[inst.index(i)][(int)inst.valueSparse(i)][numClasses] += inst.weight(); counts[inst.index(i)][0][numClasses] -= inst.weight(); } else if (!inst.classIsMissing()) { counts[inst.index(i)][data.attribute(inst.index(i)).numValues()] [(int)inst.classValue()] += inst.weight(); counts[inst.index(i)][0][(int)inst.classValue()] -= inst.weight(); } else { counts[inst.index(i)][data.attribute(inst.index(i)).numValues()] [numClasses] += inst.weight(); counts[inst.index(i)][0][numClasses] -= inst.weight(); } } else { counts[inst.index(i)][(int)inst.valueSparse(i)] [(int)inst.classValue()] += inst.weight(); counts[inst.index(i)][0][(int)inst.classValue()] -= inst.weight(); } } } } // distribute missing counts if required if (m_missing_merge) { for (int k = 0; k < data.numAttributes(); k++) { if (k != classIndex) { int numValues = data.attribute(k).numValues(); // Compute marginals double[] rowSums = new double[numValues]; double[] columnSums = new double[numClasses]; double sum = 0; for (int i = 0; i < numValues; i++) { for (int j = 0; j < numClasses; j++) { rowSums[i] += counts[k][i][j]; columnSums[j] += counts[k][i][j]; } sum += rowSums[i]; } if (Utils.gr(sum, 0)) { double[][] additions = new double[numValues][numClasses]; // Compute what needs to be added to each row for (int i = 0; i < numValues; i++) { for (int j = 0; j < numClasses; j++) { additions[i][j] = (rowSums[i] / sum) * counts[k][numValues][j]; } } // Compute what needs to be added to each column for (int i = 0; i < numClasses; i++) { for (int j = 0; j < numValues; j++) { additions[j][i] += (columnSums[i] / sum) * counts[k][j][numClasses]; } } // Compute what needs to be added to each cell for (int i = 0; i < numClasses; i++) { for (int j = 0; j < numValues; 
j++) { additions[j][i] += (counts[k][j][i] / sum) * counts[k][numValues][numClasses]; } } // Make new contingency table double[][] newTable = new double[numValues][numClasses]; for (int i = 0; i < numValues; i++) { for (int j = 0; j < numClasses; j++) { newTable[i][j] = counts[k][i][j] + additions[i][j]; } } counts[k] = newTable; } } } } // Compute chi-squared values m_ChiSquareds = new double[data.numAttributes()]; for (int i = 0; i < data.numAttributes(); i++) { if (i != classIndex) { m_ChiSquareds[i] = ContingencyTables. chiVal(ContingencyTables.reduceMatrix(counts[i]), false); } } } /** * Reset options to their default values */ protected void resetOptions () { m_ChiSquareds = null; m_missing_merge = true; m_Binarize = false; } /** * evaluates an individual attribute by measuring its * chi-squared value. * * @param attribute the index of the attribute to be evaluated * @return the chi-squared value * @throws Exception if the attribute could not be evaluated */ public double evaluateAttribute (int attribute) throws Exception { return m_ChiSquareds[attribute]; } /** * Describe the attribute evaluator * @return a description of the attribute evaluator as a string */ public String toString () { StringBuffer text = new StringBuffer(); if (m_ChiSquareds == null) { text.append("Chi-squared attribute evaluator has not been built"); } else { text.append("\tChi-squared Ranking Filter"); if (!m_missing_merge) { text.append("\n\tMissing values treated as seperate"); } if (m_Binarize) { text.append("\n\tNumeric attributes are just binarized"); } } text.append("\n"); return text.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5511 $"); } /** * Main method. * * @param args the options */ public static void main (String[] args) { runEvaluator(new ChiSquaredAttributeEval(), args); } }
13,684
28.493534
122
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/ClassifierSubsetEval.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * ClassifierSubsetEval.java * Copyright (C) 2000 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import weka.classifiers.Classifier; import weka.classifiers.Evaluation; import weka.classifiers.rules.ZeroR; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Remove; import java.io.File; import java.util.BitSet; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; /** <!-- globalinfo-start --> * Classifier subset evaluator:<br/> * <br/> * Evaluates attribute subsets on training data or a seperate hold out testing set. Uses a classifier to estimate the 'merit' of a set of attributes. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -B &lt;classifier&gt; * class name of the classifier to use for accuracy estimation. * Place any classifier options LAST on the command line * following a "--". eg.: * -B weka.classifiers.bayes.NaiveBayes ... 
-- -K * (default: weka.classifiers.rules.ZeroR)</pre> * * <pre> -T * Use the training data to estimate accuracy.</pre> * * <pre> -H &lt;filename&gt; * Name of the hold out/test set to * estimate accuracy on.</pre> * * <pre> * Options specific to scheme weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 5511 $ */ public class ClassifierSubsetEval extends HoldOutSubsetEvaluator implements OptionHandler, ErrorBasedMeritEvaluator { /** for serialization */ static final long serialVersionUID = 7532217899385278710L; /** training instances */ private Instances m_trainingInstances; /** class index */ private int m_classIndex; /** number of attributes in the training data */ private int m_numAttribs; /** number of training instances */ private int m_numInstances; /** holds the classifier to use for error estimates */ private Classifier m_Classifier = new ZeroR(); /** holds the evaluation object to use for evaluating the classifier */ private Evaluation m_Evaluation; /** the file that containts hold out/test instances */ private File m_holdOutFile = new File("Click to set hold out or " +"test instances"); /** the instances to test on */ private Instances m_holdOutInstances = null; /** evaluate on training data rather than seperate hold out/test set */ private boolean m_useTraining = true; /** * Returns a string describing this attribute evaluator * @return a description of the evaluator suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Classifier subset evaluator:\n\nEvaluates attribute subsets on training data or a seperate " + "hold out testing set. Uses a classifier to estimate the 'merit' of a set of attributes."; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
**/ public Enumeration listOptions () { Vector newVector = new Vector(3); newVector.addElement(new Option( "\tclass name of the classifier to use for accuracy estimation.\n" + "\tPlace any classifier options LAST on the command line\n" + "\tfollowing a \"--\". eg.:\n" + "\t\t-B weka.classifiers.bayes.NaiveBayes ... -- -K\n" + "\t(default: weka.classifiers.rules.ZeroR)", "B", 1, "-B <classifier>")); newVector.addElement(new Option( "\tUse the training data to estimate" +" accuracy.", "T",0,"-T")); newVector.addElement(new Option( "\tName of the hold out/test set to " +"\n\testimate accuracy on.", "H", 1,"-H <filename>")); if ((m_Classifier != null) && (m_Classifier instanceof OptionHandler)) { newVector.addElement(new Option("", "", 0, "\nOptions specific to " + "scheme " + m_Classifier.getClass().getName() + ":")); Enumeration enu = ((OptionHandler)m_Classifier).listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } } return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -B &lt;classifier&gt; * class name of the classifier to use for accuracy estimation. * Place any classifier options LAST on the command line * following a "--". eg.: * -B weka.classifiers.bayes.NaiveBayes ... 
-- -K * (default: weka.classifiers.rules.ZeroR)</pre> * * <pre> -T * Use the training data to estimate accuracy.</pre> * * <pre> -H &lt;filename&gt; * Name of the hold out/test set to * estimate accuracy on.</pre> * * <pre> * Options specific to scheme weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions (String[] options) throws Exception { String optionString; resetOptions(); optionString = Utils.getOption('B', options); if (optionString.length() == 0) optionString = ZeroR.class.getName(); setClassifier(AbstractClassifier.forName(optionString, Utils.partitionOptions(options))); optionString = Utils.getOption('H',options); if (optionString.length() != 0) { setHoldOutFile(new File(optionString)); } setUseTraining(Utils.getFlag('T',options)); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String classifierTipText() { return "Classifier to use for estimating the accuracy of subsets"; } /** * Set the classifier to use for accuracy estimation * * @param newClassifier the Classifier to use. */ public void setClassifier (Classifier newClassifier) { m_Classifier = newClassifier; } /** * Get the classifier used as the base learner. * * @return the classifier used as the classifier */ public Classifier getClassifier () { return m_Classifier; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String holdOutFileTipText() { return "File containing hold out/test instances."; } /** * Gets the file that holds hold out/test instances. 
* @return File that contains hold out instances */ public File getHoldOutFile() { return m_holdOutFile; } /** * Set the file that contains hold out/test instances * @param h the hold out file */ public void setHoldOutFile(File h) { m_holdOutFile = h; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String useTrainingTipText() { return "Use training data instead of hold out/test instances."; } /** * Get if training data is to be used instead of hold out/test data * @return true if training data is to be used instead of hold out data */ public boolean getUseTraining() { return m_useTraining; } /** * Set if training data is to be used instead of hold out/test data * @param t true if training data is to be used instead of hold out data */ public void setUseTraining(boolean t) { m_useTraining = t; } /** * Gets the current settings of ClassifierSubsetEval * * @return an array of strings suitable for passing to setOptions() */ public String[] getOptions () { String[] classifierOptions = new String[0]; if ((m_Classifier != null) && (m_Classifier instanceof OptionHandler)) { classifierOptions = ((OptionHandler)m_Classifier).getOptions(); } String[] options = new String[6 + classifierOptions.length]; int current = 0; if (getClassifier() != null) { options[current++] = "-B"; options[current++] = getClassifier().getClass().getName(); } if (getUseTraining()) { options[current++] = "-T"; } options[current++] = "-H"; options[current++] = getHoldOutFile().getPath(); if (classifierOptions.length > 0) { options[current++] = "--"; System.arraycopy(classifierOptions, 0, options, current, classifierOptions.length); current += classifierOptions.length; } while (current < options.length) { options[current++] = ""; } return options; } /** * Returns the capabilities of this evaluator. 
* * @return the capabilities of this evaluator * @see Capabilities */ public Capabilities getCapabilities() { Capabilities result; if (getClassifier() == null) { result = super.getCapabilities(); result.disableAll(); } else { result = getClassifier().getCapabilities(); } // set dependencies for (Capability cap: Capability.values()) result.enableDependency(cap); return result; } /** * Generates a attribute evaluator. Has to initialize all fields of the * evaluator that are not being set via options. * * @param data set of instances serving as training data * @throws Exception if the evaluator has not been * generated successfully */ public void buildEvaluator (Instances data) throws Exception { // can evaluator handle data? getCapabilities().testWithFail(data); m_trainingInstances = data; m_classIndex = m_trainingInstances.classIndex(); m_numAttribs = m_trainingInstances.numAttributes(); m_numInstances = m_trainingInstances.numInstances(); // load the testing data if (!m_useTraining && (!getHoldOutFile().getPath().startsWith("Click to set"))) { java.io.Reader r = new java.io.BufferedReader( new java.io.FileReader(getHoldOutFile().getPath())); m_holdOutInstances = new Instances(r); m_holdOutInstances.setClassIndex(m_trainingInstances.classIndex()); if (m_trainingInstances.equalHeaders(m_holdOutInstances) == false) { throw new Exception("Hold out/test set is not compatable with " +"training data."); } } } /** * Evaluates a subset of attributes * * @param subset a bitset representing the attribute subset to be * evaluated * @return the error rate * @throws Exception if the subset could not be evaluated */ public double evaluateSubset (BitSet subset) throws Exception { int i,j; double errorRate = 0; int numAttributes = 0; Instances trainCopy=null; Instances testCopy=null; Remove delTransform = new Remove(); delTransform.setInvertSelection(true); // copy the training instances trainCopy = new Instances(m_trainingInstances); if (!m_useTraining) { if (m_holdOutInstances == 
null) { throw new Exception("Must specify a set of hold out/test instances " +"with -H"); } // copy the test instances testCopy = new Instances(m_holdOutInstances); } // count attributes set in the BitSet for (i = 0; i < m_numAttribs; i++) { if (subset.get(i)) { numAttributes++; } } // set up an array of attribute indexes for the filter (+1 for the class) int[] featArray = new int[numAttributes + 1]; for (i = 0, j = 0; i < m_numAttribs; i++) { if (subset.get(i)) { featArray[j++] = i; } } featArray[j] = m_classIndex; delTransform.setAttributeIndicesArray(featArray); delTransform.setInputFormat(trainCopy); trainCopy = Filter.useFilter(trainCopy, delTransform); if (!m_useTraining) { testCopy = Filter.useFilter(testCopy, delTransform); } // build the classifier m_Classifier.buildClassifier(trainCopy); m_Evaluation = new Evaluation(trainCopy); if (!m_useTraining) { m_Evaluation.evaluateModel(m_Classifier, testCopy); } else { m_Evaluation.evaluateModel(m_Classifier, trainCopy); } if (m_trainingInstances.classAttribute().isNominal()) { errorRate = m_Evaluation.errorRate(); } else { errorRate = m_Evaluation.meanAbsoluteError(); } m_Evaluation = null; // return the negative of the error rate as search methods need to // maximize something return -errorRate; } /** * Evaluates a subset of attributes with respect to a set of instances. * Calling this function overides any test/hold out instancs set from * setHoldOutFile. 
* @param subset a bitset representing the attribute subset to be * evaluated * @param holdOut a set of instances (possibly seperate and distinct * from those use to build/train the evaluator) with which to * evaluate the merit of the subset * @return the "merit" of the subset on the holdOut data * @throws Exception if the subset cannot be evaluated */ public double evaluateSubset(BitSet subset, Instances holdOut) throws Exception { int i,j; double errorRate; int numAttributes = 0; Instances trainCopy=null; Instances testCopy=null; if (m_trainingInstances.equalHeaders(holdOut) == false) { throw new Exception("evaluateSubset : Incompatable instance types."); } Remove delTransform = new Remove(); delTransform.setInvertSelection(true); // copy the training instances trainCopy = new Instances(m_trainingInstances); testCopy = new Instances(holdOut); // count attributes set in the BitSet for (i = 0; i < m_numAttribs; i++) { if (subset.get(i)) { numAttributes++; } } // set up an array of attribute indexes for the filter (+1 for the class) int[] featArray = new int[numAttributes + 1]; for (i = 0, j = 0; i < m_numAttribs; i++) { if (subset.get(i)) { featArray[j++] = i; } } featArray[j] = m_classIndex; delTransform.setAttributeIndicesArray(featArray); delTransform.setInputFormat(trainCopy); trainCopy = Filter.useFilter(trainCopy, delTransform); testCopy = Filter.useFilter(testCopy, delTransform); // build the classifier m_Classifier.buildClassifier(trainCopy); m_Evaluation = new Evaluation(trainCopy); m_Evaluation.evaluateModel(m_Classifier, testCopy); if (m_trainingInstances.classAttribute().isNominal()) { errorRate = m_Evaluation.errorRate(); } else { errorRate = m_Evaluation.meanAbsoluteError(); } m_Evaluation = null; // return the negative of the error as search methods need to // maximize something return -errorRate; } /** * Evaluates a subset of attributes with respect to a single instance. 
* Calling this function overides any hold out/test instances set * through setHoldOutFile. * @param subset a bitset representing the attribute subset to be * evaluated * @param holdOut a single instance (possibly not one of those used to * build/train the evaluator) with which to evaluate the merit of the subset * @param retrain true if the classifier should be retrained with respect * to the new subset before testing on the holdOut instance. * @return the "merit" of the subset on the holdOut instance * @throws Exception if the subset cannot be evaluated */ public double evaluateSubset(BitSet subset, Instance holdOut, boolean retrain) throws Exception { int i,j; double error; int numAttributes = 0; Instances trainCopy=null; Instance testCopy=null; if (m_trainingInstances.equalHeaders(holdOut.dataset()) == false) { throw new Exception("evaluateSubset : Incompatable instance types."); } Remove delTransform = new Remove(); delTransform.setInvertSelection(true); // copy the training instances trainCopy = new Instances(m_trainingInstances); testCopy = (Instance)holdOut.copy(); // count attributes set in the BitSet for (i = 0; i < m_numAttribs; i++) { if (subset.get(i)) { numAttributes++; } } // set up an array of attribute indexes for the filter (+1 for the class) int[] featArray = new int[numAttributes + 1]; for (i = 0, j = 0; i < m_numAttribs; i++) { if (subset.get(i)) { featArray[j++] = i; } } featArray[j] = m_classIndex; delTransform.setAttributeIndicesArray(featArray); delTransform.setInputFormat(trainCopy); if (retrain) { trainCopy = Filter.useFilter(trainCopy, delTransform); // build the classifier m_Classifier.buildClassifier(trainCopy); } delTransform.input(testCopy); testCopy = delTransform.output(); double pred; double [] distrib; distrib = m_Classifier.distributionForInstance(testCopy); if (m_trainingInstances.classAttribute().isNominal()) { pred = distrib[(int)testCopy.classValue()]; } else { pred = distrib[0]; } if 
(m_trainingInstances.classAttribute().isNominal()) { error = 1.0 - pred; } else { error = testCopy.classValue() - pred; } // return the negative of the error as search methods need to // maximize something return -error; } /** * Returns a string describing classifierSubsetEval * * @return the description as a string */ public String toString() { StringBuffer text = new StringBuffer(); if (m_trainingInstances == null) { text.append("\tClassifier subset evaluator has not been built yet\n"); } else { text.append("\tClassifier Subset Evaluator\n"); text.append("\tLearning scheme: " + getClassifier().getClass().getName() + "\n"); text.append("\tScheme options: "); String[] classifierOptions = new String[0]; if (m_Classifier instanceof OptionHandler) { classifierOptions = ((OptionHandler)m_Classifier).getOptions(); for (int i = 0; i < classifierOptions.length; i++) { text.append(classifierOptions[i] + " "); } } text.append("\n"); text.append("\tHold out/test set: "); if (!m_useTraining) { if (getHoldOutFile().getPath().startsWith("Click to set")) { text.append("none\n"); } else { text.append(getHoldOutFile().getPath()+'\n'); } } else { text.append("Training data\n"); } if (m_trainingInstances.attribute(m_classIndex).isNumeric()) { text.append("\tAccuracy estimation: MAE\n"); } else { text.append("\tAccuracy estimation: classification error\n"); } } return text.toString(); } /** * reset to defaults */ protected void resetOptions () { m_trainingInstances = null; m_Evaluation = null; m_Classifier = new ZeroR(); m_holdOutFile = new File("Click to set hold out or test instances"); m_holdOutInstances = null; m_useTraining = false; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5511 $"); } /** * Main method for testing this class. * * @param args the options */ public static void main (String[] args) { runEvaluator(new ClassifierSubsetEval(), args); } }
20,608
28.274148
149
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/ConsistencySubsetEval.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * ConsistencySubsetEval.java * Copyright (C) 1999 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.filters.Filter; import weka.filters.supervised.attribute.Discretize; import java.io.Serializable; import java.util.BitSet; import java.util.Enumeration; import java.util.Hashtable; /** <!-- globalinfo-start --> * ConsistencySubsetEval :<br/> * <br/> * Evaluates the worth of a subset of attributes by the level of consistency in the class values when the training instances are projected onto the subset of attributes. 
<br/> * <br/> * Consistency of any subset can never be lower than that of the full set of attributes, hence the usual practice is to use this subset evaluator in conjunction with a Random or Exhaustive search which looks for the smallest subset with consistency equal to that of the full set of attributes.<br/> * <br/> * For more information see:<br/> * <br/> * H. Liu, R. Setiono: A probabilistic approach to feature selection - A filter solution. In: 13th International Conference on Machine Learning, 319-327, 1996. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Liu1996, * author = {H. Liu and R. Setiono}, * booktitle = {13th International Conference on Machine Learning}, * pages = {319-327}, * title = {A probabilistic approach to feature selection - A filter solution}, * year = {1996} * } * </pre> * <p/> <!-- technical-bibtex-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 5511 $ * @see Discretize */ public class ConsistencySubsetEval extends ASEvaluation implements SubsetEvaluator, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -2880323763295270402L; /** training instances */ private Instances m_trainInstances; /** class index */ private int m_classIndex; /** number of attributes in the training data */ private int m_numAttribs; /** number of instances in the training data */ private int m_numInstances; /** Discretise numeric attributes */ private Discretize m_disTransform; /** Hash table for evaluating feature subsets */ private Hashtable m_table; /** * Class providing keys to the hash table. */ public class hashKey implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = 6144138512017017408L; /** Array of attribute values for an instance */ private double [] attributes; /** True for an index if the corresponding attribute value is missing. 
*/ private boolean [] missing; /** The key */ private int key; /** * Constructor for a hashKey * * @param t an instance from which to generate a key * @param numAtts the number of attributes * @throws Exception if something goes wrong */ public hashKey(Instance t, int numAtts) throws Exception { int i; int cindex = t.classIndex(); key = -999; attributes = new double [numAtts]; missing = new boolean [numAtts]; for (i=0;i<numAtts;i++) { if (i == cindex) { missing[i] = true; } else { if ((missing[i] = t.isMissing(i)) == false) { attributes[i] = t.value(i); } } } } /** * Convert a hash entry to a string * * @param t the set of instances * @param maxColWidth width to make the fields * @return the hash entry as string */ public String toString(Instances t, int maxColWidth) { int i; int cindex = t.classIndex(); StringBuffer text = new StringBuffer(); for (i=0;i<attributes.length;i++) { if (i != cindex) { if (missing[i]) { text.append("?"); for (int j=0;j<maxColWidth;j++) { text.append(" "); } } else { String ss = t.attribute(i).value((int)attributes[i]); StringBuffer sb = new StringBuffer(ss); for (int j=0;j < (maxColWidth-ss.length()+1); j++) { sb.append(" "); } text.append(sb); } } } return text.toString(); } /** * Constructor for a hashKey * * @param t an array of feature values */ public hashKey(double [] t) { int i; int l = t.length; key = -999; attributes = new double [l]; missing = new boolean [l]; for (i=0;i<l;i++) { if (t[i] == Double.MAX_VALUE) { missing[i] = true; } else { missing[i] = false; attributes[i] = t[i]; } } } /** * Calculates a hash code * * @return the hash code as an integer */ public int hashCode() { int hv = 0; if (key != -999) return key; for (int i=0;i<attributes.length;i++) { if (missing[i]) { hv += (i*13); } else { hv += (i * 5 * (attributes[i]+1)); } } if (key == -999) { key = hv; } return hv; } /** * Tests if two instances are equal * * @param b a key to compare with * @return true if the objects are equal */ public boolean equals(Object b) 
{ if ((b == null) || !(b.getClass().equals(this.getClass()))) { return false; } boolean ok = true; boolean l; if (b instanceof hashKey) { hashKey n = (hashKey)b; for (int i=0;i<attributes.length;i++) { l = n.missing[i]; if (missing[i] || l) { if ((missing[i] && !l) || (!missing[i] && l)) { ok = false; break; } } else { if (attributes[i] != n.attributes[i]) { ok = false; break; } } } } else { return false; } return ok; } /** * Prints the hash code */ public void print_hash_code() { System.out.println("Hash val: "+hashCode()); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5511 $"); } } /** * Returns a string describing this search method * @return a description of the search suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "ConsistencySubsetEval :\n\nEvaluates the worth of a subset of " +"attributes by the level of consistency in the class values when the " +"training instances are projected onto the subset of attributes. " +"\n\nConsistency of any subset can never be lower than that of the " +"full set of attributes, hence the usual practice is to use this " +"subset evaluator in conjunction with a Random or Exhaustive search " +"which looks for the smallest subset with consistency equal to that " +"of the full set of attributes.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "H. Liu and R. 
Setiono"); result.setValue(Field.TITLE, "A probabilistic approach to feature selection - A filter solution"); result.setValue(Field.BOOKTITLE, "13th International Conference on Machine Learning"); result.setValue(Field.YEAR, "1996"); result.setValue(Field.PAGES, "319-327"); return result; } /** * Constructor. Calls restOptions to set default options **/ public ConsistencySubsetEval () { resetOptions(); } /** * reset to defaults */ private void resetOptions () { m_trainInstances = null; } /** * Returns the capabilities of this evaluator. * * @return the capabilities of this evaluator * @see Capabilities */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Generates a attribute evaluator. Has to initialize all fields of the * evaluator that are not being set via options. * * @param data set of instances serving as training data * @throws Exception if the evaluator has not been * generated successfully */ public void buildEvaluator (Instances data) throws Exception { // can evaluator handle data? 
getCapabilities().testWithFail(data); m_trainInstances = new Instances(data); m_trainInstances.deleteWithMissingClass(); m_classIndex = m_trainInstances.classIndex(); m_numAttribs = m_trainInstances.numAttributes(); m_numInstances = m_trainInstances.numInstances(); m_disTransform = new Discretize(); m_disTransform.setUseBetterEncoding(true); m_disTransform.setInputFormat(m_trainInstances); m_trainInstances = Filter.useFilter(m_trainInstances, m_disTransform); } /** * Evaluates a subset of attributes * * @param subset a bitset representing the attribute subset to be * evaluated * @throws Exception if the subset could not be evaluated */ public double evaluateSubset (BitSet subset) throws Exception { int [] fs; int i; int count = 0; for (i=0;i<m_numAttribs;i++) { if (subset.get(i)) { count++; } } double [] instArray = new double[count]; int index = 0; fs = new int[count]; for (i=0;i<m_numAttribs;i++) { if (subset.get(i)) { fs[index++] = i; } } // create new hash table m_table = new Hashtable((int)(m_numInstances * 1.5)); for (i=0;i<m_numInstances;i++) { Instance inst = m_trainInstances.instance(i); for (int j=0;j<fs.length;j++) { if (fs[j] == m_classIndex) { throw new Exception("A subset should not contain the class!"); } if (inst.isMissing(fs[j])) { instArray[j] = Double.MAX_VALUE; } else { instArray[j] = inst.value(fs[j]); } } insertIntoTable(inst, instArray); } return consistencyCount(); } /** * calculates the level of consistency in a dataset using a subset of * features. The consistency of a hash table entry is the total number * of instances hashed to that location minus the number of instances in * the largest class hashed to that location. The total consistency is * 1.0 minus the sum of the individual consistencies divided by the * total number of instances. * @return the consistency of the hash table as a value between 0 and 1. 
*/ private double consistencyCount() { Enumeration e = m_table.keys(); double [] classDist; double count = 0.0; while (e.hasMoreElements()) { hashKey tt = (hashKey)e.nextElement(); classDist = (double []) m_table.get(tt); count += Utils.sum(classDist); int max = Utils.maxIndex(classDist); count -= classDist[max]; } count /= (double)m_numInstances; return (1.0 - count); } /** * Inserts an instance into the hash table * * @param inst instance to be inserted * @param instA the instance to be inserted as an array of attribute * values. * @throws Exception if the instance can't be inserted */ private void insertIntoTable(Instance inst, double [] instA) throws Exception { double [] tempClassDist2; double [] newDist; hashKey thekey; thekey = new hashKey(instA); // see if this one is already in the table tempClassDist2 = (double []) m_table.get(thekey); if (tempClassDist2 == null) { newDist = new double [m_trainInstances.classAttribute().numValues()]; newDist[(int)inst.classValue()] = inst.weight(); // add to the table m_table.put(thekey, newDist); } else { // update the distribution for this instance tempClassDist2[(int)inst.classValue()]+=inst.weight(); // update the table m_table.put(thekey, tempClassDist2); } } /** * returns a description of the evaluator * @return a description of the evaluator as a String. */ public String toString() { StringBuffer text = new StringBuffer(); if (m_trainInstances == null) { text.append("\tConsistency subset evaluator has not been built yet\n"); } else { text.append("\tConsistency Subset Evaluator\n"); } return text.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5511 $"); } /** * Main method for testing this class. * * @param args the options */ public static void main (String[] args) { runEvaluator(new ConsistencySubsetEval(), args); } }
14,450
26.630975
298
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/CorrelationAttributeEval.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * CorrelationAttributeEval.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import java.util.ArrayList; import java.util.Arrays; import java.util.Enumeration; import java.util.List; import java.util.Vector; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.filters.Filter; import weka.filters.unsupervised.attribute.ReplaceMissingValues; /** <!-- globalinfo-start --> * CorrelationAttributeEval :<br/> * <br/> * Evaluates the worth of an attribute by measuring the correlation (Pearson's) between it and the class.<br/> * <br/> * Nominal attributes are considered on a value by value basis by treating each value as an indicator. 
An overall correlation for a nominal attribute is arrived at via a weighted average.<br/> * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Output detailed info for nominal attributes</pre> * <!-- options-end --> * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision: 9049 $ */ public class CorrelationAttributeEval extends ASEvaluation implements AttributeEvaluator, OptionHandler { /** For serialization */ private static final long serialVersionUID = -4931946995055872438L; /** The correlation for each attribute */ protected double[] m_correlations; /** Whether to output detailed (per value) correlation for nominal attributes */ protected boolean m_detailedOutput = false; /** Holds the detailed output info */ protected StringBuffer m_detailedOutputBuff; /** * Returns a string describing this attribute evaluator * * @return a description of the evaluator suitable for displaying in the * explorer/experimenter gui */ public String globalInfo() { return "CorrelationAttributeEval :\n\nEvaluates the worth of an attribute " + "by measuring the correlation (Pearson's) between it and the class.\n\n" + "Nominal attributes are considered on a value by " + "value basis by treating each value as an indicator. An overall " + "correlation for a nominal attribute is arrived at via a weighted average.\n"; } /** * Returns the capabilities of this evaluator. * * @return the capabilities of this evaluator * @see Capabilities */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Returns an enumeration describing the available options. 
* * @return an enumeration of all the available options. **/ @Override public Enumeration listOptions() { // TODO Auto-generated method stub Vector<Option> newVector = new Vector<Option>(); newVector.addElement(new Option( "\tOutput detailed info for nominal attributes", "D", 0, "-D")); return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Output detailed info for nominal attributes</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { setOutputDetailedInfo(Utils.getFlag('D', options)); } /** * Gets the current settings of WrapperSubsetEval. * * @return an array of strings suitable for passing to setOptions() */ @Override public String[] getOptions() { String[] options = new String[1]; if (getOutputDetailedInfo()) { options[0] = "-D"; } else { options[0] = ""; } return options; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String outputDetailedInfoTipText() { return "Output per value correlation for nominal attributes"; } /** * Set whether to output per-value correlation for nominal attributes * * @param d true if detailed (per-value) correlation is to be output for * nominal attributes */ public void setOutputDetailedInfo(boolean d) { m_detailedOutput = d; } /** * Get whether to output per-value correlation for nominal attributes * * @return true if detailed (per-value) correlation is to be output for * nominal attributes */ public boolean getOutputDetailedInfo() { return m_detailedOutput; } /** * Evaluates an individual attribute by measuring the correlation (Pearson's) * between it and the class. Nominal attributes are considered on a value by * value basis by treating each value as an indicator. 
An overall correlation * for a nominal attribute is arrived at via a weighted average. * * @param attribute the index of the attribute to be evaluated * @return the correlation * @throws Exception if the attribute could not be evaluated */ @Override public double evaluateAttribute(int attribute) throws Exception { return m_correlations[attribute]; } /** * Describe the attribute evaluator * * @return a description of the attribute evaluator as a String */ @Override public String toString() { StringBuffer buff = new StringBuffer(); if (m_correlations == null) { buff.append("Correlation attribute evaluator has not been built yet."); } else { buff.append("\tCorrelation Ranking Filter"); if (m_detailedOutput && m_detailedOutputBuff.length() > 0) { buff.append("\n\tDetailed output for nominal attributes"); buff.append(m_detailedOutputBuff); } } return buff.toString(); } /** * Initializes an information gain attribute evaluator. Replaces missing * values with means/modes; Deletes instances with missing class values. 
* * @param data set of instances serving as training data * @throws Exception if the evaluator has not been generated successfully */ @Override public void buildEvaluator(Instances data) throws Exception { data = new Instances(data); data.deleteWithMissingClass(); ReplaceMissingValues rmv = new ReplaceMissingValues(); rmv.setInputFormat(data); data = Filter.useFilter(data, rmv); int numClasses = data.classAttribute().numValues(); int classIndex = data.classIndex(); int numInstances = data.numInstances(); m_correlations = new double[data.numAttributes()]; /* * boolean hasNominals = false; boolean hasNumerics = false; */ List<Integer> numericIndexes = new ArrayList<Integer>(); List<Integer> nominalIndexes = new ArrayList<Integer>(); if (m_detailedOutput) { m_detailedOutputBuff = new StringBuffer(); } // TODO for instance weights (folded into computing weighted correlations) // add another dimension just before the last [2] (0 for 0/1 binary vector // and // 1 for corresponding instance weights for the 1's) double[][][] nomAtts = new double[data.numAttributes()][][]; for (int i = 0; i < data.numAttributes(); i++) { if (data.attribute(i).isNominal() && i != classIndex) { nomAtts[i] = new double[data.attribute(i).numValues()][data .numInstances()]; Arrays.fill(nomAtts[i][0], 1.0); // set zero index for this att to all // 1's nominalIndexes.add(i); } else if (data.attribute(i).isNumeric() && i != classIndex) { numericIndexes.add(i); } } // do the nominal attributes if (nominalIndexes.size() > 0) { for (int i = 0; i < data.numInstances(); i++) { Instance current = data.instance(i); for (int j = 0; j < current.numValues(); j++) { if (current.attribute(current.index(j)).isNominal() && current.index(j) != classIndex) { // Will need to check for zero in case this isn't a sparse // instance (unless we add 1 and subtract 1) nomAtts[current.index(j)][(int) current.valueSparse(j)][i] += 1; nomAtts[current.index(j)][0][i] -= 1; } } } } if (data.classAttribute().isNumeric()) { 
double[] classVals = data.attributeToDoubleArray(classIndex); // do the numeric attributes for (Integer i : numericIndexes) { double[] numAttVals = data.attributeToDoubleArray(i); m_correlations[i] = Utils.correlation(numAttVals, classVals, numAttVals.length); if (m_correlations[i] == 1.0) { // check for zero variance (useless numeric attribute) if (Utils.variance(numAttVals) == 0) { m_correlations[i] = 0; } } } // do the nominal attributes if (nominalIndexes.size() > 0) { // now compute the correlations for the binarized nominal attributes for (Integer i : nominalIndexes) { double sum = 0; double corr = 0; double sumCorr = 0; double sumForValue = 0; if (m_detailedOutput) { m_detailedOutputBuff.append("\n\n") .append(data.attribute(i).name()); } for (int j = 0; j < data.attribute(i).numValues(); j++) { sumForValue = Utils.sum(nomAtts[i][j]); corr = Utils .correlation(nomAtts[i][j], classVals, classVals.length); // useless attribute - all instances have the same value if (sumForValue == numInstances || sumForValue == 0) { corr = 0; } if (corr < 0.0) { corr = -corr; } sumCorr += sumForValue * corr; sum += sumForValue; if (m_detailedOutput) { m_detailedOutputBuff.append("\n\t") .append(data.attribute(i).value(j)).append(": "); m_detailedOutputBuff.append(Utils.doubleToString(corr, 6)); } } m_correlations[i] = (sum > 0) ? 
sumCorr / sum : 0; } } } else { // class is nominal // TODO extra dimension for storing instance weights too double[][] binarizedClasses = new double[data.classAttribute() .numValues()][data.numInstances()]; // this is equal to the number of instances for all inst weights = 1 double[] classValCounts = new double[data.classAttribute().numValues()]; for (int i = 0; i < data.numInstances(); i++) { Instance current = data.instance(i); binarizedClasses[(int) current.classValue()][i] = 1; } for (int i = 0; i < data.classAttribute().numValues(); i++) { classValCounts[i] = Utils.sum(binarizedClasses[i]); } double sumClass = Utils.sum(classValCounts); // do numeric attributes first if (numericIndexes.size() > 0) { for (Integer i : numericIndexes) { double[] numAttVals = data.attributeToDoubleArray(i); double corr = 0; double sumCorr = 0; for (int j = 0; j < data.classAttribute().numValues(); j++) { corr = Utils.correlation(numAttVals, binarizedClasses[j], numAttVals.length); if (corr < 0.0) { corr = -corr; } if (corr == 1.0) { // check for zero variance (useless numeric attribute) if (Utils.variance(numAttVals) == 0) { corr = 0; } } sumCorr += classValCounts[j] * corr; } m_correlations[i] = sumCorr / sumClass; } } if (nominalIndexes.size() > 0) { for (Integer i : nominalIndexes) { if (m_detailedOutput) { m_detailedOutputBuff.append("\n\n") .append(data.attribute(i).name()); } double sumForAtt = 0; double corrForAtt = 0; for (int j = 0; j < data.attribute(i).numValues(); j++) { double sumForValue = Utils.sum(nomAtts[i][j]); double corr = 0; double sumCorr = 0; double avgCorrForValue = 0; sumForAtt += sumForValue; for (int k = 0; k < numClasses; k++) { // corr between value j and class k corr = Utils.correlation(nomAtts[i][j], binarizedClasses[k], binarizedClasses[k].length); // useless attribute - all instances have the same value if (sumForValue == numInstances || sumForValue == 0) { corr = 0; } if (corr < 0.0) { corr = -corr; } sumCorr += classValCounts[k] * corr; } 
avgCorrForValue = sumCorr / sumClass; corrForAtt += sumForValue * avgCorrForValue; if (m_detailedOutput) { m_detailedOutputBuff.append("\n\t") .append(data.attribute(i).value(j)).append(": "); m_detailedOutputBuff.append(Utils.doubleToString(avgCorrForValue, 6)); } } // the weighted average corr for att i as // a whole (wighted by value frequencies) m_correlations[i] = (sumForAtt > 0) ? corrForAtt / sumForAtt : 0; } } } if (m_detailedOutputBuff != null && m_detailedOutputBuff.length() > 0) { m_detailedOutputBuff.append("\n"); } } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 9049 $"); } /** * Main method for testing this class. * * @param args the options */ public static void main(String[] args) { runEvaluator(new CorrelationAttributeEval(), args); } }
15,162
30.989451
192
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/CostSensitiveASEvaluation.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * CostSensitiveASEvaluation.java * Copyright (C) 2008 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.SelectedTag; import weka.core.Tag; import weka.classifiers.CostMatrix; import weka.core.WeightedInstancesHandler; import weka.core.RevisionUtils; import java.io.BufferedReader; import java.io.File; import java.io.FileReader; import java.io.StringReader; import java.io.StringWriter; import java.io.Serializable; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import java.util.ArrayList; /** * Abstract base class for cost-sensitive subset and attribute evaluators. 
* * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision: 5562 $ */ public abstract class CostSensitiveASEvaluation extends ASEvaluation implements OptionHandler, Serializable { /** for serialization */ static final long serialVersionUID = -7045833833363396977L; /** load cost matrix on demand */ public static final int MATRIX_ON_DEMAND = 1; /** use explicit cost matrix */ public static final int MATRIX_SUPPLIED = 2; /** Specify possible sources of the cost matrix */ public static final Tag [] TAGS_MATRIX_SOURCE = { new Tag(MATRIX_ON_DEMAND, "Load cost matrix on demand"), new Tag(MATRIX_SUPPLIED, "Use explicit cost matrix") }; /** Indicates the current cost matrix source */ protected int m_MatrixSource = MATRIX_ON_DEMAND; /** * The directory used when loading cost files on demand, null indicates * current directory */ protected File m_OnDemandDirectory = new File(System.getProperty("user.dir")); /** The name of the cost file, for command line options */ protected String m_CostFile; /** The cost matrix */ protected CostMatrix m_CostMatrix = new CostMatrix(1); /** The base evaluator to use */ protected ASEvaluation m_evaluator; /** random number seed */ protected int m_seed = 1; /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector newVector = new Vector(4); newVector.addElement(new Option( "\tFile name of a cost matrix to use. If this is not supplied,\n" +"\ta cost matrix will be loaded on demand. 
The name of the\n" +"\ton-demand file is the relation name of the training data\n" +"\tplus \".cost\", and the path to the on-demand file is\n" +"\tspecified with the -N option.", "C", 1, "-C <cost file name>")); newVector.addElement(new Option( "\tName of a directory to search for cost files when loading\n" +"\tcosts on demand (default current directory).", "N", 1, "-N <directory>")); newVector.addElement(new Option( "\tThe cost matrix in Matlab single line format.", "cost-matrix", 1, "-cost-matrix <matrix>")); newVector.addElement(new Option( "\tThe seed to use for random number generation.", "S", 1, "-S <integer>")); newVector.addElement(new Option( "\tFull name of base evaluator. Options after -- are " +"passed to the evaluator.\n" + "\t(default: " + defaultEvaluatorString() +")", "W", 1, "-W")); if (m_evaluator instanceof OptionHandler) { newVector.addElement(new Option( "", "", 0, "\nOptions specific to evaluator " + m_evaluator.getClass().getName() + ":")); Enumeration enu = ((OptionHandler)m_evaluator).listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } } return newVector.elements(); } /** * Parses a given list of options. <p/> * * Valid options are: <p/> * * <pre> -C &lt;cost file name&gt; * File name of a cost matrix to use. If this is not supplied, * a cost matrix will be loaded on demand. The name of the * on-demand file is the relation name of the training data * plus ".cost", and the path to the on-demand file is * specified with the -N option.</pre> * * <pre> -N &lt;directory&gt; * Name of a directory to search for cost files when loading * costs on demand (default current directory).</pre> * * <pre> -cost-matrix &lt;matrix&gt; * The cost matrix in Matlab single line format.</pre> * * <pre> -S &lt;integer&gt; * The seed to use for random number generation.</pre> * * <pre> -W * Full name of base evaluator. 
* (default: weka.attributeSelection.CfsSubsetEval)</pre> * * Options after -- are passed to the designated evaluator.<p> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String costFile = Utils.getOption('C', options); if (costFile.length() != 0) { try { setCostMatrix(new CostMatrix(new BufferedReader( new FileReader(costFile)))); } catch (Exception ex) { // now flag as possible old format cost matrix. Delay cost matrix // loading until buildClassifer is called setCostMatrix(null); } setCostMatrixSource(new SelectedTag(MATRIX_SUPPLIED, TAGS_MATRIX_SOURCE)); m_CostFile = costFile; } else { setCostMatrixSource(new SelectedTag(MATRIX_ON_DEMAND, TAGS_MATRIX_SOURCE)); } String demandDir = Utils.getOption('N', options); if (demandDir.length() != 0) { setOnDemandDirectory(new File(demandDir)); } String cost_matrix = Utils.getOption("cost-matrix", options); if (cost_matrix.length() != 0) { StringWriter writer = new StringWriter(); CostMatrix.parseMatlab(cost_matrix).write(writer); setCostMatrix(new CostMatrix(new StringReader(writer.toString()))); setCostMatrixSource(new SelectedTag(MATRIX_SUPPLIED, TAGS_MATRIX_SOURCE)); } String seed = Utils.getOption('S', options); if (seed.length() != 0) { setSeed(Integer.parseInt(seed)); } else { setSeed(1); } String evaluatorName = Utils.getOption('W', options); if (evaluatorName.length() > 0) { // This is just to set the evaluator in case the option // parsing fails. setEvaluator(ASEvaluation.forName(evaluatorName, null)); setEvaluator(ASEvaluation.forName(evaluatorName, Utils.partitionOptions(options))); } else { // This is just to set the classifier in case the option // parsing fails. setEvaluator(ASEvaluation.forName(defaultEvaluatorString(), null)); setEvaluator(ASEvaluation.forName(defaultEvaluatorString(), Utils.partitionOptions(options))); } } /** * Gets the current settings of the subset evaluator. 
* * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { ArrayList<String> options = new ArrayList<String>(); if (m_MatrixSource == MATRIX_SUPPLIED) { if (m_CostFile != null) { options.add("-C"); options.add("" + m_CostFile); } else { options.add("-cost-matrix"); options.add(getCostMatrix().toMatlab()); } } else { options.add("-N"); options.add("" + getOnDemandDirectory()); } options.add("-S"); options.add("" + getSeed()); options.add("-W"); options.add(m_evaluator.getClass().getName()); if (m_evaluator instanceof OptionHandler) { String[] evaluatorOptions = ((OptionHandler)m_evaluator).getOptions(); if (evaluatorOptions.length > 0) { options.add("--"); for (int i = 0; i < evaluatorOptions.length; i++) { options.add(evaluatorOptions[i]); } } } return options.toArray(new String[0]); } /** * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "A meta subset evaluator that makes its base subset evaluator cost-sensitive. "; } /** * Return the name of the default evaluator. * * @return the name of the default evaluator */ public String defaultEvaluatorString() { return "weka.attributeSelection.CfsSubsetEval"; } /** * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String costMatrixSourceTipText() { return "Sets where to get the cost matrix. The two options are" + "to use the supplied explicit cost matrix (the setting of the " + "costMatrix property), or to load a cost matrix from a file when " + "required (this file will be loaded from the directory set by the " + "onDemandDirectory property and will be named relation_name" + CostMatrix.FILE_EXTENSION + ")."; } /** * Gets the source location method of the cost matrix. Will be one of * MATRIX_ON_DEMAND or MATRIX_SUPPLIED. * * @return the cost matrix source. 
*/ public SelectedTag getCostMatrixSource() { return new SelectedTag(m_MatrixSource, TAGS_MATRIX_SOURCE); } /** * Sets the source location of the cost matrix. Values other than * MATRIX_ON_DEMAND or MATRIX_SUPPLIED will be ignored. * * @param newMethod the cost matrix location method. */ public void setCostMatrixSource(SelectedTag newMethod) { if (newMethod.getTags() == TAGS_MATRIX_SOURCE) { m_MatrixSource = newMethod.getSelectedTag().getID(); } } /** * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String onDemandDirectoryTipText() { return "Sets the directory where cost files are loaded from. This option " + "is used when the costMatrixSource is set to \"On Demand\"."; } /** * Returns the directory that will be searched for cost files when * loading on demand. * * @return The cost file search directory. */ public File getOnDemandDirectory() { return m_OnDemandDirectory; } /** * Sets the directory that will be searched for cost files when * loading on demand. * * @param newDir The cost file search directory. */ public void setOnDemandDirectory(File newDir) { if (newDir.isDirectory()) { m_OnDemandDirectory = newDir; } else { m_OnDemandDirectory = new File(newDir.getParent()); } m_MatrixSource = MATRIX_ON_DEMAND; } /** * Gets the evaluator specification string, which contains the class name of * the evaluator and any options to the evaluator * * @return the evaluator string. */ protected String getEvaluatorSpec() { ASEvaluation ase = getEvaluator(); if (ase instanceof OptionHandler) { return ase.getClass().getName() + " " + Utils.joinOptions(((OptionHandler)ase).getOptions()); } return ase.getClass().getName(); } /** * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String costMatrixTipText() { return "Sets the cost matrix explicitly. 
This matrix is used if the " + "costMatrixSource property is set to \"Supplied\"."; } /** * Gets the misclassification cost matrix. * * @return the cost matrix */ public CostMatrix getCostMatrix() { return m_CostMatrix; } /** * Sets the misclassification cost matrix. * * @param newCostMatrix the cost matrix */ public void setCostMatrix(CostMatrix newCostMatrix) { m_CostMatrix = newCostMatrix; m_MatrixSource = MATRIX_SUPPLIED; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String seedTipText() { return "The random number seed to be used."; } /** * Set the seed for random number generation. * * @param seed the seed */ public void setSeed(int seed) { m_seed = seed; } /** * Gets the seed for the random number generations. * * @return the seed for the random number generation */ public int getSeed() { return m_seed; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String evaluatorTipText() { return "The base evaluator to be used."; } /** * Set the base evaluator. * * @param newEvaluator the evaluator to use. * @throws IllegalArgumentException if the evaluator is of the wrong type */ public void setEvaluator(ASEvaluation newEvaluator) throws IllegalArgumentException { m_evaluator = newEvaluator; } /** * Get the evaluator used as the base evaluator. * * @return the evaluator used as the base evaluator */ public ASEvaluation getEvaluator() { return m_evaluator; } /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result; if (getEvaluator() != null) { result = getEvaluator().getCapabilities(); } else { result = new Capabilities(this); result.disableAll(); } // class result.disableAllClasses(); result.disableAllClassDependencies(); result.enable(Capability.NOMINAL_CLASS); return result; } /** * Generates a attribute evaluator. Has to initialize all fields of the * evaluator that are not being set via options. * * @param data set of instances serving as training data * @exception Exception if the evaluator has not been * generated successfully */ public void buildEvaluator(Instances data) throws Exception { // can evaluator handle the data? getCapabilities().testWithFail(data); // remove instances with missing class data = new Instances(data); data.deleteWithMissingClass(); if (m_evaluator == null) { throw new Exception("No base evaluator has been set!"); } if (m_MatrixSource == MATRIX_ON_DEMAND) { String costName = data.relationName() + CostMatrix.FILE_EXTENSION; File costFile = new File(getOnDemandDirectory(), costName); if (!costFile.exists()) { throw new Exception("On-demand cost file doesn't exist: " + costFile); } setCostMatrix(new CostMatrix(new BufferedReader( new FileReader(costFile)))); } else if (m_CostMatrix == null) { // try loading an old format cost file m_CostMatrix = new CostMatrix(data.numClasses()); m_CostMatrix.readOldFormat(new BufferedReader( new FileReader(m_CostFile))); } Random random = null; if (!(m_evaluator instanceof WeightedInstancesHandler)) { random = new Random(m_seed); } data = m_CostMatrix.applyCostMatrix(data, random); m_evaluator.buildEvaluator(data); } /** * Provides a chance for a attribute evaluator to do any special * post processing of the selected attribute set. 
* * @param attributeSet the set of attributes found by the search * @return a possibly ranked list of postprocessed attributes * @exception Exception if postprocessing fails for some reason */ public int [] postProcess(int [] attributeSet) throws Exception { return m_evaluator.postProcess(attributeSet); } /** * Output a representation of this evaluator * * @return a string representation of the classifier */ public String toString() { if (m_evaluator == null) { return "CostSensitiveASEvaluation: No model built yet."; } String result = (m_evaluator instanceof AttributeEvaluator) ? "CostSensitiveAttributeEval using " : "CostSensitiveSubsetEval using "; result += "\n\n" + getEvaluatorSpec() + "\n\nEvaluator\n" + m_evaluator.toString() + "\n\nCost Matrix\n" + m_CostMatrix.toString(); return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5562 $"); } }
18,030
30.034423
101
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/CostSensitiveAttributeEval.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * CostSensitiveAttributeEval.java
 * Copyright (C) 2008 University of Waikato, Hamilton, New Zealand
 */

package weka.attributeSelection;

import weka.core.OptionHandler;
import weka.core.RevisionUtils;

import java.util.BitSet;
import java.io.Serializable;

/**
 * A meta attribute evaluator that makes its base attribute evaluator
 * cost-sensitive. All option handling, cost-matrix loading and evaluator
 * construction is inherited from {@link CostSensitiveASEvaluation}; this
 * class only enforces that the wrapped evaluator is an
 * {@link AttributeEvaluator} and delegates per-attribute scoring to it.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision: 5562 $
 */
public class CostSensitiveAttributeEval
  extends CostSensitiveASEvaluation
  implements Serializable, AttributeEvaluator, OptionHandler {

  /** For serialization */
  static final long serialVersionUID = 4484876541145458447L;

  /**
   * Default constructor: wraps a ReliefF attribute evaluator.
   */
  public CostSensitiveAttributeEval() {
    setEvaluator(new ReliefFAttributeEval());
  }

  /**
   * Returns the class name of the evaluator used when none is explicitly
   * configured.
   *
   * @return the fully qualified name of the default evaluator
   */
  public String defaultEvaluatorString() {
    return "weka.attributeSelection.ReliefFAttributeEval";
  }

  /**
   * Installs the base evaluator to be made cost-sensitive.
   *
   * @param newEvaluator the evaluator to wrap
   * @throws IllegalArgumentException if the evaluator is not an instance
   *           of {@link AttributeEvaluator}
   */
  public void setEvaluator(ASEvaluation newEvaluator) throws IllegalArgumentException {
    if (newEvaluator instanceof AttributeEvaluator) {
      m_evaluator = newEvaluator;
    } else {
      throw new IllegalArgumentException("Evaluator must be an AttributeEvaluator!");
    }
  }

  /**
   * Scores a single attribute by delegating to the wrapped base evaluator.
   *
   * @param attribute the index of the attribute to be evaluated
   * @return the "merit" of the attribute as reported by the base evaluator
   * @throws Exception if the attribute could not be evaluated
   */
  public double evaluateAttribute(int attribute) throws Exception {
    AttributeEvaluator base = (AttributeEvaluator) m_evaluator;
    return base.evaluateAttribute(attribute);
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 5562 $");
  }

  /**
   * Command-line entry point for testing this class.
   *
   * @param args the options
   */
  public static void main(String[] args) {
    runEvaluator(new CostSensitiveAttributeEval(), args);
  }
}
4,826
28.254545
95
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/CostSensitiveSubsetEval.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * CostSensitiveSubsetEval.java
 * Copyright (C) 2008 University of Waikato, Hamilton, New Zealand
 */

package weka.attributeSelection;

import weka.core.OptionHandler;
import weka.core.RevisionUtils;

import java.util.BitSet;
import java.io.Serializable;

/**
 * A meta subset evaluator that makes its base subset evaluator
 * cost-sensitive. Option handling, cost-matrix loading and evaluator
 * construction are inherited from {@link CostSensitiveASEvaluation}; this
 * class only enforces that the wrapped evaluator is a
 * {@link SubsetEvaluator} and delegates subset scoring to it.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision: 5562 $
 */
public class CostSensitiveSubsetEval
  extends CostSensitiveASEvaluation
  implements Serializable, SubsetEvaluator, OptionHandler {

  /** For serialization */
  static final long serialVersionUID = 2924546096103426700L;

  /**
   * Default constructor: wraps a CFS subset evaluator.
   */
  public CostSensitiveSubsetEval() {
    setEvaluator(new CfsSubsetEval());
  }

  /**
   * Installs the base evaluator to be made cost-sensitive.
   *
   * @param newEvaluator the evaluator to wrap
   * @throws IllegalArgumentException if the evaluator is not an instance
   *           of {@link SubsetEvaluator}
   */
  public void setEvaluator(ASEvaluation newEvaluator) throws IllegalArgumentException {
    if (newEvaluator instanceof SubsetEvaluator) {
      m_evaluator = newEvaluator;
    } else {
      throw new IllegalArgumentException("Evaluator must be an SubsetEvaluator!");
    }
  }

  /**
   * Scores a subset of attributes by delegating to the wrapped base
   * evaluator.
   *
   * @param subset a bitset representing the attribute subset to be
   *          evaluated
   * @return the "merit" of the subset as reported by the base evaluator
   * @throws Exception if the subset could not be evaluated
   */
  public double evaluateSubset(BitSet subset) throws Exception {
    SubsetEvaluator base = (SubsetEvaluator) m_evaluator;
    return base.evaluateSubset(subset);
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 5562 $");
  }

  /**
   * Command-line entry point for testing this class.
   *
   * @param args the options
   */
  public static void main(String[] args) {
    runEvaluator(new CostSensitiveSubsetEval(), args);
  }
}
3,928
27.889706
92
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/ErrorBasedMeritEvaluator.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * ErrorBasedMeritEvaluator.java * Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; /** * Interface for evaluators that calculate the "merit" of attributes/subsets * as the error of a learning scheme * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public interface ErrorBasedMeritEvaluator { }
1,080
29.885714
76
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/ExhaustiveSearch.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * ExhaustiveSearch.java
 * Copyright (C) 1999 University of Waikato, Hamilton, New Zealand
 */

package weka.attributeSelection;

import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;

import java.math.BigInteger;
import java.util.BitSet;
import java.util.Enumeration;
import java.util.Vector;

/**
 <!-- globalinfo-start -->
 * ExhaustiveSearch : <br/>
 * <br/>
 * Performs an exhaustive search through the space of attribute subsets
 * starting from the empty set of attrubutes. Reports the best subset found.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -V
 *  Output subsets as the search progresses.
 *  (default = false).</pre>
 *
 <!-- options-end -->
 *
 * Enumerates every one of the 2^n - 1 non-empty candidate subsets (plus the
 * empty set) by counting a {@link BigInteger} whose bits select attributes,
 * so the run time grows exponentially with the number of attributes.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision: 1.15 $
 */
public class ExhaustiveSearch extends ASSearch implements OptionHandler {

  /** for serialization */
  static final long serialVersionUID = 5741842861142379712L;

  /** the best feature set found during the search */
  private BitSet m_bestGroup;

  /** the merit of the best subset found */
  private double m_bestMerit;

  /** does the data have a class */
  private boolean m_hasClass;

  /** holds the class index */
  private int m_classIndex;

  /** number of attributes in the data */
  private int m_numAttribs;

  /** if true, then ouput new best subsets as the search progresses */
  private boolean m_verbose;

  /** the number of subsets evaluated during the search */
  private int m_evaluations;

  /**
   * Returns a string describing this search method
   *
   * @return a description of the search suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return "ExhaustiveSearch : \n\nPerforms an exhaustive search through "
      +"the space of attribute subsets starting from the empty set of "
      +"attrubutes. Reports the best subset found.";
  }

  /**
   * Constructor. Resets all options to their defaults.
   */
  public ExhaustiveSearch () {
    resetOptions();
  }

  /**
   * Returns an enumeration describing the available options.
   * Only one option is exposed: -V (verbose progress output).
   *
   * @return an enumeration of all the available options.
   **/
  public Enumeration listOptions () {
    Vector newVector = new Vector(2);

    newVector.addElement(new Option("\tOutput subsets as the search progresses."
				    +"\n\t(default = false)."
				    , "V", 0
				    , "-V"));
    return  newVector.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -V
   *  Output subsets as the search progresses.
   *  (default = false).</pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   **/
  public void setOptions (String[] options)
    throws Exception {
    // always start from defaults so repeated calls don't accumulate state
    resetOptions();

    setVerbose(Utils.getFlag('V',options));
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String verboseTipText() {
    return "Print progress information. Sends progress info to the terminal "
      +"as the search progresses.";
  }

  /**
   * set whether or not to output new best subsets as the search proceeds
   *
   * @param v true if output is to be verbose
   */
  public void setVerbose(boolean v) {
    m_verbose = v;
  }

  /**
   * get whether or not output is verbose
   *
   * @return true if output is set to verbose
   */
  public boolean getVerbose() {
    return m_verbose;
  }

  /**
   * Gets the current settings of RandomSearch.
   * The array has fixed length 1; unused slots are padded with "".
   *
   * @return an array of strings suitable for passing to setOptions()
   */
  public String[] getOptions () {
    String[] options = new String[1];
    int current = 0;

    if (m_verbose) {
      options[current++] = "-V";
    }

    while (current < options.length) {
      options[current++] = "";
    }
    return  options;
  }

  /**
   * prints a description of the search: evaluation count and best merit
   * (merit is reported as an absolute value).
   *
   * @return a description of the search as a string
   */
  public String toString() {
    StringBuffer text = new StringBuffer();

    text.append("\tExhaustive Search.\n\tStart set: ");
    text.append("no attributes\n");
    text.append("\tNumber of evaluations: "+m_evaluations+"\n");
    text.append("\tMerit of best subset found: "
		+Utils.doubleToString(Math.abs(m_bestMerit),8,3)+"\n");
    return text.toString();
  }

  /**
   * Searches the attribute subset space using an exhaustive search.
   * Starts from the empty subset, then walks a BigInteger counter from 1 to
   * 2^numatts - 1; each set bit of the counter selects one attribute (bit
   * positions are shifted past the class index when the data has a class).
   * Ties on merit are broken in favour of the smaller subset.
   *
   * @param ASEval the attribute evaluator to guide the search
   * @param data the training instances.
   * @return an array (not necessarily ordered) of selected attribute indexes
   * @throws Exception if the search can't be completed
   */
  public int[] search (ASEvaluation ASEval, Instances data)
    throws Exception {
    double best_merit;
    double tempMerit;
    boolean done = false;
    int sizeOfBest;
    int tempSize;
    BigInteger space = BigInteger.ZERO;

    m_evaluations = 0;
    m_numAttribs = data.numAttributes();
    m_bestGroup = new BitSet(m_numAttribs);

    if (!(ASEval instanceof SubsetEvaluator)) {
      throw  new Exception(ASEval.getClass().getName()
			   + " is not a "
			   + "Subset evaluator!");
    }

    if (ASEval instanceof UnsupervisedSubsetEvaluator) {
      m_hasClass = false;
    }
    else {
      m_hasClass = true;
      m_classIndex = data.classIndex();
    }

    SubsetEvaluator ASEvaluator = (SubsetEvaluator)ASEval;
    m_numAttribs = data.numAttributes();

    // evaluate the starting (empty) subset; it is the incumbent best
    best_merit = ASEvaluator.evaluateSubset(m_bestGroup);
    m_evaluations++;
    sizeOfBest = countFeatures(m_bestGroup);

    BitSet tempGroup = new BitSet(m_numAttribs);
    tempMerit = ASEvaluator.evaluateSubset(tempGroup);

    if (m_verbose) {
      System.out.println("Zero feature subset ("
			 +Utils.doubleToString(Math.
					       abs(tempMerit),8,5)
			 +")");
    }

    if (tempMerit >= best_merit) {
      tempSize = countFeatures(tempGroup);
      if (tempMerit > best_merit
	  || (tempSize < sizeOfBest)) {
	best_merit = tempMerit;
	m_bestGroup = (BitSet)(tempGroup.clone());
	sizeOfBest = tempSize;
      }
    }

    // when the data has a class, the class attribute is excluded from the
    // search, so one fewer bit position is enumerated
    int numatts = (m_hasClass)
      ? m_numAttribs - 1
      : m_numAttribs;
    // searchSpaceEnd = 2^numatts - 1, i.e. the all-attributes subset
    BigInteger searchSpaceEnd =
      BigInteger.ONE.add(BigInteger.ONE).pow(numatts).subtract(BigInteger.ONE);

    while (!done) {
      // the next subset
      space = space.add(BigInteger.ONE);
      if (space.equals(searchSpaceEnd)) {
        // this is the last counter value; evaluate it, then stop
        done = true;
      }
      tempGroup.clear();
      // decode the counter bits into a BitSet of attribute indexes,
      // skipping over the class index when present
      for (int i = 0; i < numatts; i++) {
        if (space.testBit(i)) {
          if (!m_hasClass) {
            tempGroup.set(i);
          } else {
            int j = (i >= m_classIndex)
              ? i + 1
              : i;
            tempGroup.set(j);
          }
        }
      }

      tempMerit = ASEvaluator.evaluateSubset(tempGroup);
      m_evaluations++;
      if (tempMerit >= best_merit) {
        tempSize = countFeatures(tempGroup);
        if (tempMerit > best_merit
            || (tempSize < sizeOfBest)) {
          best_merit = tempMerit;
          m_bestGroup = (BitSet)(tempGroup.clone());
          sizeOfBest = tempSize;
          if (m_verbose) {
            System.out.println("New best subset ("
                               +Utils.doubleToString(Math.
                                                     abs(best_merit),8,5)
                               +"): "+printSubset(m_bestGroup));
          }
        }
      }
    }

    m_bestMerit = best_merit;
    return attributeList(m_bestGroup);
  }

  /**
   * counts the number of features in a subset
   *
   * @param featureSet the feature set for which to count the features
   * @return the number of features in the subset
   */
  private int countFeatures(BitSet featureSet) {
    int count = 0;
    for (int i=0;i<m_numAttribs;i++) {
      if (featureSet.get(i)) {
	count++;
      }
    }
    return count;
  }

  /**
   * prints a subset as a series of attribute numbers (1-based)
   *
   * @param temp the subset to print
   * @return a subset as a String of attribute numbers
   */
  private String printSubset(BitSet temp) {
    StringBuffer text = new StringBuffer();

    for (int j=0;j<m_numAttribs;j++) {
      if (temp.get(j)) {
        text.append((j+1)+" ");
      }
    }
    return text.toString();
  }

  /**
   * converts a BitSet into a list of attribute indexes
   *
   * @param group the BitSet to convert
   * @return an array of attribute indexes
   **/
  private int[] attributeList (BitSet group) {
    int count = 0;

    // count how many were selected
    for (int i = 0; i < m_numAttribs; i++) {
      if (group.get(i)) {
	count++;
      }
    }

    int[] list = new int[count];
    count = 0;

    for (int i = 0; i < m_numAttribs; i++) {
      if (group.get(i)) {
	list[count++] = i;
      }
    }

    return  list;
  }

  /**
   * generates the next subset of size "size" given the subset "temp".
   *
   * NOTE(review): this method is never called from within this class (the
   * search uses the BigInteger counter instead) and it unconditionally
   * writes debug output to System.err — it appears to be a leftover from an
   * earlier enumeration strategy; confirm before removing.
   *
   * @param size the size of the feature subset (eg. 2 means that the
   * current subset contains two features and the next generated subset
   * should also contain 2 features).
   * @param temp will hold the generated subset as a BitSet
   */
  private void generateNextSubset(int size, BitSet temp) {
    int i,j;
    int counter = 0;
    boolean done = false;
    BitSet temp2 = (BitSet)temp.clone();

    System.err.println("Size: "+size);
    for (i=0;i<m_numAttribs;i++) {
      temp2.clear(i);
    }

    while ((!done) && (counter < size)) {
      // scan from the highest movable position downwards for a set bit
      for (i=m_numAttribs-1-counter;i>=0;i--) {
	if (temp.get(i)) {
	  temp.clear(i);

	  int newP;
	  if (i != (m_numAttribs-1-counter)) {
	    newP = i+1;
	    if (newP == m_classIndex) {
	      // never include the class attribute in a candidate subset
	      newP++;
	    }

	    if (newP < m_numAttribs) {
	      temp.set(newP);

	      for (j=0;j<counter;j++) {
		if (newP+1+j == m_classIndex) {
		  newP++;
		}

		if (newP+1+j < m_numAttribs) {
		  temp.set(newP+1+j);
		}
	      }
	      done = true;
	    }
	    else {
	      counter++;
	    }
	    break;
	  }
	  else {
	    counter++;
	    break;
	  }
	}
      }
    }

    // if a full subset of the requested size could not be built,
    // signal exhaustion by clearing the whole set
    if (temp.cardinality() < size) {
      temp.clear();
    }
    System.err.println(printSubset(temp).toString());
  }

  /**
   * resets to defaults
   */
  private void resetOptions() {
    m_verbose = false;
    m_evaluations = 0;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 1.15 $");
  }
}
11,655
24.787611
145
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/FilteredAttributeEval.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * FilteredAttributeEval.java
 * Copyright (C) 2008 University of Waikato, Hamilton, New Zealand
 */

package weka.attributeSelection;

// NOTE(review): OptionHandler and RevisionUtils are imported twice below;
// duplicate single-type imports are legal Java but redundant.
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.filters.Filter;
import weka.core.Instances;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;

import java.util.Enumeration;
import java.util.Vector;
import java.util.ArrayList;
import java.io.Serializable;

/**
 <!-- globalinfo-start -->
 * Class for running an arbitrary attribute evaluator on data that has been
 * passed through an arbitrary filter (note: filters that alter the order or
 * number of attributes are not allowed). Like the evaluator, the structure
 * of the filter is based exclusively on the training data.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -W &lt;evaluator specification&gt;
 *  Full name of base evaluator to use, followed by evaluator options.
 *  eg: "weka.attributeSelection.InfoGainAttributeEval -M"</pre>
 *
 * <pre> -F &lt;filter specification&gt;
 *  Full class name of filter to use, followed
 *  by filter options.
 *  eg: "weka.filters.supervised.instance.SpreadSubsample -M 1"</pre>
 *
 <!-- options-end -->
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision: 5562 $
 */
public class FilteredAttributeEval
  extends ASEvaluation
  implements Serializable, AttributeEvaluator, OptionHandler {

  /** For serialization */
  static final long serialVersionUID = 2111121880778327334L;

  /** Base evaluator */
  protected AttributeEvaluator m_evaluator = new InfoGainAttributeEval();

  /** Filter */
  protected Filter m_filter = new weka.filters.supervised.instance.SpreadSubsample();

  /** Filtered instances structure */
  protected Instances m_filteredInstances;

  // No filtered structure until buildEvaluator() has been called.
  public FilteredAttributeEval() {
    m_filteredInstances = null;
  }

  /**
   * Returns default capabilities of the evaluator.
   * Capabilities are taken from the configured filter (everything is
   * disabled when no filter is set), with all dependencies enabled.
   *
   * @return      the capabilities of this evaluator.
   */
  public Capabilities getCapabilities() {
    Capabilities result;

    if (getFilter() == null) {
      result = super.getCapabilities();
      result.disableAll();
    } else {
      result = getFilter().getCapabilities();
    }

    // set dependencies
    for (Capability cap: Capability.values()) {
      result.enableDependency(cap);
    }

    return result;
  }

  /**
   * @return a description of the evaluator suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return "Class for running an arbitrary attribute evaluator on data that has been passed "
      + "through an arbitrary filter (note: filters that alter the order or number of "
      + "attributes are not allowed). Like the evaluator, the structure of the filter "
      + "is based exclusively on the training data.";
  }

  /**
   * Returns an enumeration describing the available options.
   * Two options are exposed: -W (base evaluator spec) and -F (filter spec).
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector newVector = new Vector(2);

    newVector.addElement(new Option(
                                    "\tFull name of base evaluator to use, followed by "
                                    +"evaluator options.\n"
                                    + "\teg: \"weka.attributeSelection.InfoGainAttributeEval -M\"",
                                    "W", 1, "-W <evaluator specification>"));

    newVector.addElement(new Option(
	      "\tFull class name of filter to use, followed\n"
	      + "\tby filter options.\n"
	      + "\teg: \"weka.filters.supervised.instance.SpreadSubsample -M 1\"",
	      "F", 1, "-F <filter specification>"));

    return newVector.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -W &lt;evaluator specification&gt;
   *  Full name of base evaluator to use, followed by evaluator options.
   *  eg: "weka.attributeSelection.InfoGainAttributeEval -M"</pre>
   *
   * <pre> -F &lt;filter specification&gt;
   *  Full class name of filter to use, followed
   *  by filter options.
   *  eg: "weka.filters.supervised.instance.SpreadSubsample -M 1"</pre>
   *
   <!-- options-end -->
   *
   * Missing -W or -F fall back to InfoGainAttributeEval and
   * SpreadSubsample respectively.
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String evaluator = Utils.getOption('W', options);

    if (evaluator.length() > 0) {
      // first token is the class name, the remainder are its options
      String[] evaluatorSpec = Utils.splitOptions(evaluator);
      if (evaluatorSpec.length == 0) {
        throw new IllegalArgumentException("Invalid evaluator specification string");
      }

      String evaluatorName = evaluatorSpec[0];
      evaluatorSpec[0] = "";
      setAttributeEvaluator((ASEvaluation)Utils.forName(AttributeEvaluator.class,
                                                        evaluatorName, evaluatorSpec));
    } else {
      setAttributeEvaluator(new InfoGainAttributeEval());
    }

    // Same for filter
    String filterString = Utils.getOption('F', options);
    if (filterString.length() > 0) {
      String [] filterSpec = Utils.splitOptions(filterString);
      if (filterSpec.length == 0) {
	throw new IllegalArgumentException("Invalid filter specification string");
      }
      String filterName = filterSpec[0];
      filterSpec[0] = "";
      setFilter((Filter) Utils.forName(Filter.class, filterName, filterSpec));
    } else {
      setFilter(new weka.filters.supervised.instance.SpreadSubsample());
    }
  }

  /**
   * Gets the current settings of the subset evaluator.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    ArrayList<String> options = new ArrayList<String>();

    options.add("-W");
    options.add(getEvaluatorSpec());

    options.add("-F");
    options.add(getFilterSpec());

    return options.toArray(new String[0]);
  }

  /**
   * Get the evaluator + options as a string
   *
   * @return a String containing the name of the evalautor + any options
   */
  protected String getEvaluatorSpec() {
    AttributeEvaluator a = m_evaluator;
    if (a instanceof OptionHandler) {
      return a.getClass().getName() + " "
        + Utils.joinOptions(((OptionHandler)a).getOptions());
    }
    return a.getClass().getName();
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String attributeEvaluatorTipText() {
    return "The attribute evaluator to be used.";
  }

  /**
   * Set the attribute evaluator to use
   *
   * @param newEvaluator the attribute evaluator to use
   * @throws IllegalArgumentException if the evaluator is not an
   *           AttributeEvaluator
   */
  public void setAttributeEvaluator(ASEvaluation newEvaluator) {
    if (!(newEvaluator instanceof AttributeEvaluator)) {
      throw new IllegalArgumentException("Evaluator must be an AttributeEvaluator!");
    }
    m_evaluator = (AttributeEvaluator)newEvaluator;
  }

  /**
   * Get the attribute evaluator to use
   *
   * @return the attribute evaluator to use
   */
  public ASEvaluation getAttributeEvaluator() {
    return (ASEvaluation)m_evaluator;
  }

  /**
   * Get the filter + options as a string
   *
   * @return a String containing the name of the filter + any options
   */
  protected String getFilterSpec() {
    Filter c = getFilter();
    if (c instanceof OptionHandler) {
      return c.getClass().getName() + " "
	+ Utils.joinOptions(((OptionHandler)c).getOptions());
    }
    return c.getClass().getName();
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String filterTipText() {
    return "The filter to be used.";
  }

  /**
   * Set the filter to use
   *
   * @param newFilter the filter to use
   */
  public void setFilter(Filter newFilter) {
    m_filter = newFilter;
  }

  /**
   * Get the filter to use
   *
   * @return the filter to use
   */
  public Filter getFilter() {
    return m_filter;
  }

  /**
   * Returns the revision string.
   *
   * @return		the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 5562 $");
  }

  /**
   * Initializes a filtered attribute evaluator: filters the training data,
   * verifies the filter kept the attribute count, class index and attribute
   * order intact, then builds the base evaluator on the filtered data.
   *
   * @param data set of instances serving as training data
   * @throws Exception if the evaluator has not been
   * generated successfully (including when the filter altered the
   * number, order or class of the attributes)
   */
  public void buildEvaluator(Instances data) throws Exception {
    // can evaluator handle data?
    getCapabilities().testWithFail(data);

    // Structure of original
    Instances original = new Instances(data, 0);
    m_filter.setInputFormat(data);
    data = Filter.useFilter(data, m_filter);

    // Can only proceed if filter has not altered the order or
    // number of attributes in the data
    if (data.numAttributes() != original.numAttributes()) {
      throw new Exception("Filter must not alter the number of "
                          +"attributes in the data!");
    }

    // Check the class index (if set)
    if (original.classIndex() >= 0) {
      if (data.classIndex() != original.classIndex()) {
        throw new Exception("Filter must not change the class attribute!");
      }
    }

    // check the order
    for (int i = 0; i < original.numAttributes(); i++) {
      if (!data.attribute(i).name().equals(original.attribute(i).name())) {
        throw new Exception("Filter must not alter the order of the attributes!");
      }
    }

    // can the evaluator handle this data?
    ((ASEvaluation)getAttributeEvaluator()).getCapabilities().testWithFail(data);

    // keep only the (string-free) header of the filtered data for toString()
    m_filteredInstances = data.stringFreeStructure();

    ((ASEvaluation)m_evaluator).buildEvaluator(data);
  }

  /**
   * Evaluates an individual attribute by delegating to the base
   * evaluator.
   *
   * @param attribute the index of the attribute to be evaluated
   * @return the merit of the attribute according to the base evaluator
   * @throws Exception if the attribute could not be evaluated
   */
  public double evaluateAttribute(int attribute) throws Exception {
    return m_evaluator.evaluateAttribute(attribute);
  }

  /**
   * Describe the attribute evaluator
   *
   * @return a description of the attribute evaluator as a string
   */
  public String toString() {
    StringBuffer text = new StringBuffer();
    if (m_filteredInstances == null) {
      text.append("Filtered attribute evaluator has not been built");
    } else {
      text.append("Filtered Attribute Evaluator");
      text.append("\nFilter: " + getFilterSpec());
      text.append("\nAttribute evaluator: " + getEvaluatorSpec());
      text.append("\n\nFiltered header:\n");
      text.append(m_filteredInstances);
    }
    text.append("\n");
    return text.toString();
  }

  // ============
  // Test method.
  // ============
  /**
   * Main method for testing this class.
   *
   * @param args the options
   */
  public static void main (String[] args) {
    runEvaluator(new FilteredAttributeEval(), args);
  }
}
29.312343
99
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/FilteredSubsetEval.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * FilteredSubsetEval.java * Copyright (C) 2008 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.filters.Filter; import weka.core.Instances; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import java.util.Enumeration; import java.util.Vector; import java.util.ArrayList; import java.util.BitSet; import java.io.Serializable; /** <!-- globalinfo-start --> * Class for running an arbitrary subset evaluator on data that has been passed through an arbitrary * filter (note: filters that alter the order or number of attributes are not allowed). * Like the evaluator, the structure of the filter is based exclusively on the training data. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -W &lt;evaluator specification&gt; * Full name of base evaluator to use, followed by evaluator options. * eg: "weka.attributeSelection.CfsSubsetEval -L"</pre> * * <pre> -F &lt;filter specification&gt; * Full class name of filter to use, followed * by filter options. 
* eg: "weka.filters.supervised.instance.SpreadSubsample -M 1"</pre> * <!-- options-end --> * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision: 5562 $ */ public class FilteredSubsetEval extends ASEvaluation implements Serializable, SubsetEvaluator, OptionHandler { /** For serialization */ static final long serialVersionUID = 2111121880778327334L; /** Base evaluator */ protected SubsetEvaluator m_evaluator = new CfsSubsetEval(); /** Filter */ protected Filter m_filter = new weka.filters.supervised.instance.SpreadSubsample(); /** Filtered instances structure */ protected Instances m_filteredInstances; public FilteredSubsetEval() { m_filteredInstances = null; } /** * Returns default capabilities of the evaluator. * * @return the capabilities of this evaluator. */ public Capabilities getCapabilities() { Capabilities result; if (getFilter() == null) { result = super.getCapabilities(); result.disableAll(); } else { result = getFilter().getCapabilities(); } // set dependencies for (Capability cap: Capability.values()) { result.enableDependency(cap); } return result; } /** * @return a description of the evaluator suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for running an arbitrary subset evaluator on data that has been passed " + "through an arbitrary filter (note: filters that alter the order or number of " + "attributes are not allowed). Like the evaluator, the structure of the filter " + "is based exclusively on the training data."; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ public Enumeration listOptions() { Vector newVector = new Vector(2); newVector.addElement(new Option( "\tFull name of base evaluator to use, followed by " +"evaluator options.\n" + "\teg: \"weka.attributeSelection.CfsSubsetEval -L\"", "W", 1, "-W <evaluator specification>")); newVector.addElement(new Option( "\tFull class name of filter to use, followed\n" + "\tby filter options.\n" + "\teg: \"weka.filters.supervised.instance.SpreadSubsample -M 1\"", "F", 1, "-F <filter specification>")); return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -W &lt;evaluator specification&gt; * Full name of base evaluator to use, followed by evaluator options. * eg: "weka.attributeSelection.CfsSubsetEval -L"</pre> * * <pre> -F &lt;filter specification&gt; * Full class name of filter to use, followed * by filter options. * eg: "weka.filters.supervised.instance.SpreadSubsample -M 1"</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String evaluator = Utils.getOption('W', options); if (evaluator.length() > 0) { String[] evaluatorSpec = Utils.splitOptions(evaluator); if (evaluatorSpec.length == 0) { throw new IllegalArgumentException("Invalid evaluator specification string"); } String evaluatorName = evaluatorSpec[0]; evaluatorSpec[0] = ""; setSubsetEvaluator((ASEvaluation)Utils.forName(SubsetEvaluator.class, evaluatorName, evaluatorSpec)); } else { setSubsetEvaluator(new CfsSubsetEval()); } // Same for filter String filterString = Utils.getOption('F', options); if (filterString.length() > 0) { String [] filterSpec = Utils.splitOptions(filterString); if (filterSpec.length == 0) { throw new IllegalArgumentException("Invalid filter specification string"); } String filterName = filterSpec[0]; filterSpec[0] = ""; setFilter((Filter) 
Utils.forName(Filter.class, filterName, filterSpec)); } else { setFilter(new weka.filters.supervised.instance.SpreadSubsample()); } } /** * Gets the current settings of the subset evaluator. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { ArrayList<String> options = new ArrayList<String>(); options.add("-W"); options.add(getEvaluatorSpec()); options.add("-F"); options.add(getFilterSpec()); return options.toArray(new String[0]); } /** * Get the evaluator + options as a string * * @return a String containing the name of the evalautor + any options */ protected String getEvaluatorSpec() { SubsetEvaluator a = m_evaluator; if (a instanceof OptionHandler) { return a.getClass().getName() + " " + Utils.joinOptions(((OptionHandler)a).getOptions()); } return a.getClass().getName(); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String subsetEvaluatorTipText() { return "The subset evaluator to be used."; } /** * Set the subset evaluator to use * * @param newEvaluator the subset evaluator to use */ public void setSubsetEvaluator(ASEvaluation newEvaluator) { if (!(newEvaluator instanceof SubsetEvaluator)) { throw new IllegalArgumentException("Evaluator must be a SubsetEvaluator!"); } m_evaluator = (SubsetEvaluator)newEvaluator; } /** * Get the subset evaluator to use * * @return the subset evaluator to use */ public ASEvaluation getSubsetEvaluator() { return (ASEvaluation)m_evaluator; } /** * Get the filter + options as a string * * @return a String containing the name of the filter + any options */ protected String getFilterSpec() { Filter c = getFilter(); if (c instanceof OptionHandler) { return c.getClass().getName() + " " + Utils.joinOptions(((OptionHandler)c).getOptions()); } return c.getClass().getName(); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in 
the explorer/experimenter gui */ public String filterTipText() { return "The filter to be used."; } /** * Set the filter to use * * @param newFilter the filter to use */ public void setFilter(Filter newFilter) { m_filter = newFilter; } /** * Get the filter to use * * @return the filter to use */ public Filter getFilter() { return m_filter; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5562 $"); } /** * Initializes a filtered attribute evaluator. * * @param data set of instances serving as training data * @throws Exception if the evaluator has not been * generated successfully */ public void buildEvaluator(Instances data) throws Exception { // can evaluator handle data? getCapabilities().testWithFail(data); // Structure of original Instances original = new Instances(data, 0); m_filter.setInputFormat(data); data = Filter.useFilter(data, m_filter); // Can only proceed if filter has not altered the order or // number of attributes in the data if (data.numAttributes() != original.numAttributes()) { throw new Exception("Filter must not alter the number of " +"attributes in the data!"); } // Check the class index (if set) if (original.classIndex() >= 0) { if (data.classIndex() != original.classIndex()) { throw new Exception("Filter must not change the class attribute!"); } } // check the order for (int i = 0; i < original.numAttributes(); i++) { if (!data.attribute(i).name().equals(original.attribute(i).name())) { throw new Exception("Filter must not alter the order of the attributes!"); } } // can the evaluator handle this data? 
((ASEvaluation)getSubsetEvaluator()).getCapabilities().testWithFail(data); m_filteredInstances = data.stringFreeStructure(); ((ASEvaluation)m_evaluator).buildEvaluator(data); } /** * evaluates a subset of attributes * * @param subset a bitset representing the attribute subset to be * evaluated * @return the "merit" of the subset * @exception Exception if the subset could not be evaluated */ public double evaluateSubset(BitSet subset) throws Exception { return m_evaluator.evaluateSubset(subset); } /** * Describe the attribute evaluator * @return a description of the attribute evaluator as a string */ public String toString() { StringBuffer text = new StringBuffer(); if (m_filteredInstances == null) { text.append("Filtered attribute evaluator has not been built"); } else { text.append("Filtered Attribute Evaluator"); text.append("\nFilter: " + getFilterSpec()); text.append("\nAttribute evaluator: " + getEvaluatorSpec()); text.append("\n\nFiltered header:\n"); text.append(m_filteredInstances); } text.append("\n"); return text.toString(); } // ============ // Test method. // ============ /** * Main method for testing this class. * * @param args the options */ public static void main (String[] args) { runEvaluator(new FilteredSubsetEval(), args); } }
11,879
28.849246
101
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/GainRatioAttributeEval.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * GainRatioAttributeEval.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import java.util.Enumeration; import java.util.Vector; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.ContingencyTables; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.filters.Filter; import weka.filters.supervised.attribute.Discretize; /** * <!-- globalinfo-start --> GainRatioAttributeEval :<br/> * <br/> * Evaluates the worth of an attribute by measuring the gain ratio with respect * to the class.<br/> * <br/> * GainR(Class, Attribute) = (H(Class) - H(Class | Attribute)) / H(Attribute).<br/> * <p/> * <!-- globalinfo-end --> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -M * treat missing values as a seperate value. 
* </pre> * * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 9690 $ * @see Discretize */ public class GainRatioAttributeEval extends ASEvaluation implements AttributeEvaluator, OptionHandler { /** for serialization */ static final long serialVersionUID = -8504656625598579926L; /** The training instances */ private Instances m_trainInstances; /** The class index */ private int m_classIndex; /** The number of attributes */ private int m_numAttribs; /** The number of instances */ private int m_numInstances; /** The number of classes */ private int m_numClasses; /** Merge missing values */ private boolean m_missing_merge; /** * Returns a string describing this attribute evaluator * * @return a description of the evaluator suitable for displaying in the * explorer/experimenter gui */ public String globalInfo() { return "GainRatioAttributeEval :\n\nEvaluates the worth of an attribute " + "by measuring the gain ratio with respect to the class.\n\n" + "GainR(Class, Attribute) = (H(Class) - H(Class | Attribute)) / " + "H(Attribute).\n"; } /** * Constructor */ public GainRatioAttributeEval() { resetOptions(); } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. **/ @Override public Enumeration listOptions() { Vector newVector = new Vector(1); newVector.addElement(new Option("\ttreat missing values as a seperate " + "value.", "M", 0, "-M")); return newVector.elements(); } /** * Parses a given list of options. * <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -M * treat missing values as a seperate value. 
* </pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported **/ @Override public void setOptions(String[] options) throws Exception { resetOptions(); setMissingMerge(!(Utils.getFlag('M', options))); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String missingMergeTipText() { return "Distribute counts for missing values. Counts are distributed " + "across other values in proportion to their frequency. Otherwise, " + "missing is treated as a separate value."; } /** * distribute the counts for missing values across observed values * * @param b true=distribute missing values. */ public void setMissingMerge(boolean b) { m_missing_merge = b; } /** * get whether missing values are being distributed or not * * @return true if missing values are being distributed. */ public boolean getMissingMerge() { return m_missing_merge; } /** * Gets the current settings of WrapperSubsetEval. * * @return an array of strings suitable for passing to setOptions() */ @Override public String[] getOptions() { String[] options = new String[1]; int current = 0; if (!getMissingMerge()) { options[current++] = "-M"; } while (current < options.length) { options[current++] = ""; } return options; } /** * Returns the capabilities of this evaluator. * * @return the capabilities of this evaluator * @see Capabilities */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Initializes a gain ratio attribute evaluator. 
Discretizes all attributes * that are numeric. * * @param data set of instances serving as training data * @throws Exception if the evaluator has not been generated successfully */ @Override public void buildEvaluator(Instances data) throws Exception { // can evaluator handle data? getCapabilities().testWithFail(data); m_trainInstances = data; m_classIndex = m_trainInstances.classIndex(); m_numAttribs = m_trainInstances.numAttributes(); m_numInstances = m_trainInstances.numInstances(); Discretize disTransform = new Discretize(); disTransform.setUseBetterEncoding(true); disTransform.setInputFormat(m_trainInstances); m_trainInstances = Filter.useFilter(m_trainInstances, disTransform); m_numClasses = m_trainInstances.attribute(m_classIndex).numValues(); } /** * reset options to default values */ protected void resetOptions() { m_trainInstances = null; m_missing_merge = true; } /** * evaluates an individual attribute by measuring the gain ratio of the class * given the attribute. * * @param attribute the index of the attribute to be evaluated * @return the gain ratio * @throws Exception if the attribute could not be evaluated */ @Override public double evaluateAttribute(int attribute) throws Exception { int i, j, ii, jj; int ni, nj; double sum = 0.0; ni = m_trainInstances.attribute(attribute).numValues() + 1; nj = m_numClasses + 1; double[] sumi, sumj; Instance inst; double temp = 0.0; sumi = new double[ni]; sumj = new double[nj]; double[][] counts = new double[ni][nj]; sumi = new double[ni]; sumj = new double[nj]; for (i = 0; i < ni; i++) { sumi[i] = 0.0; for (j = 0; j < nj; j++) { sumj[j] = 0.0; counts[i][j] = 0.0; } } // Fill the contingency table for (i = 0; i < m_numInstances; i++) { inst = m_trainInstances.instance(i); if (inst.isMissing(attribute)) { ii = ni - 1; } else { ii = (int) inst.value(attribute); } if (inst.isMissing(m_classIndex)) { jj = nj - 1; } else { jj = (int) inst.value(m_classIndex); } counts[ii][jj] += inst.weight(); } // get the row totals for 
(i = 0; i < ni; i++) { sumi[i] = 0.0; for (j = 0; j < nj; j++) { sumi[i] += counts[i][j]; sum += counts[i][j]; } } // get the column totals for (j = 0; j < nj; j++) { sumj[j] = 0.0; for (i = 0; i < ni; i++) { sumj[j] += counts[i][j]; } } // distribute missing counts if (m_missing_merge && (sumi[ni - 1] < sum) && (sumj[nj - 1] < sum)) { double[] i_copy = new double[sumi.length]; double[] j_copy = new double[sumj.length]; double[][] counts_copy = new double[sumi.length][sumj.length]; for (i = 0; i < ni; i++) { System.arraycopy(counts[i], 0, counts_copy[i], 0, sumj.length); } System.arraycopy(sumi, 0, i_copy, 0, sumi.length); System.arraycopy(sumj, 0, j_copy, 0, sumj.length); double total_missing = (sumi[ni - 1] + sumj[nj - 1] - counts[ni - 1][nj - 1]); // do the missing i's if (sumi[ni - 1] > 0.0) { for (j = 0; j < nj - 1; j++) { if (counts[ni - 1][j] > 0.0) { for (i = 0; i < ni - 1; i++) { temp = ((i_copy[i] / (sum - i_copy[ni - 1])) * counts[ni - 1][j]); counts[i][j] += temp; sumi[i] += temp; } counts[ni - 1][j] = 0.0; } } } sumi[ni - 1] = 0.0; // do the missing j's if (sumj[nj - 1] > 0.0) { for (i = 0; i < ni - 1; i++) { if (counts[i][nj - 1] > 0.0) { for (j = 0; j < nj - 1; j++) { temp = ((j_copy[j] / (sum - j_copy[nj - 1])) * counts[i][nj - 1]); counts[i][j] += temp; sumj[j] += temp; } counts[i][nj - 1] = 0.0; } } } sumj[nj - 1] = 0.0; // do the both missing if (counts[ni - 1][nj - 1] > 0.0 && total_missing < sum) { for (i = 0; i < ni - 1; i++) { for (j = 0; j < nj - 1; j++) { temp = (counts_copy[i][j] / (sum - total_missing)) * counts_copy[ni - 1][nj - 1]; counts[i][j] += temp; sumi[i] += temp; sumj[j] += temp; } } counts[ni - 1][nj - 1] = 0.0; } } return ContingencyTables.gainRatio(counts); } /** * Return a description of the evaluator * * @return description as a string */ @Override public String toString() { StringBuffer text = new StringBuffer(); if (m_trainInstances == null) { text.append("\tGain Ratio evaluator has not been built"); } else { 
text.append("\tGain Ratio feature evaluator"); if (!m_missing_merge) { text.append("\n\tMissing values treated as seperate"); } } text.append("\n"); return text.toString(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9690 $"); } /** * Main method. * * @param args the options -t training file */ public static void main(String[] args) { runEvaluator(new GainRatioAttributeEval(), args); } }
11,276
25.164733
84
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/GeneticSearch.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * GeneticSearch.java * Copyright (C) 1999 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.Range; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.io.Serializable; import java.util.BitSet; import java.util.Enumeration; import java.util.Hashtable; import java.util.Random; import java.util.Vector; /** <!-- globalinfo-start --> * GeneticSearch:<br/> * <br/> * Performs a search using the simple genetic algorithm described in Goldberg (1989).<br/> * <br/> * For more information see:<br/> * <br/> * David E. Goldberg (1989). Genetic algorithms in search, optimization and machine learning. Addison-Wesley. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;book{Goldberg1989, * author = {David E. 
Goldberg}, * publisher = {Addison-Wesley}, * title = {Genetic algorithms in search, optimization and machine learning}, * year = {1989}, * ISBN = {0201157675} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P &lt;start set&gt; * Specify a starting set of attributes. * Eg. 1,3,5-7.If supplied, the starting set becomes * one member of the initial random * population.</pre> * * <pre> -Z &lt;population size&gt; * Set the size of the population (even number). * (default = 20).</pre> * * <pre> -G &lt;number of generations&gt; * Set the number of generations. * (default = 20)</pre> * * <pre> -C &lt;probability of crossover&gt; * Set the probability of crossover. * (default = 0.6)</pre> * * <pre> -M &lt;probability of mutation&gt; * Set the probability of mutation. * (default = 0.033)</pre> * * <pre> -R &lt;report frequency&gt; * Set frequency of generation reports. * e.g, setting the value to 5 will * report every 5th generation * (default = number of generations)</pre> * * <pre> -S &lt;seed&gt; * Set the random number seed. * (default = 1)</pre> * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 6759 $ */ public class GeneticSearch extends ASSearch implements StartSetHandler, OptionHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -1618264232838472679L; /** * holds a starting set as an array of attributes. 
Becomes one member of the * initial random population */ private int[] m_starting; /** holds the start set for the search as a Range */ private Range m_startRange; /** does the data have a class */ private boolean m_hasClass; /** holds the class index */ private int m_classIndex; /** number of attributes in the data */ private int m_numAttribs; /** the current population */ private GABitSet [] m_population; /** the number of individual solutions */ private int m_popSize; /** the best population member found during the search */ private GABitSet m_best; /** the number of features in the best population member */ private int m_bestFeatureCount; /** the number of entries to cache for lookup */ private int m_lookupTableSize; /** the lookup table */ private Hashtable m_lookupTable; /** random number generation */ private Random m_random; /** seed for random number generation */ private int m_seed; /** the probability of crossover occuring */ private double m_pCrossover; /** the probability of mutation occuring */ private double m_pMutation; /** sum of the current population fitness */ private double m_sumFitness; private double m_maxFitness; private double m_minFitness; private double m_avgFitness; /** the maximum number of generations to evaluate */ private int m_maxGenerations; /** how often reports are generated */ private int m_reportFrequency; /** holds the generation reports */ private StringBuffer m_generationReports; // Inner class /** * A bitset for the genetic algorithm */ protected class GABitSet implements Cloneable, Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = -2930607837482622224L; /** the bitset */ private BitSet m_chromosome; /** holds raw merit */ private double m_objective = -Double.MAX_VALUE; /** the fitness */ private double m_fitness; /** * Constructor */ public GABitSet () { m_chromosome = new BitSet(); } /** * makes a copy of this GABitSet * @return a copy of the object * @throws 
CloneNotSupportedException if something goes wrong */ public Object clone() throws CloneNotSupportedException { GABitSet temp = new GABitSet(); temp.setObjective(this.getObjective()); temp.setFitness(this.getFitness()); temp.setChromosome((BitSet)(this.m_chromosome.clone())); return temp; //return super.clone(); } /** * sets the objective merit value * @param objective the objective value of this population member */ public void setObjective(double objective) { m_objective = objective; } /** * gets the objective merit * @return the objective merit of this population member */ public double getObjective() { return m_objective; } /** * sets the scaled fitness * @param fitness the scaled fitness of this population member */ public void setFitness(double fitness) { m_fitness = fitness; } /** * gets the scaled fitness * @return the scaled fitness of this population member */ public double getFitness() { return m_fitness; } /** * get the chromosome * @return the chromosome of this population member */ public BitSet getChromosome() { return m_chromosome; } /** * set the chromosome * @param c the chromosome to be set for this population member */ public void setChromosome(BitSet c) { m_chromosome = c; } /** * unset a bit in the chromosome * @param bit the bit to be cleared */ public void clear(int bit) { m_chromosome.clear(bit); } /** * set a bit in the chromosome * @param bit the bit to be set */ public void set(int bit) { m_chromosome.set(bit); } /** * get the value of a bit in the chromosome * @param bit the bit to query * @return the value of the bit */ public boolean get(int bit) { return m_chromosome.get(bit); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 6759 $"); } } /** * Returns an enumeration describing the available options. * @return an enumeration of all the available options. 
**/ public Enumeration listOptions () { Vector newVector = new Vector(6); newVector.addElement(new Option("\tSpecify a starting set of attributes." + "\n\tEg. 1,3,5-7." +"If supplied, the starting set becomes" +"\n\tone member of the initial random" +"\n\tpopulation." ,"P",1 , "-P <start set>")); newVector.addElement(new Option("\tSet the size of the population (even number)." +"\n\t(default = 20)." , "Z", 1 , "-Z <population size>")); newVector.addElement(new Option("\tSet the number of generations." +"\n\t(default = 20)" , "G", 1, "-G <number of generations>")); newVector.addElement(new Option("\tSet the probability of crossover." +"\n\t(default = 0.6)" , "C", 1, "-C <probability of" +" crossover>")); newVector.addElement(new Option("\tSet the probability of mutation." +"\n\t(default = 0.033)" , "M", 1, "-M <probability of mutation>")); newVector.addElement(new Option("\tSet frequency of generation reports." +"\n\te.g, setting the value to 5 will " +"\n\treport every 5th generation" +"\n\t(default = number of generations)" , "R", 1, "-R <report frequency>")); newVector.addElement(new Option("\tSet the random number seed." +"\n\t(default = 1)" , "S", 1, "-S <seed>")); return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P &lt;start set&gt; * Specify a starting set of attributes. * Eg. 1,3,5-7.If supplied, the starting set becomes * one member of the initial random * population.</pre> * * <pre> -Z &lt;population size&gt; * Set the size of the population (even number). * (default = 20).</pre> * * <pre> -G &lt;number of generations&gt; * Set the number of generations. * (default = 20)</pre> * * <pre> -C &lt;probability of crossover&gt; * Set the probability of crossover. * (default = 0.6)</pre> * * <pre> -M &lt;probability of mutation&gt; * Set the probability of mutation. * (default = 0.033)</pre> * * <pre> -R &lt;report frequency&gt; * Set frequency of generation reports. 
* e.g, setting the value to 5 will * report every 5th generation * (default = number of generations)</pre> * * <pre> -S &lt;seed&gt; * Set the random number seed. * (default = 1)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported * **/ public void setOptions (String[] options) throws Exception { String optionString; resetOptions(); optionString = Utils.getOption('P', options); if (optionString.length() != 0) { setStartSet(optionString); } optionString = Utils.getOption('Z', options); if (optionString.length() != 0) { setPopulationSize(Integer.parseInt(optionString)); } optionString = Utils.getOption('G', options); if (optionString.length() != 0) { setMaxGenerations(Integer.parseInt(optionString)); setReportFrequency(Integer.parseInt(optionString)); } optionString = Utils.getOption('C', options); if (optionString.length() != 0) { setCrossoverProb((new Double(optionString)).doubleValue()); } optionString = Utils.getOption('M', options); if (optionString.length() != 0) { setMutationProb((new Double(optionString)).doubleValue()); } optionString = Utils.getOption('R', options); if (optionString.length() != 0) { setReportFrequency(Integer.parseInt(optionString)); } optionString = Utils.getOption('S', options); if (optionString.length() != 0) { setSeed(Integer.parseInt(optionString)); } } /** * Gets the current settings of ReliefFAttributeEval. 
* * @return an array of strings suitable for passing to setOptions() */ public String[] getOptions () { String[] options = new String[14]; int current = 0; if (!(getStartSet().equals(""))) { options[current++] = "-P"; options[current++] = ""+startSetToString(); } options[current++] = "-Z"; options[current++] = "" + getPopulationSize(); options[current++] = "-G"; options[current++] = "" + getMaxGenerations(); options[current++] = "-C"; options[current++] = "" + getCrossoverProb(); options[current++] = "-M"; options[current++] = "" + getMutationProb(); options[current++] = "-R"; options[current++] = "" + getReportFrequency(); options[current++] = "-S"; options[current++] = "" + getSeed(); while (current < options.length) { options[current++] = ""; } return options; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String startSetTipText() { return "Set a start point for the search. This is specified as a comma " +"seperated list off attribute indexes starting at 1. It can include " +"ranges. Eg. 1,2,5-9,17. The start set becomes one of the population " +"members of the initial population."; } /** * Sets a starting set of attributes for the search. It is the * search method's responsibility to report this start set (if any) * in its toString() method. * @param startSet a string containing a list of attributes (and or ranges), * eg. 1,2,6,10-15. * @throws Exception if start set can't be set. 
*/ public void setStartSet (String startSet) throws Exception { m_startRange.setRanges(startSet); } /** * Returns a list of attributes (and or attribute ranges) as a String * @return a list of attributes (and or attribute ranges) */ public String getStartSet () { return m_startRange.getRanges(); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String seedTipText() { return "Set the random seed."; } /** * set the seed for random number generation * @param s seed value */ public void setSeed(int s) { m_seed = s; } /** * get the value of the random number generator's seed * @return the seed for random number generation */ public int getSeed() { return m_seed; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String reportFrequencyTipText() { return "Set how frequently reports are generated. Default is equal to " +"the number of generations meaning that a report will be printed for " +"initial and final generations. 
Setting the value to 5 will result in " +"a report being printed every 5 generations."; } /** * set how often reports are generated * @param f generate reports every f generations */ public void setReportFrequency(int f) { m_reportFrequency = f; } /** * get how often repports are generated * @return how often reports are generated */ public int getReportFrequency() { return m_reportFrequency; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String mutationProbTipText() { return "Set the probability of mutation occuring."; } /** * set the probability of mutation * @param m the probability for mutation occuring */ public void setMutationProb(double m) { m_pMutation = m; } /** * get the probability of mutation * @return the probability of mutation occuring */ public double getMutationProb() { return m_pMutation; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String crossoverProbTipText() { return "Set the probability of crossover. 
This is the probability that " +"two population members will exchange genetic material."; } /** * set the probability of crossover * @param c the probability that two population members will exchange * genetic material */ public void setCrossoverProb(double c) { m_pCrossover = c; } /** * get the probability of crossover * @return the probability of crossover */ public double getCrossoverProb() { return m_pCrossover; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String maxGenerationsTipText() { return "Set the number of generations to evaluate."; } /** * set the number of generations to evaluate * @param m the number of generations */ public void setMaxGenerations(int m) { m_maxGenerations = m; } /** * get the number of generations * @return the maximum number of generations */ public int getMaxGenerations() { return m_maxGenerations; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String populationSizeTipText() { return "Set the population size (even number), this is the number of individuals " +"(attribute sets) in the population."; } /** * set the population size * @param p the size of the population */ public void setPopulationSize(int p) { if (p % 2 == 0) m_popSize = p; else System.out.println("Population size needs to be an even number!"); } /** * get the size of the population * @return the population size */ public int getPopulationSize() { return m_popSize; } /** * Returns a string describing this search method * @return a description of the search suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "GeneticSearch:\n\nPerforms a search using the simple genetic " + "algorithm described in Goldberg (1989).\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an 
instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.BOOK); result.setValue(Field.AUTHOR, "David E. Goldberg"); result.setValue(Field.YEAR, "1989"); result.setValue(Field.TITLE, "Genetic algorithms in search, optimization and machine learning"); result.setValue(Field.ISBN, "0201157675"); result.setValue(Field.PUBLISHER, "Addison-Wesley"); return result; } /** * Constructor. Make a new GeneticSearch object */ public GeneticSearch() { resetOptions(); } /** * converts the array of starting attributes to a string. This is * used by getOptions to return the actual attributes specified * as the starting set. This is better than using m_startRanges.getRanges() * as the same start set can be specified in different ways from the * command line---eg 1,2,3 == 1-3. This is to ensure that stuff that * is stored in a database is comparable. 
* @return a comma seperated list of individual attribute numbers as a String */ private String startSetToString() { StringBuffer FString = new StringBuffer(); boolean didPrint; if (m_starting == null) { return getStartSet(); } for (int i = 0; i < m_starting.length; i++) { didPrint = false; if ((m_hasClass == false) || (m_hasClass == true && i != m_classIndex)) { FString.append((m_starting[i] + 1)); didPrint = true; } if (i == (m_starting.length - 1)) { FString.append(""); } else { if (didPrint) { FString.append(","); } } } return FString.toString(); } /** * returns a description of the search * @return a description of the search as a String */ public String toString() { StringBuffer GAString = new StringBuffer(); GAString.append("\tGenetic search.\n\tStart set: "); if (m_starting == null) { GAString.append("no attributes\n"); } else { GAString.append(startSetToString()+"\n"); } GAString.append("\tPopulation size: "+m_popSize); GAString.append("\n\tNumber of generations: "+m_maxGenerations); GAString.append("\n\tProbability of crossover: " +Utils.doubleToString(m_pCrossover,6,3)); GAString.append("\n\tProbability of mutation: " +Utils.doubleToString(m_pMutation,6,3)); GAString.append("\n\tReport frequency: "+m_reportFrequency); GAString.append("\n\tRandom number seed: "+m_seed+"\n"); GAString.append(m_generationReports.toString()); return GAString.toString(); } /** * Searches the attribute subset space using a genetic algorithm. * * @param ASEval the attribute evaluator to guide the search * @param data the training instances. 
* @return an array (not necessarily ordered) of selected attribute indexes * @throws Exception if the search can't be completed */ public int[] search (ASEvaluation ASEval, Instances data) throws Exception { m_best = null; m_generationReports = new StringBuffer(); if (!(ASEval instanceof SubsetEvaluator)) { throw new Exception(ASEval.getClass().getName() + " is not a " + "Subset evaluator!"); } if (ASEval instanceof UnsupervisedSubsetEvaluator) { m_hasClass = false; } else { m_hasClass = true; m_classIndex = data.classIndex(); } SubsetEvaluator ASEvaluator = (SubsetEvaluator)ASEval; m_numAttribs = data.numAttributes(); m_startRange.setUpper(m_numAttribs-1); if (!(getStartSet().equals(""))) { m_starting = m_startRange.getSelection(); } // initial random population m_lookupTable = new Hashtable(m_lookupTableSize); m_random = new Random(m_seed); m_population = new GABitSet [m_popSize]; // set up random initial population initPopulation(); evaluatePopulation(ASEvaluator); populationStatistics(); scalePopulation(); checkBest(); m_generationReports.append(populationReport(0)); boolean converged; for (int i=1;i<=m_maxGenerations;i++) { generation(); evaluatePopulation(ASEvaluator); populationStatistics(); scalePopulation(); // find the best pop member and check for convergence converged = checkBest(); if ((i == m_maxGenerations) || ((i % m_reportFrequency) == 0) || (converged == true)) { m_generationReports.append(populationReport(i)); if (converged == true) { break; } } } return attributeList(m_best.getChromosome()); } /** * converts a BitSet into a list of attribute indexes * @param group the BitSet to convert * @return an array of attribute indexes **/ private int[] attributeList (BitSet group) { int count = 0; // count how many were selected for (int i = 0; i < m_numAttribs; i++) { if (group.get(i)) { count++; } } int[] list = new int[count]; count = 0; for (int i = 0; i < m_numAttribs; i++) { if (group.get(i)) { list[count++] = i; } } return list; } /** * checks to 
see if any population members in the current
   * population are better than the best found so far. Also checks
   * to see if the search has converged---that is there is no difference
   * in fitness between the best and worse population member
   * @return true if the search has converged
   * @throws Exception if something goes wrong */
  private boolean checkBest() throws Exception {
    int i,count,lowestCount = m_numAttribs;
    double b = -Double.MAX_VALUE;
    GABitSet localbest = null;
    BitSet temp;
    boolean converged = false;
    int oldcount = Integer.MAX_VALUE;

    // A positive fitness spread means best != worst, i.e. not yet converged.
    if (m_maxFitness - m_minFitness > 0) {
      // find the best in this population
      for (i=0;i<m_popSize;i++) {
        if (m_population[i].getObjective() > b) {
          b = m_population[i].getObjective();
          localbest = m_population[i];
          oldcount = countFeatures(localbest.getChromosome());
        } else if (Utils.eq(m_population[i].getObjective(), b)) {
          // see if it contains fewer features (ties broken by subset size)
          count = countFeatures(m_population[i].getChromosome());
          if (count < oldcount) {
            b = m_population[i].getObjective();
            localbest = m_population[i];
            oldcount = count;
          }
        }
      }
    }
    else {
      // all members have equal objective: look for the smallest subset
      for (i=0;i<m_popSize;i++) {
        temp = m_population[i].getChromosome();
        count = countFeatures(temp);; // NOTE(review): stray empty statement (double ';')
        if (count < lowestCount) {
          lowestCount = count;
          localbest = m_population[i];
          b = localbest.getObjective();
        }
      }
      converged = true;
    }

    // count the number of features in localbest
    count = 0;
    temp = localbest.getChromosome();
    count = countFeatures(temp);

    // compare to the best found so far: prefer higher merit, then fewer features
    if (m_best == null) {
      m_best = (GABitSet)localbest.clone();
      m_bestFeatureCount = count;
    }
    else if (b > m_best.getObjective()) {
      m_best = (GABitSet)localbest.clone();
      m_bestFeatureCount = count;
    }
    else if (Utils.eq(m_best.getObjective(), b)) {
      // see if the localbest has fewer features than the best so far
      if (count < m_bestFeatureCount) {
        m_best = (GABitSet)localbest.clone();
        m_bestFeatureCount = count;
      }
    }
    return converged;
  }

  /**
   * counts the number of features in a subset
   * @param featureSet the 
feature set for which to count the features
   * @return the number of features in the subset
   */
  private int countFeatures(BitSet featureSet) {
    int count = 0;
    for (int i=0;i<m_numAttribs;i++) {
      if (featureSet.get(i)) {
        count++;
      }
    }
    return count;
  }

  /**
   * performs a single generation---selection, crossover, and mutation
   * @throws Exception if an error occurs
   */
  private void generation () throws Exception {
    int i,j=0;
    double best_fit = -Double.MAX_VALUE;
    int old_count = 0;
    int count;
    GABitSet [] newPop = new GABitSet [m_popSize];
    int parent1,parent2;

    /** first ensure that the population best is propogated into the new
        generation (elitism: fittest member, ties broken by fewer features) */
    for (i=0;i<m_popSize;i++) {
      if (m_population[i].getFitness() > best_fit) {
        j = i;
        best_fit = m_population[i].getFitness();
        old_count = countFeatures(m_population[i].getChromosome());
      } else if (Utils.eq(m_population[i].getFitness(), best_fit)) {
        count = countFeatures(m_population[i].getChromosome());
        if (count < old_count) {
          j = i;
          best_fit = m_population[i].getFitness();
          old_count = count;
        }
      }
    }
    newPop[0] = (GABitSet)(m_population[j].clone());
    // NOTE(review): slots 0 and 1 share the SAME GABitSet object (no second
    // clone); nothing below mutates these two slots, but the aliasing is fragile.
    newPop[1] = newPop[0];

    // fill the remaining slots pairwise via selection + crossover/mutation
    for (j=2;j<m_popSize;j+=2) {
      parent1 = select();
      parent2 = select();
      newPop[j] = (GABitSet)(m_population[parent1].clone());
      newPop[j+1] = (GABitSet)(m_population[parent2].clone());
      // if parents are equal mutate one bit
      if (parent1 == parent2) {
        int r;
        if (m_hasClass) {
          // keep drawing until a non-class attribute index comes up
          while ((r = m_random.nextInt(m_numAttribs)) == m_classIndex);
        } else {
          r = m_random.nextInt(m_numAttribs);
        }
        if (newPop[j].get(r)) {
          newPop[j].clear(r);
        } else {
          newPop[j].set(r);
        }
      } else {
        // crossover
        double r = m_random.nextDouble();
        if (m_numAttribs >= 3) {
          if (r < m_pCrossover) {
            // cross point in 1..m_numAttribs-2: bits below cp are swapped
            // between the two children (taken from the opposite parent)
            int cp = Math.abs(m_random.nextInt());

            cp %= (m_numAttribs-2);
            cp ++;

            for (i=0;i<cp;i++) {
              if (m_population[parent1].get(i)) {
                newPop[j+1].set(i);
              } else {
                newPop[j+1].clear(i);
              }
              if (m_population[parent2].get(i)) {
                newPop[j].set(i);
              } else {
                newPop[j].clear(i);
              }
            }
          }
        }

        // mutate each of the two children, bit by bit
        for (int k=0;k<2;k++) {
          for (i=0;i<m_numAttribs;i++) {
            r = m_random.nextDouble();
            if (r < m_pMutation) {
              if (m_hasClass && (i == m_classIndex)) {
                // ignore class attribute
              } else {
                if (newPop[j+k].get(i)) {
                  newPop[j+k].clear(i);
                } else {
                  newPop[j+k].set(i);
                }
              }
            }
          }
        }
      }
    }
    m_population = newPop;
  }

  /**
   * selects a population member to be considered for crossover
   * (roulette-wheel selection, proportional to scaled fitness)
   * @return the index of the selected population member
   */
  private int select() {
    int i;
    double r,partsum;

    partsum = 0;
    r = m_random.nextDouble() * m_sumFitness;
    for (i=0;i<m_popSize;i++) {
      partsum += m_population[i].getFitness();
      if (partsum >= r || (i == m_popSize - 1)) {
        break;
      }
    }
    // if none was found, take first
    // NOTE(review): unreachable — the loop always breaks by i == m_popSize - 1.
    if (i == m_popSize) i = 0;
    return i;
  }

  /**
   * evaluates an entire population. Population members are looked up in
   * a hash table and if they are not found then they are evaluated using
   * ASEvaluator.
   * @param ASEvaluator the subset evaluator to use for evaluating population
   * members
   * @throws Exception if something goes wrong during evaluation
   */
  private void evaluatePopulation (SubsetEvaluator ASEvaluator)
    throws Exception {
    int i;
    double merit;

    for (i=0;i<m_popSize;i++) {
      // if its not in the lookup table then evaluate and insert
      if (m_lookupTable.containsKey(m_population[i]
                                    .getChromosome()) == false) {
        merit = ASEvaluator.evaluateSubset(m_population[i].getChromosome());
        m_population[i].setObjective(merit);
        m_lookupTable.put(m_population[i].getChromosome(),m_population[i]);
      } else {
        // chromosome seen before: reuse the cached objective value
        GABitSet temp = (GABitSet)m_lookupTable.
          get(m_population[i].getChromosome());
        m_population[i].setObjective(temp.getObjective());
      }
    }
  }

  /**
   * creates random population members for the initial population. 
Also
   * sets the first population member to be a start set (if any)
   * provided by the user
   * @throws Exception if the population can't be created
   */
  private void initPopulation () throws Exception {
    int i,j,bit;
    int num_bits;
    boolean ok;
    int start = 0;

    // add the start set as the first population member (if specified)
    if (m_starting != null) {
      m_population[0] = new GABitSet();
      for (i=0;i<m_starting.length;i++) {
        if ((m_starting[i]) != m_classIndex) {
          m_population[0].set(m_starting[i]);
        }
      }
      start = 1;
    }

    // remaining members: random subsets with a random number of set bits
    for (i=start;i<m_popSize;i++) {
      m_population[i] = new GABitSet();

      num_bits = m_random.nextInt();
      // NOTE(review): '%' binds tighter than '-', so this computes
      // (num_bits % m_numAttribs) - 1, not num_bits % (m_numAttribs-1);
      // after the abs/zero fix-ups below num_bits can reach m_numAttribs.
      num_bits = num_bits % m_numAttribs-1;
      if (num_bits < 0) {
        num_bits *= -1;
      }
      if (num_bits == 0) {
        num_bits = 1;
      }

      for (j=0;j<num_bits;j++) {
        ok = false;
        do {
          // draw a non-negative attribute index, rejecting the class index
          bit = m_random.nextInt();
          if (bit < 0) {
            bit *= -1;
          }
          bit = bit % m_numAttribs;
          if (m_hasClass) {
            if (bit != m_classIndex) {
              ok = true;
            }
          } else {
            ok = true;
          }
        } while (!ok);
        // sanity check ('bit % m_numAttribs' already keeps bit < m_numAttribs)
        if (bit > m_numAttribs) {
          throw new Exception("Problem in population init");
        }
        m_population[i].set(bit);
      }
    }
  }

  /**
   * calculates summary statistics for the current population
   * (sum, min, max and average of the raw objective values)
   */
  private void populationStatistics() {
    int i;

    m_sumFitness = m_minFitness = m_maxFitness =
      m_population[0].getObjective();

    for (i=1;i<m_popSize;i++) {
      m_sumFitness += m_population[i].getObjective();
      if (m_population[i].getObjective() > m_maxFitness) {
        m_maxFitness = m_population[i].getObjective();
      }
      else if (m_population[i].getObjective() < m_minFitness) {
        m_minFitness = m_population[i].getObjective();
      }
    }
    m_avgFitness = (m_sumFitness / m_popSize);
  }

  /**
   * scales the raw (objective) merit of the population members
   * (linear fitness scaling with a scaling multiple of 2.0)
   */
  private void scalePopulation() {
    int j;
    double a = 0;
    double b = 0;
    double fmultiple = 2.0;
    double delta;

    // prescale: pick a,b so scaled(avg) == avg and scaled(max) == fmultiple*avg
    if (m_minFitness > ((fmultiple * m_avgFitness - m_maxFitness) /
                        (fmultiple - 1.0))) {
      delta = m_maxFitness - m_avgFitness;
      a = ((fmultiple - 1.0) * m_avgFitness / delta);
      b = m_avgFitness * (m_maxFitness - fmultiple * m_avgFitness) / delta;
    } 
else { delta = m_avgFitness - m_minFitness; a = m_avgFitness / delta; b = -m_minFitness * m_avgFitness / delta; } // scalepop m_sumFitness = 0; for (j=0;j<m_popSize;j++) { if (a == Double.POSITIVE_INFINITY || a == Double.NEGATIVE_INFINITY || b == Double.POSITIVE_INFINITY || b == Double.NEGATIVE_INFINITY) { m_population[j].setFitness(m_population[j].getObjective()); } else { m_population[j]. setFitness(Math.abs((a * m_population[j].getObjective() + b))); } m_sumFitness += m_population[j].getFitness(); } } /** * generates a report on the current population * @return a report as a String */ private String populationReport (int genNum) { int i; StringBuffer temp = new StringBuffer(); if (genNum == 0) { temp.append("\nInitial population\n"); } else { temp.append("\nGeneration: "+genNum+"\n"); } temp.append("merit \tscaled \tsubset\n"); for (i=0;i<m_popSize;i++) { temp.append(Utils.doubleToString(Math. abs(m_population[i].getObjective()), 8,5) +"\t" +Utils.doubleToString(m_population[i].getFitness(), 8,5) +"\t"); temp.append(printPopMember(m_population[i].getChromosome())+"\n"); } return temp.toString(); } /** * prints a population member as a series of attribute numbers * @param temp the chromosome of a population member * @return a population member as a String of attribute numbers */ private String printPopMember(BitSet temp) { StringBuffer text = new StringBuffer(); for (int j=0;j<m_numAttribs;j++) { if (temp.get(j)) { text.append((j+1)+" "); } } return text.toString(); } /** * prints a population member's chromosome * @param temp the chromosome of a population member * @return a population member's chromosome as a String */ private String printPopChrom(BitSet temp) { StringBuffer text = new StringBuffer(); for (int j=0;j<m_numAttribs;j++) { if (temp.get(j)) { text.append("1"); } else { text.append("0"); } } return text.toString(); } /** * reset to default values for options */ private void resetOptions () { m_population = null; m_popSize = 20; m_lookupTableSize = 
1001; m_pCrossover = 0.6; m_pMutation = 0.033; m_maxGenerations = 20; m_reportFrequency = m_maxGenerations; m_starting = null; m_startRange = new Range(); m_seed = 1; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 6759 $"); } }
37,408
27.622035
109
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/GreedyStepwise.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * GreedyStepwise.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import java.util.ArrayList; import java.util.BitSet; import java.util.Enumeration; import java.util.List; import java.util.Vector; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.Range; import weka.core.RevisionUtils; import weka.core.Utils; /** <!-- globalinfo-start --> * GreedyStepwise :<br/> * <br/> * Performs a greedy forward or backward search through the space of attribute subsets. May start with no/all attributes or from an arbitrary point in the space. Stops when the addition/deletion of any remaining attributes results in a decrease in evaluation. Can also produce a ranked list of attributes by traversing the space from one side to the other and recording the order that attributes are selected.<br/> * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -C * Use conservative forward search</pre> * * <pre> -B * Use a backward search instead of a * forward one.</pre> * * <pre> -P &lt;start set&gt; * Specify a starting set of attributes. * Eg. 
1,3,5-7.</pre> * * <pre> -R * Produce a ranked list of attributes.</pre> * * <pre> -T &lt;threshold&gt; * Specify a theshold by which attributes * may be discarded from the ranking. * Use in conjuction with -R</pre> * * <pre> -N &lt;num to select&gt; * Specify number of attributes to select</pre> * * <pre> -num-slots &lt;int&gt; * The number of execution slots, for example, the number of cores in the CPU. (default 1) * </pre> * * <pre> -D * Print debugging output</pre> * <!-- options-end --> * * @author Mark Hall * @version $Revision: 9848 $ */ public class GreedyStepwise extends ASSearch implements RankedOutputSearch, StartSetHandler, OptionHandler { /** for serialization */ static final long serialVersionUID = -6312951970168325471L; /** does the data have a class */ protected boolean m_hasClass; /** holds the class index */ protected int m_classIndex; /** number of attributes in the data */ protected int m_numAttribs; /** true if the user has requested a ranked list of attributes */ protected boolean m_rankingRequested; /** * go from one side of the search space to the other in order to generate a * ranking */ protected boolean m_doRank; /** used to indicate whether or not ranking has been performed */ protected boolean m_doneRanking; /** * A threshold by which to discard attributes---used by the AttributeSelection * module */ protected double m_threshold; /** * The number of attributes to select. -1 indicates that all attributes are to * be retained. 
Has precedence over m_threshold */ protected int m_numToSelect = -1; protected int m_calculatedNumToSelect; /** the merit of the best subset found */ protected double m_bestMerit; /** a ranked list of attribute indexes */ protected double[][] m_rankedAtts; protected int m_rankedSoFar; /** the best subset found */ protected BitSet m_best_group; protected ASEvaluation m_ASEval; protected Instances m_Instances; /** holds the start set for the search as a Range */ protected Range m_startRange; /** holds an array of starting attributes */ protected int[] m_starting; /** Use a backwards search instead of a forwards one */ protected boolean m_backward = false; /** * If set then attributes will continue to be added during a forward search as * long as the merit does not degrade */ protected boolean m_conservativeSelection = false; /** Print debugging output */ protected boolean m_debug = false; protected int m_poolSize = 1; /** Thread pool */ protected transient ExecutorService m_pool = null; /** * Constructor */ public GreedyStepwise() { m_threshold = -Double.MAX_VALUE; m_doneRanking = false; m_startRange = new Range(); m_starting = null; resetOptions(); } /** * Returns a string describing this search method * * @return a description of the search suitable for displaying in the * explorer/experimenter gui */ public String globalInfo() { return "GreedyStepwise :\n\nPerforms a greedy forward or backward search " + "through " + "the space of attribute subsets. May start with no/all attributes or from " + "an arbitrary point in the space. Stops when the addition/deletion of any " + "remaining attributes results in a decrease in evaluation. 
" + "Can also produce a ranked list of " + "attributes by traversing the space from one side to the other and " + "recording the order that attributes are selected.\n"; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String searchBackwardsTipText() { return "Search backwards rather than forwards."; } /** * Set whether to search backwards instead of forwards * * @param back true to search backwards */ public void setSearchBackwards(boolean back) { m_backward = back; if (m_backward) { setGenerateRanking(false); } } /** * Get whether to search backwards * * @return true if the search will proceed backwards */ public boolean getSearchBackwards() { return m_backward; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String thresholdTipText() { return "Set threshold by which attributes can be discarded. Default value " + "results in no attributes being discarded. Use in conjunction with " + "generateRanking"; } /** * Set the threshold by which the AttributeSelection module can discard * attributes. * * @param threshold the threshold. */ @Override public void setThreshold(double threshold) { m_threshold = threshold; } /** * Returns the threshold so that the AttributeSelection module can discard * attributes from the ranking. */ @Override public double getThreshold() { return m_threshold; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String numToSelectTipText() { return "Specify the number of attributes to retain. The default value " + "(-1) indicates that all attributes are to be retained. 
Use either " + "this option or a threshold to reduce the attribute set."; } /** * Specify the number of attributes to select from the ranked list (if * generating a ranking). -1 indicates that all attributes are to be retained. * * @param n the number of attributes to retain */ @Override public void setNumToSelect(int n) { m_numToSelect = n; } /** * Gets the number of attributes to be retained. * * @return the number of attributes to retain */ @Override public int getNumToSelect() { return m_numToSelect; } /** * Gets the calculated number of attributes to retain. This is the actual * number of attributes to retain. This is the same as getNumToSelect if the * user specifies a number which is not less than zero. Otherwise it should be * the number of attributes in the (potentially transformed) data. */ @Override public int getCalculatedNumToSelect() { if (m_numToSelect >= 0) { m_calculatedNumToSelect = m_numToSelect; } return m_calculatedNumToSelect; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String generateRankingTipText() { return "Set to true if a ranked list is required."; } /** * Records whether the user has requested a ranked list of attributes. * * @param doRank true if ranking is requested */ @Override public void setGenerateRanking(boolean doRank) { m_rankingRequested = doRank; } /** * Gets whether ranking has been requested. This is used by the * AttributeSelection module to determine if rankedAttributes() should be * called. * * @return true if ranking has been requested. */ @Override public boolean getGenerateRanking() { return m_rankingRequested; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String startSetTipText() { return "Set the start point for the search. 
This is specified as a comma " + "seperated list off attribute indexes starting at 1. It can include " + "ranges. Eg. 1,2,5-9,17."; } /** * Sets a starting set of attributes for the search. It is the search method's * responsibility to report this start set (if any) in its toString() method. * * @param startSet a string containing a list of attributes (and or ranges), * eg. 1,2,6,10-15. * @throws Exception if start set can't be set. */ @Override public void setStartSet(String startSet) throws Exception { m_startRange.setRanges(startSet); } /** * Returns a list of attributes (and or attribute ranges) as a String * * @return a list of attributes (and or attribute ranges) */ @Override public String getStartSet() { return m_startRange.getRanges(); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String conservativeForwardSelectionTipText() { return "If true (and forward search is selected) then attributes " + "will continue to be added to the best subset as long as merit does " + "not degrade."; } /** * Set whether attributes should continue to be added during a forward search * as long as merit does not decrease * * @param c true if atts should continue to be atted */ public void setConservativeForwardSelection(boolean c) { m_conservativeSelection = c; } /** * Gets whether conservative selection has been enabled * * @return true if conservative forward selection is enabled */ public boolean getConservativeForwardSelection() { return m_conservativeSelection; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String debuggingOutputTipText() { return "Output debugging information to the console"; } /** * Set whether to output debugging info to the console * * @param d true if dubugging info is to be output */ public void setDebuggingOutput(boolean d) { m_debug = d; 
} /** * Get whether to output debugging info to the console * * @return true if dubugging info is to be output */ public boolean getDebuggingOutput() { return m_debug; } /** * @return a string to describe the option */ public String numExecutionSlotsTipText() { return "The number of execution slots, for example, the number of cores in the CPU."; } /** * Gets the number of threads. */ public int getNumExecutionSlots() { return m_poolSize; } /** * Sets the number of threads */ public void setNumExecutionSlots(int nT) { m_poolSize = nT; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. **/ @Override public Enumeration listOptions() { Vector newVector = new Vector(5); newVector.addElement(new Option("\tUse conservative forward search", "-C", 0, "-C")); newVector.addElement(new Option("\tUse a backward search instead of a" + "\n\tforward one.", "-B", 0, "-B")); newVector.addElement(new Option("\tSpecify a starting set of attributes." + "\n\tEg. 1,3,5-7.", "P", 1, "-P <start set>")); newVector.addElement(new Option("\tProduce a ranked list of attributes.", "R", 0, "-R")); newVector.addElement(new Option("\tSpecify a theshold by which attributes" + "\n\tmay be discarded from the ranking." + "\n\tUse in conjuction with -R", "T", 1, "-T <threshold>")); newVector.addElement(new Option("\tSpecify number of attributes to select", "N", 1, "-N <num to select>")); newVector.addElement(new Option("\t" + numExecutionSlotsTipText() + " (default 1)\n", "-num-slots", 1, "-num-slots <int>")); newVector.addElement(new Option("\tPrint debugging output", "D", 0, "-D")); return newVector.elements(); } /** * Parses a given list of options. * <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -C * Use conservative forward search</pre> * * <pre> -B * Use a backward search instead of a * forward one.</pre> * * <pre> -P &lt;start set&gt; * Specify a starting set of attributes. * Eg. 
1,3,5-7.</pre> * * <pre> -R * Produce a ranked list of attributes.</pre> * * <pre> -T &lt;threshold&gt; * Specify a theshold by which attributes * may be discarded from the ranking. * Use in conjuction with -R</pre> * * <pre> -N &lt;num to select&gt; * Specify number of attributes to select</pre> * * <pre> -num-slots &lt;int&gt; * The number of execution slots, for example, the number of cores in the CPU. (default 1) * </pre> * * <pre> -D * Print debugging output</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { String optionString; resetOptions(); setSearchBackwards(Utils.getFlag('B', options)); setConservativeForwardSelection(Utils.getFlag('C', options)); optionString = Utils.getOption('P', options); if (optionString.length() != 0) { setStartSet(optionString); } setGenerateRanking(Utils.getFlag('R', options)); optionString = Utils.getOption('T', options); if (optionString.length() != 0) { Double temp; temp = Double.valueOf(optionString); setThreshold(temp.doubleValue()); } optionString = Utils.getOption('N', options); if (optionString.length() != 0) { setNumToSelect(Integer.parseInt(optionString)); } optionString = Utils.getOption("num-slots", options); if (optionString.length() > 0) { setNumExecutionSlots(Integer.parseInt(optionString)); } setDebuggingOutput(Utils.getFlag('D', options)); } /** * Gets the current settings of ReliefFAttributeEval. 
* 
   * @return an array of strings suitable for passing to setOptions()
   */
  @Override
  public String[] getOptions() {

    // 12 = max flags/values: -B -C (-P v) -R (-T v) (-N v) (-num-slots v) -D
    String[] options = new String[12];
    int current = 0;

    if (getSearchBackwards()) {
      options[current++] = "-B";
    }

    if (getConservativeForwardSelection()) {
      options[current++] = "-C";
    }

    if (!(getStartSet().equals(""))) {
      options[current++] = "-P";
      options[current++] = "" + startSetToString();
    }

    if (getGenerateRanking()) {
      options[current++] = "-R";
    }
    options[current++] = "-T";
    options[current++] = "" + getThreshold();

    options[current++] = "-N";
    options[current++] = "" + getNumToSelect();

    options[current++] = "-num-slots";
    options[current++] = "" + getNumExecutionSlots();

    if (getDebuggingOutput()) {
      // FIX(review): advance 'current' here; previously 'options[current] = "-D"'
      // left 'current' unchanged, so the padding loop below immediately
      // overwrote the "-D" slot with "" and the flag was never reported.
      options[current++] = "-D";
    }

    // pad any unused slots with empty strings
    while (current < options.length) {
      options[current++] = "";
    }

    return options;
  }

  /**
   * converts the array of starting attributes to a string. This is used by
   * getOptions to return the actual attributes specified as the starting set.
   * This is better than using m_startRanges.getRanges() as the same start set
   * can be specified in different ways from the command line---eg 1,2,3 == 1-3.
   * This is to ensure that stuff that is stored in a database is comparable.
   * 
   * @return a comma seperated list of individual attribute numbers as a String
   */
  protected String startSetToString() {
    StringBuffer FString = new StringBuffer();
    boolean didPrint;

    if (m_starting == null) {
      return getStartSet();
    }

    for (int i = 0; i < m_starting.length; i++) {
      didPrint = false;

      // the class attribute is never part of the reported start set
      if ((m_hasClass == false) || (m_hasClass == true && i != m_classIndex)) {
        FString.append((m_starting[i] + 1));
        didPrint = true;
      }

      if (i == (m_starting.length - 1)) {
        FString.append("");
      } else {
        if (didPrint) {
          FString.append(",");
        }
      }
    }

    return FString.toString();
  }

  /**
   * returns a description of the search.
   * 
   * @return a description of the search as a String.
   */
  @Override
  public String toString() {
    StringBuffer FString = new StringBuffer();
    FString.append("\tGreedy Stepwise (" + ((m_backward) ? 
"backwards)" : "forwards)") + ".\n\tStart set: "); if (m_starting == null) { if (m_backward) { FString.append("all attributes\n"); } else { FString.append("no attributes\n"); } } else { FString.append(startSetToString() + "\n"); } if (!m_doneRanking) { FString.append("\tMerit of best subset found: " + Utils.doubleToString(Math.abs(m_bestMerit), 8, 3) + "\n"); } else { if (m_backward) { FString .append("\n\tRanking is the order that attributes were removed, " + "starting \n\twith all attributes. The merit scores in the left" + "\n\tcolumn are the goodness of the remaining attributes in the" + "\n\tsubset after removing the corresponding in the right column" + "\n\tattribute from the subset.\n"); } else { FString .append("\n\tRanking is the order that attributes were added, starting " + "\n\twith no attributes. The merit scores in the left column" + "\n\tare the goodness of the subset after the adding the" + "\n\tcorresponding attribute in the right column to the subset.\n"); } } if ((m_threshold != -Double.MAX_VALUE) && (m_doneRanking)) { FString.append("\tThreshold for discarding attributes: " + Utils.doubleToString(m_threshold, 8, 4) + "\n"); } return FString.toString(); } /** * Searches the attribute subset space by forward selection. * * @param ASEval the attribute evaluator to guide the search * @param data the training instances. 
* @return an array (not necessarily ordered) of selected attribute indexes * @throws Exception if the search can't be completed */ @Override public int[] search(ASEvaluation ASEval, Instances data) throws Exception { int i; double best_merit = -Double.MAX_VALUE; double temp_best, temp_merit; int temp_index = 0; BitSet temp_group; boolean parallel = (m_poolSize > 1); if (parallel) { m_pool = Executors.newFixedThreadPool(m_poolSize); } if (data != null) { // this is a fresh run so reset resetOptions(); m_Instances = data; } m_ASEval = ASEval; m_numAttribs = m_Instances.numAttributes(); if (m_best_group == null) { m_best_group = new BitSet(m_numAttribs); } if (!(m_ASEval instanceof SubsetEvaluator)) { throw new Exception(m_ASEval.getClass().getName() + " is not a " + "Subset evaluator!"); } m_startRange.setUpper(m_numAttribs - 1); if (!(getStartSet().equals(""))) { m_starting = m_startRange.getSelection(); } if (m_ASEval instanceof UnsupervisedSubsetEvaluator) { m_hasClass = false; m_classIndex = -1; } else { m_hasClass = true; m_classIndex = m_Instances.classIndex(); } final SubsetEvaluator ASEvaluator = (SubsetEvaluator) m_ASEval; if (m_rankedAtts == null) { m_rankedAtts = new double[m_numAttribs][2]; m_rankedSoFar = 0; } // If a starting subset has been supplied, then initialise the bitset if (m_starting != null && m_rankedSoFar <= 0) { for (i = 0; i < m_starting.length; i++) { if ((m_starting[i]) != m_classIndex) { m_best_group.set(m_starting[i]); } } } else { if (m_backward && m_rankedSoFar <= 0) { for (i = 0; i < m_numAttribs; i++) { if (i != m_classIndex) { m_best_group.set(i); } } } } // Evaluate the initial subset best_merit = ASEvaluator.evaluateSubset(m_best_group); // main search loop boolean done = false; boolean addone = false; boolean z; if (m_debug && parallel) { System.err.println("Evaluating subsets in parallel..."); } while (!done) { List<Future<Double[]>> results = new ArrayList<Future<Double[]>>(); temp_group = (BitSet) m_best_group.clone(); 
temp_best = best_merit; if (m_doRank) { temp_best = -Double.MAX_VALUE; } done = true; addone = false; for (i = 0; i < m_numAttribs; i++) { if (m_backward) { z = ((i != m_classIndex) && (temp_group.get(i))); } else { z = ((i != m_classIndex) && (!temp_group.get(i))); } if (z) { // set/unset the bit if (m_backward) { temp_group.clear(i); } else { temp_group.set(i); } if (parallel) { final BitSet tempCopy = (BitSet) temp_group.clone(); final int attBeingEvaluated = i; // make a copy if the evaluator is not thread safe final SubsetEvaluator theEvaluator = (ASEvaluator instanceof weka.core.ThreadSafe) ? ASEvaluator : (SubsetEvaluator) ASEvaluation.makeCopies(m_ASEval, 1)[0]; Future<Double[]> future = m_pool.submit(new Callable<Double[]>() { @Override public Double[] call() throws Exception { Double[] r = new Double[2]; double e = theEvaluator.evaluateSubset(tempCopy); r[0] = new Double(attBeingEvaluated); r[1] = e; return r; } }); results.add(future); } else { temp_merit = ASEvaluator.evaluateSubset(temp_group); if (m_backward) { z = (temp_merit >= temp_best); } else { if (m_conservativeSelection) { z = (temp_merit >= temp_best); } else { z = (temp_merit > temp_best); } } if (z) { temp_best = temp_merit; temp_index = i; addone = true; done = false; } } // unset this addition/deletion if (m_backward) { temp_group.set(i); } else { temp_group.clear(i); } if (m_doRank) { done = false; } } } if (parallel) { for (int j = 0; j < results.size(); j++) { Future<Double[]> f = results.get(j); int index = f.get()[0].intValue(); temp_merit = f.get()[1].doubleValue(); if (m_backward) { z = (temp_merit >= temp_best); } else { if (m_conservativeSelection) { z = (temp_merit >= temp_best); } else { z = (temp_merit > temp_best); } } if (z) { temp_best = temp_merit; temp_index = index; addone = true; done = false; } } } if (addone) { if (m_backward) { m_best_group.clear(temp_index); } else { m_best_group.set(temp_index); } best_merit = temp_best; if (m_debug) { System.err.print("Best subset 
found so far: "); int[] atts = attributeList(m_best_group); for (int a : atts) { System.err.print("" + (a + 1) + " "); } System.err.println("\nMerit: " + best_merit); } m_rankedAtts[m_rankedSoFar][0] = temp_index; m_rankedAtts[m_rankedSoFar][1] = best_merit; m_rankedSoFar++; } } if (parallel) { m_pool.shutdown(); } m_bestMerit = best_merit; return attributeList(m_best_group); } /** * Produces a ranked list of attributes. Search must have been performed prior * to calling this function. Search is called by this function to complete the * traversal of the the search space. A list of attributes and merits are * returned. The attributes a ranked by the order they are added to the subset * during a forward selection search. Individual merit values reflect the * merit associated with adding the corresponding attribute to the subset; * because of this, merit values may initially increase but then decrease as * the best subset is "passed by" on the way to the far side of the search * space. * * @return an array of attribute indexes and associated merit values * @throws Exception if something goes wrong. 
*/ @Override public double[][] rankedAttributes() throws Exception { if (m_rankedAtts == null || m_rankedSoFar == -1) { throw new Exception("Search must be performed before attributes " + "can be ranked."); } m_doRank = true; search(m_ASEval, null); double[][] final_rank = new double[m_rankedSoFar][2]; for (int i = 0; i < m_rankedSoFar; i++) { final_rank[i][0] = m_rankedAtts[i][0]; final_rank[i][1] = m_rankedAtts[i][1]; } resetOptions(); m_doneRanking = true; if (m_numToSelect > final_rank.length) { throw new Exception("More attributes requested than exist in the data"); } if (m_numToSelect <= 0) { if (m_threshold == -Double.MAX_VALUE) { m_calculatedNumToSelect = final_rank.length; } else { determineNumToSelectFromThreshold(final_rank); } } return final_rank; } private void determineNumToSelectFromThreshold(double[][] ranking) { int count = 0; for (int i = 0; i < ranking.length; i++) { if (ranking[i][1] > m_threshold) { count++; } } m_calculatedNumToSelect = count; } /** * converts a BitSet into a list of attribute indexes * * @param group the BitSet to convert * @return an array of attribute indexes **/ protected int[] attributeList(BitSet group) { int count = 0; // count how many were selected for (int i = 0; i < m_numAttribs; i++) { if (group.get(i)) { count++; } } int[] list = new int[count]; count = 0; for (int i = 0; i < m_numAttribs; i++) { if (group.get(i)) { list[count++] = i; } } return list; } /** * Resets options */ protected void resetOptions() { m_doRank = false; m_best_group = null; m_ASEval = null; m_Instances = null; m_rankedSoFar = -1; m_rankedAtts = null; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9848 $"); } }
28,775
27.267191
414
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/HoldOutSubsetEvaluator.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * HoldOutSubsetEvaluator.java * Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import java.util.BitSet; import weka.core.Instance; import weka.core.Instances; /** * Abstract attribute subset evaluator capable of evaluating subsets with * respect to a data set that is distinct from that used to initialize/ * train the subset evaluator. * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public abstract class HoldOutSubsetEvaluator extends ASEvaluation implements SubsetEvaluator { /** for serialization */ private static final long serialVersionUID = 8280529785412054174L; /** * Evaluates a subset of attributes with respect to a set of instances. * @param subset a bitset representing the attribute subset to be * evaluated * @param holdOut a set of instances (possibly seperate and distinct * from those use to build/train the evaluator) with which to * evaluate the merit of the subset * @return the "merit" of the subset on the holdOut data * @exception Exception if the subset cannot be evaluated */ public abstract double evaluateSubset(BitSet subset, Instances holdOut) throws Exception; /** * Evaluates a subset of attributes with respect to a single instance. 
* @param subset a bitset representing the attribute subset to be * evaluated * @param holdOut a single instance (possibly not one of those used to * build/train the evaluator) with which to evaluate the merit of the subset * @param retrain true if the classifier should be retrained with respect * to the new subset before testing on the holdOut instance. * @return the "merit" of the subset on the holdOut instance * @exception Exception if the subset cannot be evaluated */ public abstract double evaluateSubset(BitSet subset, Instance holdOut, boolean retrain) throws Exception; }
2,640
35.178082
78
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/InfoGainAttributeEval.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * InfoGainAttributeEval.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import java.util.Enumeration; import java.util.Vector; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.ContingencyTables; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.filters.Filter; import weka.filters.supervised.attribute.Discretize; import weka.filters.unsupervised.attribute.NumericToBinary; /** <!-- globalinfo-start --> * InfoGainAttributeEval :<br/> * <br/> * Evaluates the worth of an attribute by measuring the information gain with respect to the class.<br/> * <br/> * InfoGain(Class,Attribute) = H(Class) - H(Class | Attribute).<br/> * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -M * treat missing values as a seperate value.</pre> * * <pre> -B * just binarize numeric attributes instead * of properly discretizing them.</pre> * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 8034 $ * @see Discretize * @see NumericToBinary */ public class InfoGainAttributeEval extends ASEvaluation implements AttributeEvaluator, OptionHandler { /** for 
serialization */ static final long serialVersionUID = -1949849512589218930L; /** Treat missing values as a seperate value */ private boolean m_missing_merge; /** Just binarize numeric attributes */ private boolean m_Binarize; /** The info gain for each attribute */ private double[] m_InfoGains; /** * Returns a string describing this attribute evaluator * @return a description of the evaluator suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "InfoGainAttributeEval :\n\nEvaluates the worth of an attribute " +"by measuring the information gain with respect to the class.\n\n" +"InfoGain(Class,Attribute) = H(Class) - H(Class | Attribute).\n"; } /** * Constructor */ public InfoGainAttributeEval () { resetOptions(); } /** * Returns an enumeration describing the available options. * @return an enumeration of all the available options. **/ public Enumeration listOptions () { Vector newVector = new Vector(2); newVector.addElement(new Option("\ttreat missing values as a seperate " + "value.", "M", 0, "-M")); newVector.addElement(new Option("\tjust binarize numeric attributes instead \n" +"\tof properly discretizing them.", "B", 0, "-B")); return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -M * treat missing values as a seperate value.</pre> * * <pre> -B * just binarize numeric attributes instead * of properly discretizing them.</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions (String[] options) throws Exception { resetOptions(); setMissingMerge(!(Utils.getFlag('M', options))); setBinarizeNumericAttributes(Utils.getFlag('B', options)); } /** * Gets the current settings of WrapperSubsetEval. 
* * @return an array of strings suitable for passing to setOptions() */ public String[] getOptions () { String[] options = new String[2]; int current = 0; if (!getMissingMerge()) { options[current++] = "-M"; } if (getBinarizeNumericAttributes()) { options[current++] = "-B"; } while (current < options.length) { options[current++] = ""; } return options; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String binarizeNumericAttributesTipText() { return "Just binarize numeric attributes instead of properly discretizing them."; } /** * Binarize numeric attributes. * * @param b true=binarize numeric attributes */ public void setBinarizeNumericAttributes (boolean b) { m_Binarize = b; } /** * get whether numeric attributes are just being binarized. * * @return true if missing values are being distributed. */ public boolean getBinarizeNumericAttributes () { return m_Binarize; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String missingMergeTipText() { return "Distribute counts for missing values. Counts are distributed " +"across other values in proportion to their frequency. Otherwise, " +"missing is treated as a separate value."; } /** * distribute the counts for missing values across observed values * * @param b true=distribute missing values. */ public void setMissingMerge (boolean b) { m_missing_merge = b; } /** * get whether missing values are being distributed or not * * @return true if missing values are being distributed. */ public boolean getMissingMerge () { return m_missing_merge; } /** * Returns the capabilities of this evaluator. 
* * @return the capabilities of this evaluator * @see Capabilities */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Initializes an information gain attribute evaluator. * Discretizes all attributes that are numeric. * * @param data set of instances serving as training data * @throws Exception if the evaluator has not been * generated successfully */ public void buildEvaluator (Instances data) throws Exception { // can evaluator handle data? getCapabilities().testWithFail(data); int classIndex = data.classIndex(); int numInstances = data.numInstances(); if (!m_Binarize) { Discretize disTransform = new Discretize(); disTransform.setUseBetterEncoding(true); disTransform.setInputFormat(data); data = Filter.useFilter(data, disTransform); } else { NumericToBinary binTransform = new NumericToBinary(); binTransform.setInputFormat(data); data = Filter.useFilter(data, binTransform); } int numClasses = data.attribute(classIndex).numValues(); // Reserve space and initialize counters double[][][] counts = new double[data.numAttributes()][][]; for (int k = 0; k < data.numAttributes(); k++) { if (k != classIndex) { int numValues = data.attribute(k).numValues(); counts[k] = new double[numValues + 1][numClasses + 1]; } } // Initialize counters double[] temp = new double[numClasses + 1]; for (int k = 0; k < numInstances; k++) { Instance inst = data.instance(k); if (inst.classIsMissing()) { temp[numClasses] += inst.weight(); } else { temp[(int)inst.classValue()] += inst.weight(); } } for (int k = 0; k < counts.length; k++) { if (k != classIndex) { for (int i = 0; i < temp.length; i++) { counts[k][0][i] = temp[i]; } } 
} // Get counts for (int k = 0; k < numInstances; k++) { Instance inst = data.instance(k); for (int i = 0; i < inst.numValues(); i++) { if (inst.index(i) != classIndex) { if (inst.isMissingSparse(i) || inst.classIsMissing()) { if (!inst.isMissingSparse(i)) { counts[inst.index(i)][(int)inst.valueSparse(i)][numClasses] += inst.weight(); counts[inst.index(i)][0][numClasses] -= inst.weight(); } else if (!inst.classIsMissing()) { counts[inst.index(i)][data.attribute(inst.index(i)).numValues()] [(int)inst.classValue()] += inst.weight(); counts[inst.index(i)][0][(int)inst.classValue()] -= inst.weight(); } else { counts[inst.index(i)][data.attribute(inst.index(i)).numValues()] [numClasses] += inst.weight(); counts[inst.index(i)][0][numClasses] -= inst.weight(); } } else { counts[inst.index(i)][(int)inst.valueSparse(i)] [(int)inst.classValue()] += inst.weight(); counts[inst.index(i)][0][(int)inst.classValue()] -= inst.weight(); } } } } // distribute missing counts if required if (m_missing_merge) { for (int k = 0; k < data.numAttributes(); k++) { if (k != classIndex) { int numValues = data.attribute(k).numValues(); // Compute marginals double[] rowSums = new double[numValues]; double[] columnSums = new double[numClasses]; double sum = 0; for (int i = 0; i < numValues; i++) { for (int j = 0; j < numClasses; j++) { rowSums[i] += counts[k][i][j]; columnSums[j] += counts[k][i][j]; } sum += rowSums[i]; } if (Utils.gr(sum, 0)) { double[][] additions = new double[numValues][numClasses]; // Compute what needs to be added to each row for (int i = 0; i < numValues; i++) { for (int j = 0; j < numClasses; j++) { additions[i][j] = (rowSums[i] / sum) * counts[k][numValues][j]; } } // Compute what needs to be added to each column for (int i = 0; i < numClasses; i++) { for (int j = 0; j < numValues; j++) { additions[j][i] += (columnSums[i] / sum) * counts[k][j][numClasses]; } } // Compute what needs to be added to each cell for (int i = 0; i < numClasses; i++) { for (int j = 0; j < 
numValues; j++) { additions[j][i] += (counts[k][j][i] / sum) * counts[k][numValues][numClasses]; } } // Make new contingency table double[][] newTable = new double[numValues][numClasses]; for (int i = 0; i < numValues; i++) { for (int j = 0; j < numClasses; j++) { newTable[i][j] = counts[k][i][j] + additions[i][j]; } } counts[k] = newTable; } } } } // Compute info gains m_InfoGains = new double[data.numAttributes()]; for (int i = 0; i < data.numAttributes(); i++) { if (i != classIndex) { m_InfoGains[i] = (ContingencyTables.entropyOverColumns(counts[i]) - ContingencyTables.entropyConditionedOnRows(counts[i])); } } } /** * Reset options to their default values */ protected void resetOptions () { m_InfoGains = null; m_missing_merge = true; m_Binarize = false; } /** * evaluates an individual attribute by measuring the amount * of information gained about the class given the attribute. * * @param attribute the index of the attribute to be evaluated * @return the info gain * @throws Exception if the attribute could not be evaluated */ public double evaluateAttribute (int attribute) throws Exception { return m_InfoGains[attribute]; } /** * Describe the attribute evaluator * @return a description of the attribute evaluator as a string */ public String toString () { StringBuffer text = new StringBuffer(); if (m_InfoGains == null) { text.append("Information Gain attribute evaluator has not been built"); } else { text.append("\tInformation Gain Ranking Filter"); if (!m_missing_merge) { text.append("\n\tMissing values treated as seperate"); } if (m_Binarize) { text.append("\n\tNumeric attributes are just binarized"); } } text.append("\n"); return text.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } // ============ // Test method. // ============ /** * Main method for testing this class. 
* * @param args the options */ public static void main (String[] args) { runEvaluator(new InfoGainAttributeEval(), args); } }
13,899
28.574468
104
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/LFSMethods.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * LFSMethods.java * Copyright (C) 2007 Martin Guetlein * */ package weka.attributeSelection; import weka.core.FastVector; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import java.io.Serializable; import java.util.BitSet; import java.util.Hashtable; /** * @author Martin Guetlein (martin.guetlein@gmail.com) * @version $Revision: 1.3 $ */ public class LFSMethods implements RevisionHandler { /** max-size of array bestGroupOfSize, should be suffient */ private final static int MAX_SUBSET_SIZE = 200; private BitSet m_bestGroup; private double m_bestMerit; private int m_evalsTotal; private int m_evalsCached; private BitSet[] m_bestGroupOfSize = new BitSet[MAX_SUBSET_SIZE]; /** * empty constructor * * methods are not static because of access to inner class Link2 and * LinkedList2 * */ public LFSMethods() { } /** * @return best group found by forwardSearch/floatingForwardSearch */ public BitSet getBestGroup() { return m_bestGroup; } /** * @return merit of best group found by forwardSearch/floatingForwardSearch */ public double getBestMerit() { return m_bestMerit; } /** * @return best group of size found by forwardSearch */ public BitSet getBestGroupOfSize(int size) { return m_bestGroupOfSize[size]; } /** * @return number of 
cached / not performed evaluations */ public int getNumEvalsCached() { return m_evalsCached; } /** * @return number totally performed evaluations */ public int getNumEvalsTotal() { return m_evalsTotal; } /** * @return ranking (integer array) of attributes in data with evaluator (sorting is NOT stable!) */ public int[] rankAttributes(Instances data, SubsetEvaluator evaluator, boolean verbose) throws Exception { if (verbose) { System.out.println("Ranking attributes with " + evaluator.getClass().getName()); } double[] merit = new double[data.numAttributes()]; BitSet group = new BitSet(data.numAttributes()); for (int k = 0; k < data.numAttributes(); k++) { if (k != data.classIndex()) { group.set(k); merit[k] -= evaluator.evaluateSubset(group); m_evalsTotal++; group.clear(k); } else { merit[k] = Double.MAX_VALUE; } if (verbose) { System.out.println(k + ": " + merit[k]); } } int[] ranking = Utils.sort(merit); if (verbose) { System.out.print("Ranking [ "); for (int i = 0; i < ranking.length; i++) { System.out.print(ranking[i] + " "); } System.out.println("]\n"); } return ranking; } /** * Performs linear forward selection * * @param cacheSize chacheSize (times number of instances) to store already evaluated sets * @param startGroup start group for search (can be null) * @param ranking ranking of attributes (as produced by rankAttributes), no ranking would be [0,1,2,3,4..] 
* @param k number of top k attributes that are taken into account * @param incrementK true -> fixed-set, false -> fixed-width * @param maxStale number of times the search proceeds even though no improvement was found (1 = hill-climbing) * @param forceResultSize stopping criteria changed from no-improvement (forceResultSize=-1) to subset-size * @param data * @param evaluator * @param verbose * @return BitSet, that cotains the best-group found * @throws Exception */ public BitSet forwardSearch(int cacheSize, BitSet startGroup, int[] ranking, int k, boolean incrementK, int maxStale, int forceResultSize, Instances data, SubsetEvaluator evaluator, boolean verbose) throws Exception { if ((forceResultSize > 0) && (maxStale > 1)) { throw new Exception("Forcing result size only works for maxStale=1"); } if (verbose) { System.out.println("Starting forward selection"); } BitSet bestGroup; BitSet tempGroup; int bestSize = 0; int tempSize = 0; double bestMerit; double tempMerit = 0; Link2 link; LinkedList2 list = new LinkedList2(maxStale); Hashtable alreadyExpanded = new Hashtable(cacheSize * data.numAttributes()); int insertCount = 0; int stale = 0; boolean improvement; int thisK = k; int evalsTotal = 0; int evalsCached = 0; bestGroup = (BitSet) startGroup.clone(); String hashKey = bestGroup.toString(); bestMerit = evaluator.evaluateSubset(bestGroup); if (verbose) { System.out.print("Group: "); printGroup(bestGroup, data.numAttributes()); System.out.println("Merit: " + tempMerit); System.out.println("----------"); } alreadyExpanded.put(hashKey, new Double(bestMerit)); insertCount++; bestSize = bestGroup.cardinality(); //the list is only used if best-first search is applied if (maxStale > 1) { Object[] best = new Object[1]; best[0] = bestGroup.clone(); list.addToList(best, bestMerit); } while (stale < maxStale) { improvement = false; //best-first: take first elem from list if (maxStale > 1) { if (list.size() == 0) { stale = maxStale; break; } link = list.getLinkAt(0); tempGroup 
= (BitSet) (link.getData()[0]); tempGroup = (BitSet) tempGroup.clone(); list.removeLinkAt(0); tempSize = 0; for (int i = 0; i < data.numAttributes(); i++) { if (tempGroup.get(i)) { tempSize++; } } } else //hill-climbing { tempGroup = (BitSet) bestGroup.clone(); tempSize = bestSize; } //set number of top k attributes that are taken into account if (incrementK) { thisK = Math.min(Math.max(thisK, k + tempSize), data.numAttributes()); } else { thisK = k; } //temporarilly add attributes to current set for (int i = 0; i < thisK; i++) { if ((ranking[i] == data.classIndex()) || tempGroup.get(ranking[i])) { continue; } tempGroup.set(ranking[i]); tempSize++; hashKey = tempGroup.toString(); if (!alreadyExpanded.containsKey(hashKey)) { evalsTotal++; tempMerit = evaluator.evaluateSubset(tempGroup); if (insertCount > (cacheSize * data.numAttributes())) { alreadyExpanded = new Hashtable(cacheSize * data.numAttributes()); insertCount = 0; } alreadyExpanded.put(hashKey, new Double(tempMerit)); insertCount++; } else { evalsCached++; tempMerit = ((Double) alreadyExpanded.get(hashKey)).doubleValue(); } if (verbose) { System.out.print("Group: "); printGroup(tempGroup, data.numAttributes()); System.out.println("Merit: " + tempMerit); } if (((tempMerit - bestMerit) > 0.00001) || ((forceResultSize >= tempSize) && (tempSize > bestSize))) { improvement = true; stale = 0; bestMerit = tempMerit; bestSize = tempSize; bestGroup = (BitSet) (tempGroup.clone()); m_bestGroupOfSize[bestSize] = (BitSet) (tempGroup.clone()); } if (maxStale > 1) { Object[] add = new Object[1]; add[0] = tempGroup.clone(); list.addToList(add, tempMerit); } tempGroup.clear(ranking[i]); tempSize--; } if (verbose) { System.out.println("----------"); } //handle stopping criteria if (!improvement || (forceResultSize == bestSize)) { stale++; } if ((forceResultSize > 0) && (bestSize == forceResultSize)) { break; } } if (verbose) { System.out.println("Best Group: "); printGroup(bestGroup, data.numAttributes()); 
System.out.println(); } m_bestGroup = bestGroup; m_bestMerit = bestMerit; m_evalsTotal += evalsTotal; m_evalsCached += evalsCached; return bestGroup; } /** * Performs linear floating forward selection * ( the stopping criteria cannot be changed to a specific size value ) * * * @param cacheSize chacheSize (times number of instances) to store already evaluated sets * @param startGroup start group for search (can be null) * @param ranking ranking of attributes (as produced by rankAttributes), no ranking would be [0,1,2,3,4..] * @param k number of top k attributes that are taken into account * @param incrementK true -> fixed-set, false -> fixed-width * @param maxStale number of times the search proceeds even though no improvement was found (1 = hill-climbing) * @param data * @param evaluator * @param verbose * @return BitSet, that cotains the best-group found * @throws Exception */ public BitSet floatingForwardSearch(int cacheSize, BitSet startGroup, int[] ranking, int k, boolean incrementK, int maxStale, Instances data, SubsetEvaluator evaluator, boolean verbose) throws Exception { if (verbose) { System.out.println("Starting floating forward selection"); } BitSet bestGroup; BitSet tempGroup; int bestSize = 0; int tempSize = 0; double bestMerit; double tempMerit = 0; Link2 link; LinkedList2 list = new LinkedList2(maxStale); Hashtable alreadyExpanded = new Hashtable(cacheSize * data.numAttributes()); int insertCount = 0; int backtrackingSteps = 0; boolean improvement; boolean backward; int thisK = k; int evalsTotal = 0; int evalsCached = 0; bestGroup = (BitSet) startGroup.clone(); String hashKey = bestGroup.toString(); bestMerit = evaluator.evaluateSubset(bestGroup); if (verbose) { System.out.print("Group: "); printGroup(bestGroup, data.numAttributes()); System.out.println("Merit: " + tempMerit); System.out.println("----------"); } alreadyExpanded.put(hashKey, new Double(bestMerit)); insertCount++; bestSize = bestGroup.cardinality(); if (maxStale > 1) { Object[] best = 
new Object[1]; best[0] = bestGroup.clone(); list.addToList(best, bestMerit); } backward = improvement = true; while (true) { // we are search in backward direction -> // continue backward search as long as a new best set is found if (backward) { if (!improvement) { backward = false; } } // we are searching forward -> // stop search or start backward step else { if (!improvement && (backtrackingSteps >= maxStale)) { break; } backward = true; } improvement = false; // best-first: take first elem from list if (maxStale > 1) { if (list.size() == 0) { backtrackingSteps = maxStale; break; } link = list.getLinkAt(0); tempGroup = (BitSet) (link.getData()[0]); tempGroup = (BitSet) tempGroup.clone(); list.removeLinkAt(0); tempSize = 0; for (int i = 0; i < data.numAttributes(); i++) { if (tempGroup.get(i)) { tempSize++; } } } else //hill-climbing { tempGroup = (BitSet) bestGroup.clone(); tempSize = bestSize; } //backward search only makes sense for set-size bigger than 2 if (backward && (tempSize <= 2)) { backward = false; } //set number of top k attributes that are taken into account if (incrementK) { thisK = Math.max(thisK, Math.min(Math.max(thisK, k + tempSize), data.numAttributes())); } else { thisK = k; } //temporarilly add/remove attributes to/from current set for (int i = 0; i < thisK; i++) { if (ranking[i] == data.classIndex()) { continue; } if (backward) { if (!tempGroup.get(ranking[i])) { continue; } tempGroup.clear(ranking[i]); tempSize--; } else { if ((ranking[i] == data.classIndex()) || tempGroup.get(ranking[i])) { continue; } tempGroup.set(ranking[i]); tempSize++; } hashKey = tempGroup.toString(); if (!alreadyExpanded.containsKey(hashKey)) { evalsTotal++; tempMerit = evaluator.evaluateSubset(tempGroup); if (insertCount > (cacheSize * data.numAttributes())) { alreadyExpanded = new Hashtable(cacheSize * data.numAttributes()); insertCount = 0; } alreadyExpanded.put(hashKey, new Double(tempMerit)); insertCount++; } else { evalsCached++; tempMerit = ((Double) 
alreadyExpanded.get(hashKey)).doubleValue(); } if (verbose) { System.out.print("Group: "); printGroup(tempGroup, data.numAttributes()); System.out.println("Merit: " + tempMerit); } if ((tempMerit - bestMerit) > 0.00001) { improvement = true; backtrackingSteps = 0; bestMerit = tempMerit; bestSize = tempSize; bestGroup = (BitSet) (tempGroup.clone()); } if (maxStale > 1) { Object[] add = new Object[1]; add[0] = tempGroup.clone(); list.addToList(add, tempMerit); } if (backward) { tempGroup.set(ranking[i]); tempSize++; } else { tempGroup.clear(ranking[i]); tempSize--; } } if (verbose) { System.out.println("----------"); } if ((maxStale > 1) && backward && !improvement) { Object[] add = new Object[1]; add[0] = tempGroup.clone(); list.addToList(add, Double.MAX_VALUE); } if (!backward && !improvement) { backtrackingSteps++; } } if (verbose) { System.out.println("Best Group: "); printGroup(bestGroup, data.numAttributes()); System.out.println(); } m_bestGroup = bestGroup; m_bestMerit = bestMerit; m_evalsTotal += evalsTotal; m_evalsCached += evalsCached; return bestGroup; } /** * Debug-out */ protected static void printGroup(BitSet tt, int numAttribs) { System.out.print("{ "); for (int i = 0; i < numAttribs; i++) { if (tt.get(i) == true) { System.out.print((i + 1) + " "); } } System.out.println("}"); } // Inner classes /** * Class for a node in a linked list. Used in best first search. * Copied from BestFirstSearch * * @author Mark Hall (mhall@cs.waikato.ac.nz) */ public class Link2 implements Serializable, RevisionHandler { /** for serialization. */ private static final long serialVersionUID = -7422719407475185086L; /* BitSet group; */ Object[] m_data; double m_merit; // Constructor public Link2(Object[] data, double mer) { // group = (BitSet)gr.clone(); m_data = data; m_merit = mer; } /** Get a group */ public Object[] getData() { return m_data; } public String toString() { return ("Node: " + m_data.toString() + " " + m_merit); } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.3 $"); } } /** * Class for handling a linked list. Used in best first search. Extends the * Vector class. * * @author Mark Hall (mhall@cs.waikato.ac.nz) */ public class LinkedList2 extends FastVector { /** for serialization. */ private static final long serialVersionUID = -7776010892419656105L; // Max number of elements in the list int m_MaxSize; // ================ // Public methods // ================ public LinkedList2(int sz) { super(); m_MaxSize = sz; } /** * removes an element (Link) at a specific index from the list. * * @param index * the index of the element to be removed. */ public void removeLinkAt(int index) throws Exception { if ((index >= 0) && (index < size())) { removeElementAt(index); } else { throw new Exception("index out of range (removeLinkAt)"); } } /** * returns the element (Link) at a specific index from the list. * * @param index * the index of the element to be returned. */ public Link2 getLinkAt(int index) throws Exception { if (size() == 0) { throw new Exception("List is empty (getLinkAt)"); } else { if ((index >= 0) && (index < size())) { return ((Link2) (elementAt(index))); } else { throw new Exception("index out of range (getLinkAt)"); } } } /** * adds an element (Link) to the list. 
* * @param gr * the attribute set specification * @param mer * the "merit" of this attribute set */ public void addToList(Object[] data, double mer) throws Exception { Link2 newL = new Link2(data, mer); if (size() == 0) { addElement(newL); } else { if (mer > ((Link2) (firstElement())).m_merit) { if (size() == m_MaxSize) { removeLinkAt(m_MaxSize - 1); } // ---------- insertElementAt(newL, 0); } else { int i = 0; int size = size(); boolean done = false; // ------------ // don't insert if list contains max elements an this // is worst than the last if ((size == m_MaxSize) && (mer <= ((Link2) (lastElement())).m_merit)) { } // --------------- else { while ((!done) && (i < size)) { if (mer > ((Link2) (elementAt(i))).m_merit) { if (size == m_MaxSize) { removeLinkAt(m_MaxSize - 1); } // --------------------- insertElementAt(newL, i); done = true; } else { if (i == (size - 1)) { addElement(newL); done = true; } else { i++; } } } } } } } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.3 $"); } } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.3 $"); } }
20,164
26.585499
128
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/LatentSemanticAnalysis.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * LatentSemanticAnalysis.java * Copyright (C) 2008 Amri Napolitano * */ package weka.attributeSelection; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Check; import weka.core.CheckOptionHandler; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.matrix.Matrix; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SparseInstance; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.matrix.SingularValueDecomposition; import weka.filters.Filter; import weka.filters.unsupervised.attribute.NominalToBinary; import weka.filters.unsupervised.attribute.Normalize; import weka.filters.unsupervised.attribute.Remove; import weka.filters.unsupervised.attribute.ReplaceMissingValues; import java.io.BufferedReader; import java.io.File; import java.io.FileReader; import java.util.Enumeration; import java.util.Vector; import weka.core.DenseInstance; /** <!-- globalinfo-start --> * Performs latent semantic analysis and transformation of the data. * Use in conjunction with a Ranker search. A low-rank approximation * of the full data is found by specifying the number of singular values * to use. 
The dataset may be transformed to give the relation of either * the attributes or the instances (default) to the concept space created * by the transformation. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -N * Normalize input data.</pre> * * <pre> -R * Rank approximation used in LSA. May be actual number of * LSA attributes to include (if greater than 1) or a proportion * of total singular values to account for (if between 0 and 1). * A value less than or equal to zero means use all latent variables. * (default = 0.95)</pre> * * <pre> -A * Maximum number of attributes to include in * transformed attribute names. (-1 = include all)</pre> * <!-- options-end --> * * @author Amri Napolitano * @version $Revision: 5728 $ */ public class LatentSemanticAnalysis extends UnsupervisedAttributeEvaluator implements AttributeTransformer, OptionHandler { /** For serialization */ static final long serialVersionUID = -8712112988018106198L; /** The data to transform analyse/transform */ private Instances m_trainInstances; /** * Keep a copy for the class attribute (if set) and for checking * header compatibility */ private Instances m_trainHeader; /** The header for the transformed data format */ private Instances m_transformedFormat; /** Data has a class set */ private boolean m_hasClass; /** Class index */ private int m_classIndex; /** Number of attributes */ private int m_numAttributes; /** Number of instances */ private int m_numInstances; /** Is transpose necessary because numAttributes < numInstances? 
*/ private boolean m_transpose = false; /** Will hold the left singular vectors */ private Matrix m_u = null; /** Will hold the singular values */ private Matrix m_s = null; /** Will hold the right singular values */ private Matrix m_v = null; /** Will hold the matrix used to transform instances to the new feature space */ private Matrix m_transformationMatrix = null; /** Filters for original data */ private ReplaceMissingValues m_replaceMissingFilter; private Normalize m_normalizeFilter; private NominalToBinary m_nominalToBinaryFilter; private Remove m_attributeFilter; /** The number of attributes in the LSA transformed data */ private int m_outputNumAttributes = -1; /** Normalize the input data? */ private boolean m_normalize = false; /** The approximation rank to use (between 0 and 1 means coverage proportion) */ private double m_rank = 0.95; /** The sum of the squares of the singular values */ private double m_sumSquaredSingularValues = 0.0; /** The actual rank number to use for computation */ private int m_actualRank = -1; /** Maximum number of attributes in the transformed attribute name */ private int m_maxAttributesInName = 5; /** * Returns a string describing this attribute transformer * @return a description of the evaluator suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Performs latent semantic analysis and transformation of the data. Use in " + "conjunction with a Ranker search. A low-rank approximation of the full data is " + "found by either specifying the number of singular values to use or specifying a " + "proportion of the singular values to cover."; } /** * Returns an enumeration describing the available options. <p> * * @return an enumeration of all the available options. **/ public Enumeration listOptions () { Vector options = new Vector(4); options.addElement(new Option("\tNormalize input data.", "N", 0, "-N")); options.addElement(new Option("\tRank approximation used in LSA. 
\n" + "\tMay be actual number of LSA attributes \n" + "\tto include (if greater than 1) or a \n" + "\tproportion of total singular values to \n" + "\taccount for (if between 0 and 1). \n" + "\tA value less than or equal to zero means \n" + "\tuse all latent variables.(default = 0.95)", "R",1,"-R")); options.addElement(new Option("\tMaximum number of attributes to include\n" + "\tin transformed attribute names.\n" + "\t(-1 = include all)" , "A", 1, "-A")); return options.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -N * Normalize input data.</pre> * * <pre> -R * Rank approximation used in LSA. May be actual number of * LSA attributes to include (if greater than 1) or a proportion * of total singular values to account for (if between 0 and 1). * A value less than or equal to zero means use all latent variables. * (default = 0.95)</pre> * * <pre> -A * Maximum number of attributes to include in * transformed attribute names. 
(-1 = include all)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions (String[] options) throws Exception { resetOptions(); String optionString; //set approximation rank optionString = Utils.getOption('R', options); if (optionString.length() != 0) { double temp; temp = Double.valueOf(optionString).doubleValue(); setRank(temp); } //set number of attributes to use in transformed names optionString = Utils.getOption('A', options); if (optionString.length() != 0) { setMaximumAttributeNames(Integer.parseInt(optionString)); } //set normalize option setNormalize(Utils.getFlag('N', options)); } /** * Reset to defaults */ private void resetOptions() { m_rank = 0.95; m_normalize = true; m_maxAttributesInName = 5; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String normalizeTipText() { return "Normalize input data."; } /** * Set whether input data will be normalized. * @param newNormalize true if input data is to be normalized */ public void setNormalize(boolean newNormalize) { m_normalize = newNormalize; } /** * Gets whether or not input data is to be normalized * @return true if input data is to be normalized */ public boolean getNormalize() { return m_normalize; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String rankTipText() { return "Matrix rank to use for data reduction. 
Can be a" + " proportion to indicate desired coverage"; } /** * Sets the desired matrix rank (or coverage proportion) for feature-space reduction * @param newRank the desired rank (or coverage) for feature-space reduction */ public void setRank(double newRank) { m_rank = newRank; } /** * Gets the desired matrix rank (or coverage proportion) for feature-space reduction * @return the rank (or coverage) for feature-space reduction */ public double getRank() { return m_rank; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String maximumAttributeNamesTipText() { return "The maximum number of attributes to include in transformed attribute names."; } /** * Sets maximum number of attributes to include in * transformed attribute names. * @param newMaxAttributes the maximum number of attributes */ public void setMaximumAttributeNames(int newMaxAttributes) { m_maxAttributesInName = newMaxAttributes; } /** * Gets maximum number of attributes to include in * transformed attribute names. * @return the maximum number of attributes */ public int getMaximumAttributeNames() { return m_maxAttributesInName; } /** * Gets the current settings of LatentSemanticAnalysis * * @return an array of strings suitable for passing to setOptions() */ public String[] getOptions () { String[] options = new String[5]; int current = 0; if (getNormalize()) { options[current++] = "-N"; } options[current++] = "-R"; options[current++] = "" + getRank(); options[current++] = "-A"; options[current++] = "" + getMaximumAttributeNames(); while (current < options.length) { options[current++] = ""; } return options; } /** * Returns the capabilities of this evaluator. 
* * @return the capabilities of this evaluator * @see Capabilities */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); result.enable(Capability.NO_CLASS); return result; } /** * Initializes the singular values/vectors and performs the analysis * @param data the instances to analyse/transform * @throws Exception if analysis fails */ public void buildEvaluator(Instances data) throws Exception { // can evaluator handle data? getCapabilities().testWithFail(data); buildAttributeConstructor(data); } /** * Initializes the singular values/vectors and performs the analysis * @param data the instances to analyse/transform * @throws Exception if analysis fails */ private void buildAttributeConstructor (Instances data) throws Exception { // initialize attributes for performing analysis m_transpose = false; m_s = null; m_u = null; m_v = null; m_outputNumAttributes = -1; m_actualRank = -1; m_sumSquaredSingularValues = 0.0; m_trainInstances = new Instances(data); m_trainHeader = null; m_attributeFilter = null; m_nominalToBinaryFilter = null; m_replaceMissingFilter = new ReplaceMissingValues(); m_replaceMissingFilter.setInputFormat(m_trainInstances); m_trainInstances = Filter.useFilter(m_trainInstances, m_replaceMissingFilter); // vector to hold indices of attributes to delete (class attribute, // attributes that are all missing, or attributes with one distinct value) Vector attributesToRemove = new Vector(); // if data has a class attribute if (m_trainInstances.classIndex() >= 0) { m_hasClass = true; m_classIndex = m_trainInstances.classIndex(); // set class 
attribute to be removed attributesToRemove.addElement(new Integer(m_classIndex)); } // make copy of training data so the class values (if set) can be appended to final // transformed instances and we can check header compatibility m_trainHeader = new Instances(m_trainInstances, 0); // normalize data if desired if (m_normalize) { m_normalizeFilter = new Normalize(); m_normalizeFilter.setInputFormat(m_trainInstances); m_trainInstances = Filter.useFilter(m_trainInstances, m_normalizeFilter); } // convert any nominal attributes to binary numeric attributes m_nominalToBinaryFilter = new NominalToBinary(); m_nominalToBinaryFilter.setInputFormat(m_trainInstances); m_trainInstances = Filter.useFilter(m_trainInstances, m_nominalToBinaryFilter); // delete any attributes with only one distinct value or are all missing for (int i = 0; i < m_trainInstances.numAttributes(); i++) { if (m_trainInstances.numDistinctValues(i) <= 1) { attributesToRemove.addElement(new Integer(i)); } } // remove columns from the data if necessary if (attributesToRemove.size() > 0) { m_attributeFilter = new Remove(); int [] todelete = new int[attributesToRemove.size()]; for (int i = 0; i < attributesToRemove.size(); i++) { todelete[i] = ((Integer)(attributesToRemove.elementAt(i))).intValue(); } m_attributeFilter.setAttributeIndicesArray(todelete); m_attributeFilter.setInvertSelection(false); m_attributeFilter.setInputFormat(m_trainInstances); m_trainInstances = Filter.useFilter(m_trainInstances, m_attributeFilter); } // can evaluator handle the processed data ? e.g., enough attributes? 
getCapabilities().testWithFail(m_trainInstances); // record properties of final, ready-to-process data m_numInstances = m_trainInstances.numInstances(); m_numAttributes = m_trainInstances.numAttributes(); // create matrix of attribute values and compute singular value decomposition double [][] trainValues = new double[m_numAttributes][m_numInstances]; for (int i = 0; i < m_numAttributes; i++) { trainValues[i] = m_trainInstances.attributeToDoubleArray(i); } Matrix trainMatrix = new Matrix(trainValues); // svd requires rows >= columns, so transpose data if necessary if (m_numAttributes < m_numInstances) { m_transpose = true; trainMatrix = trainMatrix.transpose(); } SingularValueDecomposition trainSVD = trainMatrix.svd(); m_u = trainSVD.getU(); // left singular vectors m_s = trainSVD.getS(); // singular values m_v = trainSVD.getV(); // right singular vectors // find actual rank to use int maxSingularValues = trainSVD.rank(); for (int i = 0; i < m_s.getRowDimension(); i++) { m_sumSquaredSingularValues += m_s.get(i, i) * m_s.get(i, i); } if (maxSingularValues == 0) { // no nonzero singular values (shouldn't happen) // reset values from computation m_s = null; m_u = null; m_v = null; m_sumSquaredSingularValues = 0.0; throw new Exception("SVD computation produced no non-zero singular values."); } if (m_rank > maxSingularValues || m_rank <= 0) { // adjust rank if too high or too low m_actualRank = maxSingularValues; } else if (m_rank < 1.0) { // determine how many singular values to include for desired coverage double currentSumOfSquaredSingularValues = 0.0; for (int i = 0; i < m_s.getRowDimension() && m_actualRank == -1; i++) { currentSumOfSquaredSingularValues += m_s.get(i, i) * m_s.get(i, i); if (currentSumOfSquaredSingularValues / m_sumSquaredSingularValues >= m_rank) { m_actualRank = i + 1; } } } else { m_actualRank = (int) m_rank; } // lower matrix ranks, adjust for transposition (if necessary), and // compute matrix for transforming future instances if (m_transpose) 
{ Matrix tempMatrix = m_u; m_u = m_v; m_v = tempMatrix; } m_u = m_u.getMatrix(0, m_u.getRowDimension() - 1, 0, m_actualRank - 1); m_s = m_s.getMatrix(0, m_actualRank - 1, 0, m_actualRank - 1); m_v = m_v.getMatrix(0, m_v.getRowDimension() - 1, 0, m_actualRank - 1); m_transformationMatrix = m_u.times(m_s.inverse()); //create dataset header for transformed instances m_transformedFormat = setOutputFormat(); } /** * Set the format for the transformed data * @return a set of empty Instances (header only) in the new format */ private Instances setOutputFormat() { // if analysis hasn't been performed (successfully) yet if (m_s == null) { return null; } // set up transformed attributes if (m_hasClass) { m_outputNumAttributes = m_actualRank + 1; } else { m_outputNumAttributes = m_actualRank; } int numAttributesInName = m_maxAttributesInName; if (numAttributesInName <= 0 || numAttributesInName >= m_numAttributes) { numAttributesInName = m_numAttributes; } FastVector attributes = new FastVector(m_outputNumAttributes); for (int i = 0; i < m_actualRank; i++) { // create attribute name String attributeName = ""; double [] attributeCoefficients = m_transformationMatrix.getMatrix(0, m_numAttributes - 1, i, i).getColumnPackedCopy(); for (int j = 0; j < numAttributesInName; j++) { if (j > 0) { attributeName += "+"; } attributeName += Utils.doubleToString(attributeCoefficients[j], 5, 3); attributeName += m_trainInstances.attribute(j).name(); } if (numAttributesInName < m_numAttributes) { attributeName += "..."; } // add attribute attributes.addElement(new Attribute(attributeName)); } // add original class attribute if present if (m_hasClass) { attributes.addElement(m_trainHeader.classAttribute().copy()); } // create blank header Instances outputFormat = new Instances(m_trainInstances.relationName() + "_LSA", attributes, 0); m_outputNumAttributes = outputFormat.numAttributes(); // set class attribute if applicable if (m_hasClass) { outputFormat.setClassIndex(m_outputNumAttributes - 1); 
} return outputFormat; } /** * Returns just the header for the transformed data (ie. an empty * set of instances. This is so that AttributeSelection can * determine the structure of the transformed data without actually * having to get all the transformed data through getTransformedData(). * @return the header of the transformed data. * @throws Exception if the header of the transformed data can't * be determined. */ public Instances transformedHeader() throws Exception { if (m_s == null) { throw new Exception("Latent Semantic Analysis hasn't been successfully performed."); } return m_transformedFormat; } /** * Transform the supplied data set (assumed to be the same format * as the training data) * @return the transformed training data * @throws Exception if transformed data can't be returned */ public Instances transformedData(Instances data) throws Exception { if (m_s == null) { throw new Exception("Latent Semantic Analysis hasn't been built yet"); } Instances output = new Instances(m_transformedFormat, m_numInstances); // the transformed version of instance i from the training data // is stored as the i'th row vector in v (the right singular vectors) for (int i = 0; i < data.numInstances(); i++) { Instance currentInstance = data.instance(i); // record attribute values for converted instance double [] newValues = new double[m_outputNumAttributes]; for (int j = 0; j < m_actualRank; j++) { // fill in values from v newValues[j] = m_v.get(i, j); } if (m_hasClass) { // copy class value if applicable newValues[m_outputNumAttributes - 1] = currentInstance.classValue(); } //create new instance with recorded values and add to output dataset Instance newInstance; if (currentInstance instanceof SparseInstance) { newInstance = new SparseInstance(currentInstance.weight(), newValues); } else { newInstance = new DenseInstance(currentInstance.weight(), newValues); } output.add(newInstance); } return output; } /** * Evaluates the merit of a transformed attribute. 
This is defined * to be the square of the singular value for the latent variable * corresponding to the transformed attribute. * @param att the attribute to be evaluated * @return the merit of a transformed attribute * @throws Exception if attribute can't be evaluated */ public double evaluateAttribute(int att) throws Exception { if (m_s == null) { throw new Exception("Latent Semantic Analysis hasn't been successfully" + " performed yet!"); } //return the square of the corresponding singular value return (m_s.get(att, att) * m_s.get(att, att)) / m_sumSquaredSingularValues; } /** * Transform an instance in original (unnormalized) format * @param instance an instance in the original (unnormalized) format * @return a transformed instance * @throws Exception if instance can't be transformed */ public Instance convertInstance(Instance instance) throws Exception { if (m_s == null) { throw new Exception("convertInstance: Latent Semantic Analysis not " + "performed yet."); } // array to hold new attribute values double [] newValues = new double[m_outputNumAttributes]; // apply filters so new instance is in same format as training instances Instance tempInstance = (Instance)instance.copy(); if (!instance.dataset().equalHeaders(m_trainHeader)) { throw new Exception("Can't convert instance: headers don't match: " + "LatentSemanticAnalysis"); } // replace missing values m_replaceMissingFilter.input(tempInstance); m_replaceMissingFilter.batchFinished(); tempInstance = m_replaceMissingFilter.output(); // normalize if (m_normalize) { m_normalizeFilter.input(tempInstance); m_normalizeFilter.batchFinished(); tempInstance = m_normalizeFilter.output(); } // convert nominal attributes to binary m_nominalToBinaryFilter.input(tempInstance); m_nominalToBinaryFilter.batchFinished(); tempInstance = m_nominalToBinaryFilter.output(); // remove class/other attributes if (m_attributeFilter != null) { m_attributeFilter.input(tempInstance); m_attributeFilter.batchFinished(); tempInstance = 
m_attributeFilter.output(); } // record new attribute values if (m_hasClass) { // copy class value newValues[m_outputNumAttributes - 1] = instance.classValue(); } double [][] oldInstanceValues = new double[1][m_numAttributes]; oldInstanceValues[0] = tempInstance.toDoubleArray(); Matrix instanceVector = new Matrix(oldInstanceValues); // old attribute values instanceVector = instanceVector.times(m_transformationMatrix); // new attribute values for (int i = 0; i < m_actualRank; i++) { newValues[i] = instanceVector.get(0, i); } // return newly transformed instance if (instance instanceof SparseInstance) { return new SparseInstance(instance.weight(), newValues); } else { return new DenseInstance(instance.weight(), newValues); } } /** * Returns a description of this attribute transformer * @return a String describing this attribute transformer */ public String toString() { if (m_s == null) { return "Latent Semantic Analysis hasn't been built yet!"; } else { return "\tLatent Semantic Analysis Attribute Transformer\n\n" + lsaSummary(); } } /** * Return a summary of the analysis * @return a summary of the analysis. 
*/ private String lsaSummary() { StringBuffer result = new StringBuffer(); // print number of latent variables used result.append("Number of latent variables utilized: " + m_actualRank); // print singular values result.append("\n\nSingularValue\tLatentVariable#\n"); // create single array of singular values rather than diagonal matrix for (int i = 0; i < m_actualRank; i++) { result.append(Utils.doubleToString(m_s.get(i, i), 9, 5) + "\t" + (i + 1) + "\n"); } // print attribute vectors result.append("\nAttribute vectors (left singular vectors) -- row vectors show\n" + "the relation between the original attributes and the latent \n" + "variables computed by the singular value decomposition:\n"); for (int i = 0; i < m_actualRank; i++) { result.append("LatentVariable#" + (i + 1) + "\t"); } result.append("AttributeName\n"); for (int i = 0; i < m_u.getRowDimension(); i++) { // for each attribute for (int j = 0; j < m_u.getColumnDimension(); j++) { // for each latent variable result.append(Utils.doubleToString(m_u.get(i, j), 9, 5) + "\t\t"); } result.append(m_trainInstances.attribute(i).name() + "\n"); } // print instance vectors result.append("\n\nInstance vectors (right singular vectors) -- column\n" + "vectors show the relation between the original instances and the\n" + "latent variables computed by the singular value decomposition:\n"); for (int i = 0; i < m_numInstances; i++) { result.append("Instance#" + (i + 1) + "\t"); } result.append("LatentVariable#\n"); for (int i = 0; i < m_v.getColumnDimension(); i++) { // for each instance for (int j = 0; j < m_v.getRowDimension(); j++) { // for each latent variable // going down columns instead of across rows because we're // printing v' but have v stored result.append(Utils.doubleToString(m_v.get(j, i), 9, 5) + "\t"); } result.append((i + 1) + "\n"); } return result.toString(); } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5728 $"); } /** * Main method for testing this class * @param argv should contain the command line arguments to the * evaluator/transformer (see AttributeSelection) */ public static void main(String [] argv) { runEvaluator(new LatentSemanticAnalysis(), argv); } }
28,193
33.936803
100
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/LinearForwardSelection.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * LinearForwardSelection.java * Copyright (C) 2007 Martin Guetlein * */ package weka.attributeSelection; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.Range; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.Utils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.util.BitSet; import java.util.Enumeration; import java.util.Vector; /** <!-- globalinfo-start --> * LinearForwardSelection:<br/> * <br/> * Extension of BestFirst. Takes a restricted number of k attributes into account. Fixed-set selects a fixed number k of attributes, whereas k is increased in each step when fixed-width is selected. The search uses either the initial ordering to select the top k attributes, or performs a ranking (with the same evalutator the search uses later on). The search direction can be forward, or floating forward selection (with opitional backward search steps).<br/> * <br/> * For more information see:<br/> * <br/> * Martin Guetlein (2006). Large Scale Attribute Selection Using Wrappers. Freiburg, Germany. 
* <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P &lt;start set&gt; * Specify a starting set of attributes. * Eg. 1,3,5-7.</pre> * * <pre> -D &lt;0 = forward selection | 1 = floating forward selection&gt; * Forward selection method. (default = 0).</pre> * * <pre> -N &lt;num&gt; * Number of non-improving nodes to * consider before terminating search.</pre> * * <pre> -I * Perform initial ranking to select the * top-ranked attributes.</pre> * * <pre> -K &lt;num&gt; * Number of top-ranked attributes that are * taken into account by the search.</pre> * * <pre> -T &lt;0 = fixed-set | 1 = fixed-width&gt; * Type of Linear Forward Selection (default = 0).</pre> * * <pre> -S &lt;num&gt; * Size of lookup cache for evaluated subsets. * Expressed as a multiple of the number of * attributes in the data set. (default = 1)</pre> * * <pre> -Z * verbose on/off</pre> * <!-- options-end --> * * @author Martin Guetlein (martin.guetlein@gmail.com) * @version $Revision: 6161 $ */ public class LinearForwardSelection extends ASSearch implements OptionHandler, StartSetHandler, TechnicalInformationHandler { /** search directions */ protected static final int SEARCH_METHOD_FORWARD = 0; protected static final int SEARCH_METHOD_FLOATING = 1; public static final Tag[] TAGS_SEARCH_METHOD = { new Tag(SEARCH_METHOD_FORWARD, "Forward selection"), new Tag(SEARCH_METHOD_FLOATING, "Floating forward selection"), }; /** search directions */ protected static final int TYPE_FIXED_SET = 0; protected static final int TYPE_FIXED_WIDTH = 1; public static final Tag[] TAGS_TYPE = { new Tag(TYPE_FIXED_SET, "Fixed-set"), new Tag(TYPE_FIXED_WIDTH, "Fixed-width"), }; // member variables /** maximum number of stale nodes before terminating search */ protected int m_maxStale; /** 0 == forward selection, 1 == floating forward search */ protected int m_forwardSearchMethod; /** perform initial ranking to select top-ranked attributes */ protected boolean m_performRanking; /** * 
number of top-ranked attributes that are taken into account for the * search */ protected int m_numUsedAttributes; /** 0 == fixed-set, 1 == fixed-width */ protected int m_linearSelectionType; /** holds an array of starting attributes */ protected int[] m_starting; /** holds the start set for the search as a Range */ protected Range m_startRange; /** does the data have a class */ protected boolean m_hasClass; /** holds the class index */ protected int m_classIndex; /** number of attributes in the data */ protected int m_numAttribs; /** total number of subsets evaluated during a search */ protected int m_totalEvals; /** for debugging */ protected boolean m_verbose; /** holds the merit of the best subset found */ protected double m_bestMerit; /** holds the maximum size of the lookup cache for evaluated subsets */ protected int m_cacheSize; /** * Constructor */ public LinearForwardSelection() { resetOptions(); } /** * Returns a string describing this search method * * @return a description of the search method suitable for displaying in the * explorer/experimenter gui */ public String globalInfo() { return "LinearForwardSelection:\n\n" + "Extension of BestFirst. Takes a restricted number of k attributes " + "into account. Fixed-set selects a fixed number k of attributes, " + "whereas k is increased in each step when fixed-width is selected. " + "The search uses either the initial ordering to select the " + "top k attributes, or performs a ranking (with the same evalutator the " + "search uses later on). The search direction can be forward, " + "or floating forward selection (with opitional backward search steps).\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. 
* * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Martin Guetlein and Eibe Frank and Mark Hall and Andreas Karwath"); result.setValue(Field.YEAR, "2009"); result.setValue(Field.TITLE, "Large Scale Attribute Selection Using Wrappers"); result.setValue(Field.BOOKTITLE, "Proc IEEE Symposium on Computational Intelligence and Data Mining"); result.setValue(Field.PAGES, "332-339"); result.setValue(Field.PUBLISHER, "IEEE"); additional = result.add(Type.MASTERSTHESIS); additional.setValue(Field.AUTHOR, "Martin Guetlein"); additional.setValue(Field.YEAR, "2006"); additional.setValue(Field.TITLE, "Large Scale Attribute Selection Using Wrappers"); additional.setValue(Field.SCHOOL, "Albert-Ludwigs-Universitaet"); additional.setValue(Field.ADDRESS, "Freiburg, Germany"); return result; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. * */ public Enumeration listOptions() { Vector newVector = new Vector(8); newVector.addElement(new Option("\tSpecify a starting set of attributes." + "\n\tEg. 1,3,5-7.", "P", 1, "-P <start set>")); newVector.addElement(new Option( "\tForward selection method. 
(default = 0).", "D", 1, "-D <0 = forward selection | 1 = floating forward selection>")); newVector.addElement(new Option("\tNumber of non-improving nodes to" + "\n\tconsider before terminating search.", "N", 1, "-N <num>")); newVector.addElement(new Option("\tPerform initial ranking to select the" + "\n\ttop-ranked attributes.", "I", 0, "-I")); newVector.addElement(new Option( "\tNumber of top-ranked attributes that are " + "\n\ttaken into account by the search.", "K", 1, "-K <num>")); newVector.addElement(new Option( "\tType of Linear Forward Selection (default = 0).", "T", 1, "-T <0 = fixed-set | 1 = fixed-width>")); newVector.addElement(new Option( "\tSize of lookup cache for evaluated subsets." + "\n\tExpressed as a multiple of the number of" + "\n\tattributes in the data set. (default = 1)", "S", 1, "-S <num>")); newVector.addElement(new Option("\tverbose on/off", "Z", 0, "-Z")); return newVector.elements(); } /** * Parses a given list of options. * * Valid options are: * <p> * * -P <start set> <br> * Specify a starting set of attributes. Eg 1,4,7-9. * <p> * * -D <0 = forward selection | 1 = floating forward selection> <br> * Forward selection method of the search. (default = 0). * <p> * * -N <num> <br> * Number of non improving nodes to consider before terminating search. * (default = 5). * <p> * * -I <br> * Perform initial ranking to select top-ranked attributes. * <p> * * -K <num> <br> * Number of top-ranked attributes that are taken into account. * <p> * * -T <0 = fixed-set | 1 = fixed-width> <br> * Typ of Linear Forward Selection (default = 0). * <p> * * -S <num> <br> * Size of lookup cache for evaluated subsets. Expressed as a multiple of * the number of attributes in the data set. (default = 1). * <p> * * -Z <br> * verbose on/off. 
* <p> * * @param options * the list of options as an array of strings * @exception Exception * if an option is not supported * */ public void setOptions(String[] options) throws Exception { String optionString; resetOptions(); optionString = Utils.getOption('P', options); if (optionString.length() != 0) { setStartSet(optionString); } optionString = Utils.getOption('D', options); if (optionString.length() != 0) { setForwardSelectionMethod(new SelectedTag(Integer.parseInt(optionString), TAGS_SEARCH_METHOD)); } else { setForwardSelectionMethod(new SelectedTag(SEARCH_METHOD_FORWARD, TAGS_SEARCH_METHOD)); } optionString = Utils.getOption('N', options); if (optionString.length() != 0) { setSearchTermination(Integer.parseInt(optionString)); } setPerformRanking(Utils.getFlag('I', options)); optionString = Utils.getOption('K', options); if (optionString.length() != 0) { setNumUsedAttributes(Integer.parseInt(optionString)); } optionString = Utils.getOption('T', options); if (optionString.length() != 0) { setType(new SelectedTag(Integer.parseInt(optionString), TAGS_TYPE)); } else { setType(new SelectedTag(TYPE_FIXED_SET, TAGS_TYPE)); } optionString = Utils.getOption('S', options); if (optionString.length() != 0) { setLookupCacheSize(Integer.parseInt(optionString)); } m_verbose = Utils.getFlag('Z', options); } /** * Set the maximum size of the evaluated subset cache (hashtable). This is * expressed as a multiplier for the number of attributes in the data set. * (default = 1). * * @param size * the maximum size of the hashtable */ public void setLookupCacheSize(int size) { if (size >= 0) { m_cacheSize = size; } } /** * Return the maximum size of the evaluated subset cache (expressed as a * multiplier for the number of attributes in a data set. * * @return the maximum size of the hashtable. 
*/ public int getLookupCacheSize() { return m_cacheSize; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String lookupCacheSizeTipText() { return "Set the maximum size of the lookup cache of evaluated subsets. This is " + "expressed as a multiplier of the number of attributes in the data set. " + "(default = 1)."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String startSetTipText() { return "Set the start point for the search. This is specified as a comma " + "seperated list off attribute indexes starting at 1. It can include " + "ranges. Eg. 1,2,5-9,17."; } /** * Sets a starting set of attributes for the search. It is the search * method's responsibility to report this start set (if any) in its * toString() method. * * @param startSet * a string containing a list of attributes (and or ranges), eg. * 1,2,6,10-15. * @exception Exception * if start set can't be set. */ public void setStartSet(String startSet) throws Exception { m_startRange.setRanges(startSet); } /** * Returns a list of attributes (and or attribute ranges) as a String * * @return a list of attributes (and or attribute ranges) */ public String getStartSet() { return m_startRange.getRanges(); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String searchTerminationTipText() { return "Set the amount of backtracking. Specify the number of "; } /** * Set the numnber of non-improving nodes to consider before terminating * search. 
* * @param t * the number of non-improving nodes * @exception Exception * if t is less than 1 */ public void setSearchTermination(int t) throws Exception { if (t < 1) { throw new Exception("Value of -N must be > 0."); } m_maxStale = t; } /** * Get the termination criterion (number of non-improving nodes). * * @return the number of non-improving nodes */ public int getSearchTermination() { return m_maxStale; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String performRankingTipText() { return "Perform initial ranking to select top-ranked attributes."; } /** * Perform initial ranking to select top-ranked attributes. * * @param b * true if initial ranking should be performed */ public void setPerformRanking(boolean b) { m_performRanking = b; } /** * Get boolean if initial ranking should be performed to select the * top-ranked attributes * * @return true if initial ranking should be performed */ public boolean getPerformRanking() { return m_performRanking; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String numUsedAttributesTipText() { return "Set the amount of top-ranked attributes that are taken into account by the search process."; } /** * Set the number of top-ranked attributes that taken into account by the * search process. * * @param k * the number of attributes * @exception Exception * if k is less than 2 */ public void setNumUsedAttributes(int k) throws Exception { if (k < 2) { throw new Exception("Value of -K must be >= 2."); } m_numUsedAttributes = k; } /** * Get the number of top-ranked attributes that taken into account by the * search process. 
* * @return the number of top-ranked attributes that taken into account */ public int getNumUsedAttributes() { return m_numUsedAttributes; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String forwardSelectionMethodTipText() { return "Set the direction of the search."; } /** * Set the search direction * * @param d * the direction of the search */ public void setForwardSelectionMethod(SelectedTag d) { if (d.getTags() == TAGS_SEARCH_METHOD) { m_forwardSearchMethod = d.getSelectedTag().getID(); } } /** * Get the search direction * * @return the direction of the search */ public SelectedTag getForwardSelectionMethod() { return new SelectedTag(m_forwardSearchMethod, TAGS_SEARCH_METHOD); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String typeTipText() { return "Set the type of the search."; } /** * Set the type * * @param t * the Linear Forward Selection type */ public void setType(SelectedTag t) { if (t.getTags() == TAGS_TYPE) { m_linearSelectionType = t.getSelectedTag().getID(); } } /** * Get the type * * @return the Linear Forward Selection type */ public SelectedTag getType() { return new SelectedTag(m_linearSelectionType, TAGS_TYPE); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String verboseTipText() { return "Turn on verbose output for monitoring the search's progress."; } /** * Set whether verbose output should be generated. * * @param d * true if output is to be verbose. */ public void setVerbose(boolean b) { m_verbose = b; } /** * Get whether output is to be verbose * * @return true if output will be verbose */ public boolean getVerbose() { return m_verbose; } /** * Gets the current settings of LinearForwardSelection. 
* * @return an array of strings suitable for passing to setOptions() */ public String[] getOptions() { String[] options = new String[13]; int current = 0; if (!(getStartSet().equals(""))) { options[current++] = "-P"; options[current++] = "" + startSetToString(); } options[current++] = "-D"; options[current++] = "" + m_forwardSearchMethod; options[current++] = "-N"; options[current++] = "" + m_maxStale; if (m_performRanking) { options[current++] = "-I"; } options[current++] = "-K"; options[current++] = "" + m_numUsedAttributes; options[current++] = "-T"; options[current++] = "" + m_linearSelectionType; if (m_verbose) options[current++] = "-Z"; while (current < options.length) { options[current++] = ""; } return options; } /** * converts the array of starting attributes to a string. This is used by * getOptions to return the actual attributes specified as the starting set. * This is better than using m_startRanges.getRanges() as the same start set * can be specified in different ways from the command line---eg 1,2,3 == * 1-3. This is to ensure that stuff that is stored in a database is * comparable. 
* * @return a comma seperated list of individual attribute numbers as a * String */ private String startSetToString() { StringBuffer FString = new StringBuffer(); boolean didPrint; if (m_starting == null) { return getStartSet(); } for (int i = 0; i < m_starting.length; i++) { didPrint = false; if ((m_hasClass == false) || ((m_hasClass == true) && (i != m_classIndex))) { FString.append((m_starting[i] + 1)); didPrint = true; } if (i == (m_starting.length - 1)) { FString.append(""); } else { if (didPrint) { FString.append(","); } } } return FString.toString(); } /** * returns a description of the search as a String * * @return a description of the search */ public String toString() { StringBuffer LFSString = new StringBuffer(); LFSString.append("\tLinear Forward Selection.\n\tStart set: "); if (m_starting == null) { LFSString.append("no attributes\n"); } else { LFSString.append(startSetToString() + "\n"); } LFSString.append("\tForward selection method: "); if (m_forwardSearchMethod == SEARCH_METHOD_FORWARD) { LFSString.append("forward selection\n"); } else { LFSString.append("floating forward selection\n"); } LFSString.append("\tStale search after " + m_maxStale + " node expansions\n"); LFSString.append("\tLinear Forward Selection Type: "); if (m_linearSelectionType == TYPE_FIXED_SET) { LFSString.append("fixed-set\n"); } else { LFSString.append("fixed-width\n"); } LFSString.append("\tNumber of top-ranked attributes that are used: " + m_numUsedAttributes + "\n"); LFSString.append("\tTotal number of subsets evaluated: " + m_totalEvals + "\n"); LFSString.append("\tMerit of best subset found: " + Utils.doubleToString(Math.abs(m_bestMerit), 8, 3) + "\n"); return LFSString.toString(); } /** * Searches the attribute subset space by linear forward selection * * @param ASEvaluator * the attribute evaluator to guide the search * @param data * the training instances. 
* @return an array (not necessarily ordered) of selected attribute indexes * @exception Exception * if the search can't be completed */ public int[] search(ASEvaluation ASEval, Instances data) throws Exception { m_totalEvals = 0; if (!(ASEval instanceof SubsetEvaluator)) { throw new Exception(ASEval.getClass().getName() + " is not a " + "Subset evaluator!"); } if (ASEval instanceof UnsupervisedSubsetEvaluator) { m_hasClass = false; } else { m_hasClass = true; m_classIndex = data.classIndex(); } ((ASEvaluation) ASEval).buildEvaluator(data); m_numAttribs = data.numAttributes(); if (m_numUsedAttributes > m_numAttribs) { System.out.println( "Decreasing number of top-ranked attributes to total number of attributes: " + data.numAttributes()); m_numUsedAttributes = m_numAttribs; } BitSet start_group = new BitSet(m_numAttribs); m_startRange.setUpper(m_numAttribs - 1); if (!(getStartSet().equals(""))) { m_starting = m_startRange.getSelection(); } // If a starting subset has been supplied, then initialise the bitset if (m_starting != null) { for (int i = 0; i < m_starting.length; i++) { if ((m_starting[i]) != m_classIndex) { start_group.set(m_starting[i]); } } } LFSMethods LFS = new LFSMethods(); int[] ranking; if (m_performRanking) { ranking = LFS.rankAttributes(data, (SubsetEvaluator) ASEval, m_verbose); } else { ranking = new int[m_numAttribs]; for (int i = 0; i < ranking.length; i++) { ranking[i] = i; } } if (m_forwardSearchMethod == SEARCH_METHOD_FORWARD) { LFS.forwardSearch(m_cacheSize, start_group, ranking, m_numUsedAttributes, m_linearSelectionType == TYPE_FIXED_WIDTH, m_maxStale, -1, data, (SubsetEvaluator) ASEval, m_verbose); } else if (m_forwardSearchMethod == SEARCH_METHOD_FLOATING) { LFS.floatingForwardSearch(m_cacheSize, start_group, ranking, m_numUsedAttributes, m_linearSelectionType == TYPE_FIXED_WIDTH, m_maxStale, data, (SubsetEvaluator) ASEval, m_verbose); } m_totalEvals = LFS.getNumEvalsTotal(); m_bestMerit = LFS.getBestMerit(); return 
attributeList(LFS.getBestGroup()); } /** * Reset options to default values */ protected void resetOptions() { m_maxStale = 5; m_forwardSearchMethod = SEARCH_METHOD_FORWARD; m_performRanking = true; m_numUsedAttributes = 50; m_linearSelectionType = TYPE_FIXED_SET; m_starting = null; m_startRange = new Range(); m_classIndex = -1; m_totalEvals = 0; m_cacheSize = 1; m_verbose = false; } /** * converts a BitSet into a list of attribute indexes * * @param group * the BitSet to convert * @return an array of attribute indexes */ protected int[] attributeList(BitSet group) { int count = 0; // count how many were selected for (int i = 0; i < m_numAttribs; i++) { if (group.get(i)) { count++; } } int[] list = new int[count]; count = 0; for (int i = 0; i < m_numAttribs; i++) { if (group.get(i)) { list[count++] = i; } } return list; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 6161 $"); } }
25,949
28.691076
462
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/OneRAttributeEval.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * OneRAttributeEval.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.Evaluation; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Remove; /** <!-- globalinfo-start --> * OneRAttributeEval :<br/> * <br/> * Evaluates the worth of an attribute by using the OneR classifier.<br/> * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -S &lt;seed&gt; * Random number seed for cross validation * (default = 1)</pre> * * <pre> -F &lt;folds&gt; * Number of folds for cross validation * (default = 10)</pre> * * <pre> -D * Use training data for evaluation rather than cross validaton</pre> * * <pre> -B &lt;minimum bucket size&gt; * Minimum number of objects in a bucket * (passed on to OneR, default = 6)</pre> * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class OneRAttributeEval 
extends ASEvaluation implements AttributeEvaluator, OptionHandler { /** for serialization */ static final long serialVersionUID = 4386514823886856980L; /** The training instances */ private Instances m_trainInstances; /** The class index */ private int m_classIndex; /** The number of attributes */ private int m_numAttribs; /** The number of instances */ private int m_numInstances; /** Random number seed */ private int m_randomSeed; /** Number of folds for cross validation */ private int m_folds; /** Use training data to evaluate merit rather than x-val */ private boolean m_evalUsingTrainingData; /** Passed on to OneR */ private int m_minBucketSize; /** * Returns a string describing this attribute evaluator * @return a description of the evaluator suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "OneRAttributeEval :\n\nEvaluates the worth of an attribute by " +"using the OneR classifier.\n"; } /** * Returns a string for this option suitable for display in the gui * as a tip text * * @return a string describing this option */ public String seedTipText() { return "Set the seed for use in cross validation."; } /** * Set the random number seed for cross validation * * @param seed the seed to use */ public void setSeed(int seed) { m_randomSeed = seed; } /** * Get the random number seed * * @return an <code>int</code> value */ public int getSeed() { return m_randomSeed; } /** * Returns a string for this option suitable for display in the gui * as a tip text * * @return a string describing this option */ public String foldsTipText() { return "Set the number of folds for cross validation."; } /** * Set the number of folds to use for cross validation * * @param folds the number of folds */ public void setFolds(int folds) { m_folds = folds; if (m_folds < 2) { m_folds = 2; } } /** * Get the number of folds used for cross validation * * @return the number of folds */ public int getFolds() { return m_folds; } /** * Returns a 
string for this option suitable for display in the gui * as a tip text * * @return a string describing this option */ public String evalUsingTrainingDataTipText() { return "Use the training data to evaluate attributes rather than " + "cross validation."; } /** * Use the training data to evaluate attributes rather than cross validation * * @param e true if training data is to be used for evaluation */ public void setEvalUsingTrainingData(boolean e) { m_evalUsingTrainingData = e; } /** * Returns a string for this option suitable for display in the gui * as a tip text * * @return a string describing this option */ public String minimumBucketSizeTipText() { return "The minimum number of objects in a bucket " + "(passed to OneR)."; } /** * Set the minumum bucket size used by OneR * * @param minB the minimum bucket size to use */ public void setMinimumBucketSize(int minB) { m_minBucketSize = minB; } /** * Get the minimum bucket size used by oneR * * @return the minimum bucket size used */ public int getMinimumBucketSize() { return m_minBucketSize; } /** * Returns true if the training data is to be used for evaluation * * @return true if training data is to be used for evaluation */ public boolean getEvalUsingTrainingData() { return m_evalUsingTrainingData; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ public Enumeration listOptions() { Vector newVector = new Vector(4); newVector.addElement(new Option( "\tRandom number seed for cross validation\n" + "\t(default = 1)", "S", 1, "-S <seed>")); newVector.addElement(new Option( "\tNumber of folds for cross validation\n" + "\t(default = 10)", "F", 1, "-F <folds>")); newVector.addElement(new Option( "\tUse training data for evaluation rather than cross validaton", "D", 0, "-D")); newVector.addElement(new Option( "\tMinimum number of objects in a bucket\n" + "\t(passed on to " +"OneR, default = 6)", "B", 1, "-B <minimum bucket size>")); return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -S &lt;seed&gt; * Random number seed for cross validation * (default = 1)</pre> * * <pre> -F &lt;folds&gt; * Number of folds for cross validation * (default = 10)</pre> * * <pre> -D * Use training data for evaluation rather than cross validaton</pre> * * <pre> -B &lt;minimum bucket size&gt; * Minimum number of objects in a bucket * (passed on to OneR, default = 6)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String [] options) throws Exception { String temp = Utils.getOption('S', options); if (temp.length() != 0) { setSeed(Integer.parseInt(temp)); } temp = Utils.getOption('F', options); if (temp.length() != 0) { setFolds(Integer.parseInt(temp)); } temp = Utils.getOption('B', options); if (temp.length() != 0) { setMinimumBucketSize(Integer.parseInt(temp)); } setEvalUsingTrainingData(Utils.getFlag('D', options)); Utils.checkForRemainingOptions(options); } /** * returns the current setup. 
* * @return the options of the current setup */ public String[] getOptions() { String [] options = new String [7]; int current = 0; if (getEvalUsingTrainingData()) { options[current++] = "-D"; } options[current++] = "-S"; options[current++] = "" + getSeed(); options[current++] = "-F"; options[current++] = "" + getFolds(); options[current++] = "-B"; options[current++] = "" + getMinimumBucketSize(); while (current < options.length) { options[current++] = ""; } return options; } /** * Constructor */ public OneRAttributeEval () { resetOptions(); } /** * Returns the capabilities of this evaluator. * * @return the capabilities of this evaluator * @see Capabilities */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Initializes a OneRAttribute attribute evaluator. * Discretizes all attributes that are numeric. * * @param data set of instances serving as training data * @throws Exception if the evaluator has not been * generated successfully */ public void buildEvaluator (Instances data) throws Exception { // can evaluator handle data? getCapabilities().testWithFail(data); m_trainInstances = data; m_classIndex = m_trainInstances.classIndex(); m_numAttribs = m_trainInstances.numAttributes(); m_numInstances = m_trainInstances.numInstances(); } /** * rests to defaults. */ protected void resetOptions () { m_trainInstances = null; m_randomSeed = 1; m_folds = 10; m_evalUsingTrainingData = false; m_minBucketSize = 6; // default used by OneR } /** * evaluates an individual attribute by measuring the amount * of information gained about the class given the attribute. 
* * @param attribute the index of the attribute to be evaluated * @throws Exception if the attribute could not be evaluated */ public double evaluateAttribute (int attribute) throws Exception { int[] featArray = new int[2]; // feat + class double errorRate; Evaluation o_Evaluation; Remove delTransform = new Remove(); delTransform.setInvertSelection(true); // copy the instances Instances trainCopy = new Instances(m_trainInstances); featArray[0] = attribute; featArray[1] = trainCopy.classIndex(); delTransform.setAttributeIndicesArray(featArray); delTransform.setInputFormat(trainCopy); trainCopy = Filter.useFilter(trainCopy, delTransform); o_Evaluation = new Evaluation(trainCopy); String [] oneROpts = { "-B", ""+getMinimumBucketSize()}; Classifier oneR = AbstractClassifier.forName("weka.classifiers.rules.OneR", oneROpts); if (m_evalUsingTrainingData) { oneR.buildClassifier(trainCopy); o_Evaluation.evaluateModel(oneR, trainCopy); } else { /* o_Evaluation.crossValidateModel("weka.classifiers.rules.OneR", trainCopy, 10, null, new Random(m_randomSeed)); */ o_Evaluation.crossValidateModel(oneR, trainCopy, m_folds, new Random(m_randomSeed)); } errorRate = o_Evaluation.errorRate(); return (1 - errorRate)*100.0; } /** * Return a description of the evaluator * @return description as a string */ public String toString () { StringBuffer text = new StringBuffer(); if (m_trainInstances == null) { text.append("\tOneR feature evaluator has not been built yet"); } else { text.append("\tOneR feature evaluator.\n\n"); text.append("\tUsing "); if (m_evalUsingTrainingData) { text.append("training data for evaluation of attributes."); } else { text.append(""+getFolds()+" fold cross validation for evaluating " +"attributes."); } text.append("\n\tMinimum bucket size for OneR: " +getMinimumBucketSize()); } text.append("\n"); return text.toString(); } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } // ============ // Test method. // ============ /** * Main method for testing this class. * * @param args the options */ public static void main (String[] args) { runEvaluator(new OneRAttributeEval(), args); } }
12,786
25.364948
90
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/PrincipalComponents.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * PrincipalComponents.java * Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import java.util.Enumeration; import java.util.Vector; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.DenseInstance; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.Matrix; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SparseInstance; import weka.core.Utils; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Center; import weka.filters.unsupervised.attribute.NominalToBinary; import weka.filters.unsupervised.attribute.Remove; import weka.filters.unsupervised.attribute.ReplaceMissingValues; import weka.filters.unsupervised.attribute.Standardize; /** <!-- globalinfo-start --> * Performs a principal components analysis and transformation of the data. Use in conjunction with a Ranker search. Dimensionality reduction is accomplished by choosing enough eigenvectors to account for some percentage of the variance in the original data---default 0.95 (95%). Attribute noise can be filtered by transforming to the PC space, eliminating some of the worst eigenvectors, and then transforming back to the original space. 
* <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -C * Center (rather than standardize) the * data and compute PCA using the covariance (rather * than the correlation) matrix.</pre> * * <pre> -R * Retain enough PC attributes to account * for this proportion of variance in the original data. * (default = 0.95)</pre> * * <pre> -O * Transform through the PC space and * back to the original space.</pre> * * <pre> -A * Maximum number of attributes to include in * transformed attribute names. (-1 = include all)</pre> * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @author Gabi Schmidberger (gabi@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class PrincipalComponents extends UnsupervisedAttributeEvaluator implements AttributeTransformer, OptionHandler { /** for serialization */ private static final long serialVersionUID = -3675307197777734007L; /** The data to transform analyse/transform */ private Instances m_trainInstances; /** Keep a copy for the class attribute (if set) */ private Instances m_trainHeader; /** The header for the transformed data format */ private Instances m_transformedFormat; /** The header for data transformed back to the original space */ private Instances m_originalSpaceFormat; /** Data has a class set */ private boolean m_hasClass; /** Class index */ private int m_classIndex; /** Number of attributes */ private int m_numAttribs; /** Number of instances */ private int m_numInstances; /** Correlation/covariance matrix for the original data */ private double [][] m_correlation; private double[] m_means; private double[] m_stdDevs; /** * If true, center (rather than standardize) the data and * compute PCA from covariance (rather than correlation) * matrix. 
*/ private boolean m_center = false; /** Will hold the unordered linear transformations of the (normalized) original data */ private double [][] m_eigenvectors; /** Eigenvalues for the corresponding eigenvectors */ private double [] m_eigenvalues = null; /** Sorted eigenvalues */ private int [] m_sortedEigens; /** sum of the eigenvalues */ private double m_sumOfEigenValues = 0.0; /** Filters for original data */ private ReplaceMissingValues m_replaceMissingFilter; private NominalToBinary m_nominalToBinFilter; private Remove m_attributeFilter; private Center m_centerFilter; private Standardize m_standardizeFilter; /** The number of attributes in the pc transformed data */ private int m_outputNumAtts = -1; /** the amount of variance to cover in the original data when retaining the best n PC's */ private double m_coverVariance = 0.95; /** transform the data through the pc space and back to the original space ? */ private boolean m_transBackToOriginal = false; /** maximum number of attributes in the transformed attribute name */ private int m_maxAttrsInName = 5; /** holds the transposed eigenvectors for converting back to the original space */ private double [][] m_eTranspose; /** * Returns a string describing this attribute transformer * @return a description of the evaluator suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Performs a principal components analysis and transformation of " +"the data. Use in conjunction with a Ranker search. Dimensionality " +"reduction is accomplished by choosing enough eigenvectors to " +"account for some percentage of the variance in the original data---" +"default 0.95 (95%). Attribute noise can be filtered by transforming " +"to the PC space, eliminating some of the worst eigenvectors, and " +"then transforming back to the original space."; } /** * Returns an enumeration describing the available options. <p> * * @return an enumeration of all the available options. 
**/ public Enumeration listOptions () { Vector newVector = new Vector(3); newVector.addElement(new Option("\tCenter (rather than standardize) the" + "\n\tdata and compute PCA using the covariance (rather" + "\n\t than the correlation) matrix.", "C", 0, "-C")); newVector.addElement(new Option("\tRetain enough PC attributes to account " +"\n\tfor this proportion of variance in " +"the original data.\n" + "\t(default = 0.95)", "R",1,"-R")); newVector.addElement(new Option("\tTransform through the PC space and " +"\n\tback to the original space." , "O", 0, "-O")); newVector.addElement(new Option("\tMaximum number of attributes to include in " + "\n\ttransformed attribute names. (-1 = include all)" , "A", 1, "-A")); return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -C * Center (rather than standardize) the * data and compute PCA using the covariance (rather * than the correlation) matrix.</pre> * * <pre> -R * Retain enough PC attributes to account * for this proportion of variance in the original data. * (default = 0.95)</pre> * * <pre> -O * Transform through the PC space and * back to the original space.</pre> * * <pre> -A * Maximum number of attributes to include in * transformed attribute names. 
(-1 = include all)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions (String[] options) throws Exception { resetOptions(); String optionString; optionString = Utils.getOption('R', options); if (optionString.length() != 0) { Double temp; temp = Double.valueOf(optionString); setVarianceCovered(temp.doubleValue()); } optionString = Utils.getOption('A', options); if (optionString.length() != 0) { setMaximumAttributeNames(Integer.parseInt(optionString)); } setTransformBackToOriginal(Utils.getFlag('O', options)); setCenterData(Utils.getFlag('C', options)); } /** * Reset to defaults */ private void resetOptions() { m_coverVariance = 0.95; m_sumOfEigenValues = 0.0; m_transBackToOriginal = false; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String centerDataTipText() { return "Center (rather than standardize) the data. PCA will " + "be computed from the covariance (rather than correlation) " + "matrix"; } /** * Set whether to center (rather than standardize) * the data. If set to true then PCA is computed * from the covariance rather than correlation matrix. * * @param center true if the data is to be * centered rather than standardized */ public void setCenterData(boolean center) { m_center = center; } /** * Get whether to center (rather than standardize) * the data. If true then PCA is computed * from the covariance rather than correlation matrix. * * @return true if the data is to be centered rather * than standardized. 
*/ public boolean getCenterData() { return m_center; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String varianceCoveredTipText() { return "Retain enough PC attributes to account for this proportion of " +"variance."; } /** * Sets the amount of variance to account for when retaining * principal components * @param vc the proportion of total variance to account for */ public void setVarianceCovered(double vc) { m_coverVariance = vc; } /** * Gets the proportion of total variance to account for when * retaining principal components * @return the proportion of variance to account for */ public double getVarianceCovered() { return m_coverVariance; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String maximumAttributeNamesTipText() { return "The maximum number of attributes to include in transformed attribute names."; } /** * Sets maximum number of attributes to include in * transformed attribute names. * @param m the maximum number of attributes */ public void setMaximumAttributeNames(int m) { m_maxAttrsInName = m; } /** * Gets maximum number of attributes to include in * transformed attribute names. * @return the maximum number of attributes */ public int getMaximumAttributeNames() { return m_maxAttrsInName; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String transformBackToOriginalTipText() { return "Transform through the PC space and back to the original space. 
" +"If only the best n PCs are retained (by setting varianceCovered < 1) " +"then this option will give a dataset in the original space but with " +"less attribute noise."; } /** * Sets whether the data should be transformed back to the original * space * @param b true if the data should be transformed back to the * original space */ public void setTransformBackToOriginal(boolean b) { m_transBackToOriginal = b; } /** * Gets whether the data is to be transformed back to the original * space. * @return true if the data is to be transformed back to the original space */ public boolean getTransformBackToOriginal() { return m_transBackToOriginal; } /** * Gets the current settings of PrincipalComponents * * @return an array of strings suitable for passing to setOptions() */ public String[] getOptions () { String[] options = new String[6]; int current = 0; if (getCenterData()) { options[current++] = "-C"; } options[current++] = "-R"; options[current++] = ""+getVarianceCovered(); options[current++] = "-A"; options[current++] = ""+getMaximumAttributeNames(); if (getTransformBackToOriginal()) { options[current++] = "-O"; } while (current < options.length) { options[current++] = ""; } return options; } /** * Returns the capabilities of this evaluator. 
* * @return the capabilities of this evaluator * @see Capabilities */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); result.enable(Capability.NO_CLASS); return result; } /** * Initializes principal components and performs the analysis * @param data the instances to analyse/transform * @throws Exception if analysis fails */ public void buildEvaluator(Instances data) throws Exception { // can evaluator handle data? getCapabilities().testWithFail(data); buildAttributeConstructor(data); } private void buildAttributeConstructor (Instances data) throws Exception { m_eigenvalues = null; m_outputNumAtts = -1; m_attributeFilter = null; m_nominalToBinFilter = null; m_sumOfEigenValues = 0.0; m_trainInstances = new Instances(data); // make a copy of the training data so that we can get the class // column to append to the transformed data (if necessary) m_trainHeader = new Instances(m_trainInstances, 0); m_replaceMissingFilter = new ReplaceMissingValues(); m_replaceMissingFilter.setInputFormat(m_trainInstances); m_trainInstances = Filter.useFilter(m_trainInstances, m_replaceMissingFilter); /*if (m_normalize) { m_normalizeFilter = new Normalize(); m_normalizeFilter.setInputFormat(m_trainInstances); m_trainInstances = Filter.useFilter(m_trainInstances, m_normalizeFilter); } */ m_nominalToBinFilter = new NominalToBinary(); m_nominalToBinFilter.setInputFormat(m_trainInstances); m_trainInstances = Filter.useFilter(m_trainInstances, m_nominalToBinFilter); // delete any attributes with only one distinct value or are all missing Vector deleteCols = new Vector(); 
for (int i=0;i<m_trainInstances.numAttributes();i++) { if (m_trainInstances.numDistinctValues(i) <=1) { deleteCols.addElement(new Integer(i)); } } if (m_trainInstances.classIndex() >=0) { // get rid of the class column m_hasClass = true; m_classIndex = m_trainInstances.classIndex(); deleteCols.addElement(new Integer(m_classIndex)); } // remove columns from the data if necessary if (deleteCols.size() > 0) { m_attributeFilter = new Remove(); int [] todelete = new int [deleteCols.size()]; for (int i=0;i<deleteCols.size();i++) { todelete[i] = ((Integer)(deleteCols.elementAt(i))).intValue(); } m_attributeFilter.setAttributeIndicesArray(todelete); m_attributeFilter.setInvertSelection(false); m_attributeFilter.setInputFormat(m_trainInstances); m_trainInstances = Filter.useFilter(m_trainInstances, m_attributeFilter); } // can evaluator handle the processed data ? e.g., enough attributes? getCapabilities().testWithFail(m_trainInstances); m_numInstances = m_trainInstances.numInstances(); m_numAttribs = m_trainInstances.numAttributes(); //fillCorrelation(); fillCovariance(); double [] d = new double[m_numAttribs]; double [][] v = new double[m_numAttribs][m_numAttribs]; Matrix corr = new Matrix(m_correlation); corr.eigenvalueDecomposition(v, d); m_eigenvectors = (double [][])v.clone(); m_eigenvalues = (double [])d.clone(); /*for (int i = 0; i < m_numAttribs; i++) { for (int j = 0; j < m_numAttribs; j++) { System.err.println(v[i][j] + " "); } System.err.println(d[i]); } */ // any eigenvalues less than 0 are not worth anything --- change to 0 for (int i = 0; i < m_eigenvalues.length; i++) { if (m_eigenvalues[i] < 0) { m_eigenvalues[i] = 0.0; } } m_sortedEigens = Utils.sort(m_eigenvalues); m_sumOfEigenValues = Utils.sum(m_eigenvalues); m_transformedFormat = setOutputFormat(); if (m_transBackToOriginal) { m_originalSpaceFormat = setOutputFormatOriginal(); // new ordered eigenvector matrix int numVectors = (m_transformedFormat.classIndex() < 0) ? 
m_transformedFormat.numAttributes() : m_transformedFormat.numAttributes() - 1; double [][] orderedVectors = new double [m_eigenvectors.length][numVectors + 1]; // try converting back to the original space for (int i = m_numAttribs - 1; i > (m_numAttribs - numVectors - 1); i--) { for (int j = 0; j < m_numAttribs; j++) { orderedVectors[j][m_numAttribs - i] = m_eigenvectors[j][m_sortedEigens[i]]; } } // transpose the matrix int nr = orderedVectors.length; int nc = orderedVectors[0].length; m_eTranspose = new double [nc][nr]; for (int i = 0; i < nc; i++) { for (int j = 0; j < nr; j++) { m_eTranspose[i][j] = orderedVectors[j][i]; } } } } /** * Returns just the header for the transformed data (ie. an empty * set of instances. This is so that AttributeSelection can * determine the structure of the transformed data without actually * having to get all the transformed data through transformedData(). * @return the header of the transformed data. * @throws Exception if the header of the transformed data can't * be determined. */ public Instances transformedHeader() throws Exception { if (m_eigenvalues == null) { throw new Exception("Principal components hasn't been built yet"); } if (m_transBackToOriginal) { return m_originalSpaceFormat; } else { return m_transformedFormat; } } /** * Gets the transformed training data. * @return the transformed training data * @throws Exception if transformed data can't be returned */ public Instances transformedData(Instances data) throws Exception { if (m_eigenvalues == null) { throw new Exception("Principal components hasn't been built yet"); } Instances output = null; if (m_transBackToOriginal) { output = new Instances(m_originalSpaceFormat); } else { output = new Instances(m_transformedFormat); } for (int i = 0; i < data.numInstances(); i++) { Instance converted = convertInstance(data.instance(i)); output.add(converted); } return output; } /** * Evaluates the merit of a transformed attribute. 
This is defined * to be 1 minus the cumulative variance explained. Merit can't * be meaningfully evaluated if the data is to be transformed back * to the original space. * @param att the attribute to be evaluated * @return the merit of a transformed attribute * @throws Exception if attribute can't be evaluated */ public double evaluateAttribute(int att) throws Exception { if (m_eigenvalues == null) { throw new Exception("Principal components hasn't been built yet!"); } if (m_transBackToOriginal) { return 1.0; // can't evaluate back in the original space! } // return 1-cumulative variance explained for this transformed att double cumulative = 0.0; for (int i = m_numAttribs - 1; i >= m_numAttribs - att - 1; i--) { cumulative += m_eigenvalues[m_sortedEigens[i]]; } return 1.0 - cumulative / m_sumOfEigenValues; } private void fillCovariance() throws Exception { // first store the means m_means = new double[m_trainInstances.numAttributes()]; m_stdDevs = new double[m_trainInstances.numAttributes()]; for (int i = 0; i < m_trainInstances.numAttributes(); i++) { m_means[i] = m_trainInstances.meanOrMode(i); } if (!m_center) { fillCorrelation(); return; } double[] att = new double[m_trainInstances.numInstances()]; // now center the data by subtracting the mean m_centerFilter = new Center(); m_centerFilter.setInputFormat(m_trainInstances); m_trainInstances = Filter.useFilter(m_trainInstances, m_centerFilter); // now compute the covariance matrix m_correlation = new double[m_numAttribs][m_numAttribs]; for (int i = 0; i < m_numAttribs; i++) { for (int j = 0; j < m_numAttribs; j++) { double cov = 0; for (int k = 0; k < m_numInstances; k++) { if (i == j) { cov += (m_trainInstances.instance(k).value(i) * m_trainInstances.instance(k).value(i)); } else { cov += (m_trainInstances.instance(k).value(i) * m_trainInstances.instance(k).value(j)); } } cov /= (double)(m_trainInstances.numInstances() - 1); m_correlation[i][j] = cov; m_correlation[j][i] = cov; } } } /** * Fill the correlation 
matrix */ private void fillCorrelation() throws Exception { m_correlation = new double[m_numAttribs][m_numAttribs]; double [] att1 = new double [m_numInstances]; double [] att2 = new double [m_numInstances]; double corr; for (int i = 0; i < m_numAttribs; i++) { for (int j = 0; j < m_numAttribs; j++) { for (int k = 0; k < m_numInstances; k++) { att1[k] = m_trainInstances.instance(k).value(i); att2[k] = m_trainInstances.instance(k).value(j); } if (i == j) { m_correlation[i][j] = 1.0; // store the standard deviation m_stdDevs[i] = Math.sqrt(Utils.variance(att1)); } else { corr = Utils.correlation(att1,att2,m_numInstances); m_correlation[i][j] = corr; m_correlation[j][i] = corr; } } } // now standardize the input data m_standardizeFilter = new Standardize(); m_standardizeFilter.setInputFormat(m_trainInstances); m_trainInstances = Filter.useFilter(m_trainInstances, m_standardizeFilter); } /** * Return a summary of the analysis * @return a summary of the analysis. */ private String principalComponentsSummary() { StringBuffer result = new StringBuffer(); double cumulative = 0.0; Instances output = null; int numVectors=0; try { output = setOutputFormat(); numVectors = (output.classIndex() < 0) ? output.numAttributes() : output.numAttributes()-1; } catch (Exception ex) { } //tomorrow String corrCov = (m_center) ? "Covariance " : "Correlation "; result.append(corrCov + "matrix\n"+matrixToString(m_correlation) +"\n\n"); result.append("eigenvalue\tproportion\tcumulative\n"); for (int i = m_numAttribs - 1; i > (m_numAttribs - numVectors - 1); i--) { cumulative+=m_eigenvalues[m_sortedEigens[i]]; result.append(Utils.doubleToString(m_eigenvalues[m_sortedEigens[i]],9,5) +"\t"+Utils. 
doubleToString((m_eigenvalues[m_sortedEigens[i]] / m_sumOfEigenValues), 9,5) +"\t"+Utils.doubleToString((cumulative / m_sumOfEigenValues),9,5) +"\t"+output.attribute(m_numAttribs - i - 1).name()+"\n"); } result.append("\nEigenvectors\n"); for (int j = 1;j <= numVectors;j++) { result.append(" V"+j+'\t'); } result.append("\n"); for (int j = 0; j < m_numAttribs; j++) { for (int i = m_numAttribs - 1; i > (m_numAttribs - numVectors - 1); i--) { result.append(Utils. doubleToString(m_eigenvectors[j][m_sortedEigens[i]],7,4) +"\t"); } result.append(m_trainInstances.attribute(j).name()+'\n'); } if (m_transBackToOriginal) { result.append("\nPC space transformed back to original space.\n" +"(Note: can't evaluate attributes in the original " +"space)\n"); } return result.toString(); } /** * Returns a description of this attribute transformer * @return a String describing this attribute transformer */ public String toString() { if (m_eigenvalues == null) { return "Principal components hasn't been built yet!"; } else { return "\tPrincipal Components Attribute Transformer\n\n" +principalComponentsSummary(); } } /** * Return a matrix as a String * @param matrix that is decribed as a string * @return a String describing a matrix */ private String matrixToString(double [][] matrix) { StringBuffer result = new StringBuffer(); int last = matrix.length - 1; for (int i = 0; i <= last; i++) { for (int j = 0; j <= last; j++) { result.append(Utils.doubleToString(matrix[i][j],6,2)+" "); if (j == last) { result.append('\n'); } } } return result.toString(); } /** * Convert a pc transformed instance back to the original space * * @param inst the instance to convert * @return the processed instance * @throws Exception if something goes wrong */ private Instance convertInstanceToOriginal(Instance inst) throws Exception { double[] newVals = null; if (m_hasClass) { newVals = new double[m_numAttribs+1]; } else { newVals = new double[m_numAttribs]; } if (m_hasClass) { // class is always appended as 
the last attribute newVals[m_numAttribs] = inst.value(inst.numAttributes() - 1); } for (int i = 0; i < m_eTranspose[0].length; i++) { double tempval = 0.0; for (int j = 1; j < m_eTranspose.length; j++) { tempval += (m_eTranspose[j][i] * inst.value(j - 1)); } newVals[i] = tempval; if (!m_center) { newVals[i] *= m_stdDevs[i]; } newVals[i] += m_means[i]; } if (inst instanceof SparseInstance) { return new SparseInstance(inst.weight(), newVals); } else { return new DenseInstance(inst.weight(), newVals); } } /** * Transform an instance in original (unormalized) format. Convert back * to the original space if requested. * @param instance an instance in the original (unormalized) format * @return a transformed instance * @throws Exception if instance cant be transformed */ public Instance convertInstance(Instance instance) throws Exception { if (m_eigenvalues == null) { throw new Exception("convertInstance: Principal components not " +"built yet"); } double[] newVals = new double[m_outputNumAtts]; Instance tempInst = (Instance)instance.copy(); if (!instance.dataset().equalHeaders(m_trainHeader)) { throw new Exception("Can't convert instance: header's don't match: " +"PrincipalComponents\n" + instance.dataset().equalHeadersMsg(m_trainHeader)); } m_replaceMissingFilter.input(tempInst); m_replaceMissingFilter.batchFinished(); tempInst = m_replaceMissingFilter.output(); /*if (m_normalize) { m_normalizeFilter.input(tempInst); m_normalizeFilter.batchFinished(); tempInst = m_normalizeFilter.output(); }*/ m_nominalToBinFilter.input(tempInst); m_nominalToBinFilter.batchFinished(); tempInst = m_nominalToBinFilter.output(); if (m_attributeFilter != null) { m_attributeFilter.input(tempInst); m_attributeFilter.batchFinished(); tempInst = m_attributeFilter.output(); } if (!m_center) { m_standardizeFilter.input(tempInst); m_standardizeFilter.batchFinished(); tempInst = m_standardizeFilter.output(); } else { m_centerFilter.input(tempInst); m_centerFilter.batchFinished(); tempInst = 
m_centerFilter.output(); } if (m_hasClass) { newVals[m_outputNumAtts - 1] = instance.value(instance.classIndex()); } double cumulative = 0; for (int i = m_numAttribs - 1; i >= 0; i--) { double tempval = 0.0; for (int j = 0; j < m_numAttribs; j++) { tempval += (m_eigenvectors[j][m_sortedEigens[i]] * tempInst.value(j)); } newVals[m_numAttribs - i - 1] = tempval; cumulative+=m_eigenvalues[m_sortedEigens[i]]; if ((cumulative / m_sumOfEigenValues) >= m_coverVariance) { break; } } if (!m_transBackToOriginal) { if (instance instanceof SparseInstance) { return new SparseInstance(instance.weight(), newVals); } else { return new DenseInstance(instance.weight(), newVals); } } else { if (instance instanceof SparseInstance) { return convertInstanceToOriginal(new SparseInstance(instance.weight(), newVals)); } else { return convertInstanceToOriginal(new DenseInstance(instance.weight(), newVals)); } } } /** * Set up the header for the PC->original space dataset * * @return the output format * @throws Exception if something goes wrong */ private Instances setOutputFormatOriginal() throws Exception { FastVector attributes = new FastVector(); for (int i = 0; i < m_numAttribs; i++) { String att = m_trainInstances.attribute(i).name(); attributes.addElement(new Attribute(att)); } if (m_hasClass) { attributes.addElement(m_trainHeader.classAttribute().copy()); } Instances outputFormat = new Instances(m_trainHeader.relationName()+"->PC->original space", attributes, 0); // set the class to be the last attribute if necessary if (m_hasClass) { outputFormat.setClassIndex(outputFormat.numAttributes()-1); } return outputFormat; } /** * Set the format for the transformed data * @return a set of empty Instances (header only) in the new format * @throws Exception if the output format can't be set */ private Instances setOutputFormat() throws Exception { if (m_eigenvalues == null) { return null; } double cumulative = 0.0; FastVector attributes = new FastVector(); for (int i = m_numAttribs - 1; i >= 
0; i--) { StringBuffer attName = new StringBuffer(); // build array of coefficients double[] coeff_mags = new double[m_numAttribs]; for (int j = 0; j < m_numAttribs; j++) coeff_mags[j] = -Math.abs(m_eigenvectors[j][m_sortedEigens[i]]); int num_attrs = (m_maxAttrsInName > 0) ? Math.min(m_numAttribs, m_maxAttrsInName) : m_numAttribs; // this array contains the sorted indices of the coefficients int[] coeff_inds; if (m_numAttribs > 0) { // if m_maxAttrsInName > 0, sort coefficients by decreasing magnitude coeff_inds = Utils.sort(coeff_mags); } else { // if m_maxAttrsInName <= 0, use all coeffs in original order coeff_inds = new int[m_numAttribs]; for (int j=0; j<m_numAttribs; j++) coeff_inds[j] = j; } // build final attName string for (int j = 0; j < num_attrs; j++) { double coeff_value = m_eigenvectors[coeff_inds[j]][m_sortedEigens[i]]; if (j > 0 && coeff_value >= 0) attName.append("+"); attName.append(Utils.doubleToString(coeff_value,5,3) +m_trainInstances.attribute(coeff_inds[j]).name()); } if (num_attrs < m_numAttribs) attName.append("..."); attributes.addElement(new Attribute(attName.toString())); cumulative+=m_eigenvalues[m_sortedEigens[i]]; if ((cumulative / m_sumOfEigenValues) >= m_coverVariance) { break; } } if (m_hasClass) { attributes.addElement(m_trainHeader.classAttribute().copy()); } Instances outputFormat = new Instances(m_trainInstances.relationName()+"_principal components", attributes, 0); // set the class to be the last attribute if necessary if (m_hasClass) { outputFormat.setClassIndex(outputFormat.numAttributes()-1); } m_outputNumAtts = outputFormat.numAttributes(); return outputFormat; } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Main method for testing this class * @param argv should contain the command line arguments to the * evaluator/transformer (see AttributeSelection) */ public static void main(String [] argv) { runEvaluator(new PrincipalComponents(), argv); } }
34,327
31.384906
438
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/RaceSearch.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * RaceSearch.java * Copyright (C) 2000 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Statistics; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.PairedStats; import weka.core.Stats; import java.util.BitSet; import java.util.Enumeration; import java.util.Random; import java.util.Vector; /** <!-- globalinfo-start --> * Races the cross validation error of competing attribute subsets. Use in conjuction with a ClassifierSubsetEval. RaceSearch has four modes:<br/> * <br/> * forward selection races all single attribute additions to a base set (initially no attributes), selects the winner to become the new base set and then iterates until there is no improvement over the base set. <br/> * <br/> * Backward elimination is similar but the initial base set has all attributes included and races all single attribute deletions. <br/> * <br/> * Schemata search is a bit different. 
Each iteration a series of races are run in parallel. Each race in a set determines whether a particular attribute should be included or not---ie the race is between the attribute being "in" or "out". The other attributes for this race are included or excluded randomly at each point in the evaluation. As soon as one race has a clear winner (ie it has been decided whether a particular attribute should be inor not) then the next set of races begins, using the result of the winning race from the previous iteration as new base set.<br/> * <br/> * Rank race first ranks the attributes using an attribute evaluator and then races the ranking. The race includes no attributes, the top ranked attribute, the top two attributes, the top three attributes, etc.<br/> * <br/> * It is also possible to generate a raked list of attributes through the forward racing process. If generateRanking is set to true then a complete forward race will be run---that is, racing continues until all attributes have been selected. The order that they are added in determines a complete ranking of all the attributes.<br/> * <br/> * Racing uses paired and unpaired t-tests on cross-validation errors of competing subsets. When there is a significant difference between the means of the errors of two competing subsets then the poorer of the two can be eliminated from the race. Similarly, if there is no significant difference between the mean errors of two competing subsets and they are within some threshold of each other, then one can be eliminated from the race.<br/> * <br/> * For more information see:<br/> * <br/> * Andrew W. Moore, Mary S. Lee: Efficient Algorithms for Minimizing Cross Validation Error. In: Eleventh International Conference on Machine Learning, 190-198, 1994. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Moore1994, * author = {Andrew W. Moore and Mary S. 
Lee}, * booktitle = {Eleventh International Conference on Machine Learning}, * pages = {190-198}, * publisher = {Morgan Kaufmann}, * title = {Efficient Algorithms for Minimizing Cross Validation Error}, * year = {1994} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -R &lt;0 = forward | 1 = backward race | 2 = schemata | 3 = rank&gt; * Type of race to perform. * (default = 0).</pre> * * <pre> -L &lt;significance&gt; * Significance level for comaparisons * (default = 0.001(forward/backward/rank)/0.01(schemata)).</pre> * * <pre> -T &lt;threshold&gt; * Threshold for error comparison. * (default = 0.001).</pre> * * <pre> -A &lt;attribute evaluator&gt; * Attribute ranker to use if doing a * rank search. Place any * evaluator options LAST on * the command line following a "--". * eg. -A weka.attributeSelection.GainRatioAttributeEval ... -- -M. * (default = GainRatioAttributeEval)</pre> * * <pre> -F &lt;0 = 10 fold | 1 = leave-one-out&gt; * Folds for cross validation * (default = 0 (1 if schemata race)</pre> * * <pre> -Q * Generate a ranked list of attributes. * Forces the search to be forward * and races until all attributes have * selected, thus producing a ranking.</pre> * * <pre> -N &lt;num to select&gt; * Specify number of attributes to retain from * the ranking. Overides -T. Use in conjunction with -Q</pre> * * <pre> -J &lt;threshold&gt; * Specify a theshold by which attributes * may be discarded from the ranking. 
* Use in conjuction with -Q</pre> * * <pre> -Z * Verbose output for monitoring the search.</pre> * * <pre> * Options specific to evaluator weka.attributeSelection.GainRatioAttributeEval: * </pre> * * <pre> -M * treat missing values as a seperate value.</pre> * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 1.26 $ */ public class RaceSearch extends ASSearch implements RankedOutputSearch, OptionHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 4015453851212985720L; /** the training instances */ private Instances m_Instances = null; /** search types */ private static final int FORWARD_RACE = 0; private static final int BACKWARD_RACE = 1; private static final int SCHEMATA_RACE = 2; private static final int RANK_RACE = 3; public static final Tag [] TAGS_SELECTION = { new Tag(FORWARD_RACE, "Forward selection race"), new Tag(BACKWARD_RACE, "Backward elimination race"), new Tag(SCHEMATA_RACE, "Schemata race"), new Tag(RANK_RACE, "Rank race") }; /** the selected search type */ private int m_raceType = FORWARD_RACE; /** xval types */ private static final int TEN_FOLD = 0; private static final int LEAVE_ONE_OUT = 1; public static final Tag [] XVALTAGS_SELECTION = { new Tag(TEN_FOLD, "10 Fold"), new Tag(LEAVE_ONE_OUT, "Leave-one-out"), }; /** the selected xval type */ private int m_xvalType = TEN_FOLD; /** the class index */ private int m_classIndex; /** the number of attributes in the data */ private int m_numAttribs; /** the total number of partially/fully evaluated subsets */ private int m_totalEvals; /** holds the merit of the best subset found */ private double m_bestMerit = -Double.MAX_VALUE; /** the subset evaluator to use */ private HoldOutSubsetEvaluator m_theEvaluator = null; /** the significance level for comparisons */ private double m_sigLevel = 0.001; /** threshold for comparisons */ private double m_delta = 0.001; /** the number of samples above which to begin testing 
for similarity between competing subsets */ private int m_samples = 20; /** number of cross validation folds---equal to the number of instances for leave-one-out cv */ private int m_numFolds = 10; /** the attribute evaluator to generate the initial ranking when doing a rank race */ private ASEvaluation m_ASEval = new GainRatioAttributeEval(); /** will hold the attribute ranking produced by the above attribute evaluator if doing a rank search */ private int [] m_Ranking; /** verbose output for monitoring the search and debugging */ private boolean m_debug = false; /** If true then produce a ranked list of attributes by fully traversing a forward hillclimb race */ private boolean m_rankingRequested = false; /** The ranked list of attributes produced if m_rankingRequested is true */ private double [][] m_rankedAtts; /** The number of attributes ranked so far (if ranking is requested) */ private int m_rankedSoFar; /** The number of attributes to retain if a ranking is requested. -1 indicates that all attributes are to be retained. Has precedence over m_threshold */ private int m_numToSelect = -1; private int m_calculatedNumToSelect = -1; /** the threshold for removing attributes if ranking is requested */ private double m_threshold = -Double.MAX_VALUE; /** * Returns a string describing this search method * @return a description of the search method suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Races the cross validation error of competing " +"attribute subsets. Use in conjuction with a ClassifierSubsetEval. " +"RaceSearch has four modes:\n\nforward selection " +"races all single attribute additions to a base set (initially " +" no attributes), selects the winner to become the new base set " +"and then iterates until there is no improvement over the base set. " +"\n\nBackward elimination is similar but the initial base set has all " +"attributes included and races all single attribute deletions. 
" +"\n\nSchemata search is a bit different. Each iteration a series of " +"races are run in parallel. Each race in a set determines whether " +"a particular attribute should be included or not---ie the race is " +"between the attribute being \"in\" or \"out\". The other attributes " +"for this race are included or excluded randomly at each point in the " +"evaluation. As soon as one race " +"has a clear winner (ie it has been decided whether a particular " +"attribute should be inor not) then the next set of races begins, " +"using the result of the winning race from the previous iteration as " +"new base set.\n\nRank race first ranks the attributes using an " +"attribute evaluator and then races the ranking. The race includes " +"no attributes, the top ranked attribute, the top two attributes, the " +"top three attributes, etc.\n\nIt is also possible to generate a " +"raked list of attributes through the forward racing process. " +"If generateRanking is set to true then a complete forward race will " +"be run---that is, racing continues until all attributes have been " +"selected. The order that they are added in determines a complete " +"ranking of all the attributes.\n\nRacing uses paired and unpaired " +"t-tests on cross-validation errors of competing subsets. When there " +"is a significant difference between the means of the errors of two " +"competing subsets then the poorer of the two can be eliminated from " +"the race. Similarly, if there is no significant difference between " +"the mean errors of two competing subsets and they are within some " +"threshold of each other, then one can be eliminated from the race.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. 
* * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Andrew W. Moore and Mary S. Lee"); result.setValue(Field.TITLE, "Efficient Algorithms for Minimizing Cross Validation Error"); result.setValue(Field.BOOKTITLE, "Eleventh International Conference on Machine Learning"); result.setValue(Field.YEAR, "1994"); result.setValue(Field.PAGES, "190-198"); result.setValue(Field.PUBLISHER, "Morgan Kaufmann"); return result; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String raceTypeTipText() { return "Set the type of search."; } /** * Set the race type * * @param d the type of race */ public void setRaceType (SelectedTag d) { if (d.getTags() == TAGS_SELECTION) { m_raceType = d.getSelectedTag().getID(); } if (m_raceType == SCHEMATA_RACE && !m_rankingRequested) { try { setFoldsType(new SelectedTag(LEAVE_ONE_OUT, XVALTAGS_SELECTION)); setSignificanceLevel(0.01); } catch (Exception ex) { } } else { try { setFoldsType(new SelectedTag(TEN_FOLD, XVALTAGS_SELECTION)); setSignificanceLevel(0.001); } catch (Exception ex) { } } } /** * Get the race type * * @return the type of race */ public SelectedTag getRaceType() { return new SelectedTag(m_raceType, TAGS_SELECTION); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String significanceLevelTipText() { return "Set the significance level to use for t-test comparisons."; } /** * Sets the significance level to use * @param sig the significance level */ public void setSignificanceLevel(double sig) { m_sigLevel = sig; } /** * Get the significance level * @return the current significance level */ public double getSignificanceLevel() { return m_sigLevel; } /** 
* Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String thresholdTipText() { return "Set the error threshold by which to consider two subsets " +"equivalent."; } /** * Sets the threshold for comparisons * @param t the threshold to use */ public void setThreshold(double t) { m_delta = t; } /** * Get the threshold * @return the current threshold */ public double getThreshold() { return m_delta; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String foldsTypeTipText() { return "Set the number of folds to use for x-val error estimation; " +"leave-one-out is selected automatically for schemata search."; } /** * Set the xfold type * * @param d the type of xval */ public void setFoldsType (SelectedTag d) { if (d.getTags() == XVALTAGS_SELECTION) { m_xvalType = d.getSelectedTag().getID(); } } /** * Get the xfold type * * @return the type of xval */ public SelectedTag getFoldsType () { return new SelectedTag(m_xvalType, XVALTAGS_SELECTION); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String debugTipText() { return "Turn on verbose output for monitoring the search's progress."; } /** * Set whether verbose output should be generated. * @param d true if output is to be verbose. */ public void setDebug(boolean d) { m_debug = d; } /** * Get whether output is to be verbose * @return true if output will be verbose */ public boolean getDebug() { return m_debug; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String attributeEvaluatorTipText() { return "Attribute evaluator to use for generating an initial ranking. 
" +"Use in conjunction with a rank race"; } /** * Set the attribute evaluator to use for generating the ranking. * @param newEvaluator the attribute evaluator to use. */ public void setAttributeEvaluator(ASEvaluation newEvaluator) { m_ASEval = newEvaluator; } /** * Get the attribute evaluator used to generate the ranking. * @return the evaluator used to generate the ranking. */ public ASEvaluation getAttributeEvaluator() { return m_ASEval; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String generateRankingTipText() { return "Use the racing process to generate a ranked list of attributes. " +"Using this mode forces the race to be a forward type and then races " +"until all attributes have been added, thus giving a ranked list"; } /** * Records whether the user has requested a ranked list of attributes. * @param doRank true if ranking is requested */ public void setGenerateRanking(boolean doRank) { m_rankingRequested = doRank; if (m_rankingRequested) { try { setRaceType(new SelectedTag(FORWARD_RACE, TAGS_SELECTION)); } catch (Exception ex) { } } } /** * Gets whether ranking has been requested. This is used by the * AttributeSelection module to determine if rankedAttributes() * should be called. * @return true if ranking has been requested. */ public boolean getGenerateRanking() { return m_rankingRequested; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numToSelectTipText() { return "Specify the number of attributes to retain. Use in conjunction " +"with generateRanking. The default value " +"(-1) indicates that all attributes are to be retained. Use either " +"this option or a threshold to reduce the attribute set."; } /** * Specify the number of attributes to select from the ranked list * (if generating a ranking). 
-1 * indicates that all attributes are to be retained. * @param n the number of attributes to retain */ public void setNumToSelect(int n) { m_numToSelect = n; } /** * Gets the number of attributes to be retained. * @return the number of attributes to retain */ public int getNumToSelect() { return m_numToSelect; } /** * Gets the calculated number of attributes to retain. This is the * actual number of attributes to retain. This is the same as * getNumToSelect if the user specifies a number which is not less * than zero. Otherwise it should be the number of attributes in the * (potentially transformed) data. */ public int getCalculatedNumToSelect() { if (m_numToSelect >= 0) { m_calculatedNumToSelect = m_numToSelect; } return m_calculatedNumToSelect; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String selectionThresholdTipText() { return "Set threshold by which attributes can be discarded. Default value " + "results in no attributes being discarded. Use in conjunction with " + "generateRanking"; } /** * Set the threshold by which the AttributeSelection module can discard * attributes. * @param threshold the threshold. */ public void setSelectionThreshold(double threshold) { m_threshold = threshold; } /** * Returns the threshold so that the AttributeSelection module can * discard attributes from the ranking. */ public double getSelectionThreshold() { return m_threshold; } /** * Returns an enumeration describing the available options. * @return an enumeration of all the available options. 
   **/
  public Enumeration listOptions () {
    Vector newVector = new Vector();

    newVector.addElement(new Option("\tType of race to perform.\n"
				    + "\t(default = 0).",
				    "R", 1
				    ,"-R <0 = forward | 1 = backward race | 2 = schemata | 3 = rank>"));

    newVector.addElement(new Option("\tSignificance level for comaparisons\n"
				    + "\t(default = 0.001(forward/backward/rank)/0.01(schemata)).",
				    "L",1,"-L <significance>"));

    newVector.addElement(new Option("\tThreshold for error comparison.\n"
				    + "\t(default = 0.001).",
				    "T",1,"-T <threshold>"));

    newVector.addElement(new Option("\tAttribute ranker to use if doing a \n"
				    + "\trank search. Place any\n"
				    + "\tevaluator options LAST on \n"
				    + "\tthe command line following a \"--\".\n"
				    + "\teg. -A weka.attributeSelection.GainRatioAttributeEval ... -- -M.\n"
				    + "\t(default = GainRatioAttributeEval)",
				    "A", 1, "-A <attribute evaluator>"));

    newVector.addElement(new Option("\tFolds for cross validation\n"
				    + "\t(default = 0 (1 if schemata race)",
				    "F",1,"-F <0 = 10 fold | 1 = leave-one-out>"));

    newVector.addElement(new Option("\tGenerate a ranked list of attributes.\n"
				    +"\tForces the search to be forward\n"
				    +"\tand races until all attributes have\n"
				    +"\tselected, thus producing a ranking.",
				    "Q",0,"-Q"));

    newVector.addElement(new Option("\tSpecify number of attributes to retain from \n"
				    + "\tthe ranking. Overides -T. Use in conjunction with -Q",
				    "N", 1, "-N <num to select>"));

    newVector.addElement(new Option("\tSpecify a theshold by which attributes\n"
				    + "\tmay be discarded from the ranking.\n"
				    +"\tUse in conjuction with -Q",
				    "J",1, "-J <threshold>"));

    newVector.addElement(new Option("\tVerbose output for monitoring the search.",
				    "Z",0,"-Z"));

    // append the options of the currently configured attribute evaluator
    // (used to build the initial ranking for a rank race)
    if ((m_ASEval != null) && (m_ASEval instanceof OptionHandler)) {
      newVector.addElement(new Option("", "", 0, "\nOptions specific to evaluator "
				      + m_ASEval.getClass().getName() + ":"));
      Enumeration enu = ((OptionHandler)m_ASEval).listOptions();
      while (enu.hasMoreElements()) {
	newVector.addElement(enu.nextElement());
      }
    }
    return newVector.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   * 
   * <pre> -R &lt;0 = forward | 1 = backward race | 2 = schemata | 3 = rank&gt;
   *  Type of race to perform.
   *  (default = 0).</pre>
   * 
   * <pre> -L &lt;significance&gt;
   *  Significance level for comaparisons
   *  (default = 0.001(forward/backward/rank)/0.01(schemata)).</pre>
   * 
   * <pre> -T &lt;threshold&gt;
   *  Threshold for error comparison.
   *  (default = 0.001).</pre>
   * 
   * <pre> -A &lt;attribute evaluator&gt;
   *  Attribute ranker to use if doing a 
   *  rank search. Place any
   *  evaluator options LAST on 
   *  the command line following a "--".
   *  eg. -A weka.attributeSelection.GainRatioAttributeEval ... -- -M.
   *  (default = GainRatioAttributeEval)</pre>
   * 
   * <pre> -F &lt;0 = 10 fold | 1 = leave-one-out&gt;
   *  Folds for cross validation
   *  (default = 0 (1 if schemata race)</pre>
   * 
   * <pre> -Q
   *  Generate a ranked list of attributes.
   *  Forces the search to be forward
   *  and races until all attributes have
   *  selected, thus producing a ranking.</pre>
   * 
   * <pre> -N &lt;num to select&gt;
   *  Specify number of attributes to retain from 
   *  the ranking. Overides -T. Use in conjunction with -Q</pre>
   * 
   * <pre> -J &lt;threshold&gt;
   *  Specify a theshold by which attributes
   *  may be discarded from the ranking.
   *  Use in conjuction with -Q</pre>
   * 
   * <pre> -Z
   *  Verbose output for monitoring the search.</pre>
   * 
   * <pre> 
   * Options specific to evaluator weka.attributeSelection.GainRatioAttributeEval:
   * </pre>
   * 
   * <pre> -M
   *  treat missing values as a seperate value.</pre>
   * 
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions (String[] options)
    throws Exception {
    String optionString;
    resetOptions();

    optionString = Utils.getOption('R', options);
    if (optionString.length() != 0) {
      setRaceType(new SelectedTag(Integer.parseInt(optionString),
				  TAGS_SELECTION));
    }

    optionString = Utils.getOption('F', options);
    if (optionString.length() != 0) {
      setFoldsType(new SelectedTag(Integer.parseInt(optionString),
				   XVALTAGS_SELECTION));
    }

    optionString = Utils.getOption('L', options);
    if (optionString.length() !=0) {
      setSignificanceLevel(Double.parseDouble(optionString));
    }

    optionString = Utils.getOption('T', options);
    if (optionString.length() !=0) {
      setThreshold(Double.parseDouble(optionString));
    }

    // everything after "--" is handed to the evaluator named by -A
    optionString = Utils.getOption('A', options);
    if (optionString.length() != 0) {
      setAttributeEvaluator(ASEvaluation.forName(optionString,
						 Utils.partitionOptions(options)));
    }

    setGenerateRanking(Utils.getFlag('Q', options));

    optionString = Utils.getOption('J', options);
    if (optionString.length() != 0) {
      setSelectionThreshold(Double.parseDouble(optionString));
    }

    optionString = Utils.getOption('N', options);
    if (optionString.length() != 0) {
      setNumToSelect(Integer.parseInt(optionString));
    }

    setDebug(Utils.getFlag('Z', options));
  }

  /**
   * Gets the current settings of RaceSearch.
   * @return an array of strings suitable for passing to setOptions()
   */
  public String[] getOptions () {
    int current = 0;
    String[] evaluatorOptions = new String[0];

    if ((m_ASEval != null) &&
	(m_ASEval instanceof OptionHandler)) {
      evaluatorOptions = ((OptionHandler)m_ASEval).getOptions();
    }

    // 17 = maximum number of slots used by this class's own options;
    // unused slots are blanked out at the end
    String[] options = new String[17+evaluatorOptions.length];

    options[current++] = "-R"; options[current++] = ""+m_raceType;
    options[current++] = "-L"; options[current++] = ""+getSignificanceLevel();
    options[current++] = "-T"; options[current++] = ""+getThreshold();
    options[current++] = "-F"; options[current++] = ""+m_xvalType;

    if (getGenerateRanking()) {
      options[current++] = "-Q";
    }

    options[current++] = "-N"; options[current++] = ""+getNumToSelect();
    options[current++] = "-J"; options[current++] = ""+getSelectionThreshold();

    if (getDebug()) {
      options[current++] = "-Z";
    }

    if (getAttributeEvaluator() != null) {
      options[current++] = "-A";
      options[current++] = getAttributeEvaluator().getClass().getName();
      options[current++] = "--";
      System.arraycopy(evaluatorOptions, 0, options, current,
		       evaluatorOptions.length);
      current += evaluatorOptions.length;
    }

    while (current < options.length) {
      options[current++] = "";
    }

    return  options;
  }

  /**
   * Searches the attribute subset space by racing cross validation
   * errors of competing subsets
   *
   * @param ASEval the attribute evaluator to guide the search
   * @param data the training instances.
   * @return an array (not necessarily ordered) of selected attribute indexes
   * @throws Exception if the search can't be completed
   */
  public int[] search (ASEvaluation ASEval, Instances data)
    throws Exception {
    // the evaluator must be an error-based hold-out subset evaluator
    // (e.g. ClassifierSubsetEval) for racing to make sense
    if (!(ASEval instanceof SubsetEvaluator)) {
      throw  new Exception(ASEval.getClass().getName()
			   + " is not a "
			   + "Subset evaluator! (RaceSearch)");
    }

    if (ASEval instanceof UnsupervisedSubsetEvaluator) {
      throw new Exception("Can't use an unsupervised subset evaluator "
			  +"(RaceSearch).");
    }

    if (!(ASEval instanceof HoldOutSubsetEvaluator)) {
      throw new Exception("Must use a HoldOutSubsetEvaluator, eg. "
			  +"weka.attributeSelection.ClassifierSubsetEval "
			  +"(RaceSearch)");
    }

    if (!(ASEval instanceof ErrorBasedMeritEvaluator)) {
      throw new Exception("Only error based subset evaluators can be used, "
			  +"eg. weka.attributeSelection.ClassifierSubsetEval "
			  +"(RaceSearch)");
    }

    m_Instances = new Instances(data);
    m_Instances.deleteWithMissingClass();
    if (m_Instances.numInstances() == 0) {
      throw new Exception("All train instances have missing class! (RaceSearch)");
    }
    if (m_rankingRequested && m_numToSelect > m_Instances.numAttributes()-1) {
      throw new Exception("More attributes requested than exist in the data "
			  +"(RaceSearch).");
    }
    m_theEvaluator = (HoldOutSubsetEvaluator)ASEval;
    m_numAttribs = m_Instances.numAttributes();
    m_classIndex = m_Instances.classIndex();

    if (m_rankingRequested) {
      m_rankedAtts = new double[m_numAttribs-1][2];
      m_rankedSoFar = 0;
    }

    if (m_xvalType == LEAVE_ONE_OUT) {
      m_numFolds = m_Instances.numInstances();
    } else {
      m_numFolds = 10;
    }

    Random random = new Random(1); // I guess this should really be a parameter?
    m_Instances.randomize(random);
    int [] bestSubset=null;

    switch (m_raceType) {
    case FORWARD_RACE:
    case BACKWARD_RACE:
      // forward and backward hillclimbing share the same racing driver
      bestSubset = hillclimbRace(m_Instances, random);
      break;
    case SCHEMATA_RACE:
      bestSubset = schemataRace(m_Instances, random);
      break;
    case RANK_RACE:
      bestSubset = rankRace(m_Instances, random);
      break;
    }

    return bestSubset;
  }

  /**
   * Returns the ranking produced by a full forward race. Only valid
   * after search() has been run with generateRanking set to true.
   *
   * @return a two column array: attribute index, merit (error) at the
   * point the attribute was added
   * @throws Exception if no ranking was requested or search has not run
   */
  public double [][] rankedAttributes() throws Exception {
    if (!m_rankingRequested) {
      throw new Exception("Need to request a ranked list of attributes "
			  +"before attributes can be ranked (RaceSearch).");
    }
    if (m_rankedAtts == null) {
      throw new Exception("Search must be performed before attributes "
			  +"can be ranked (RaceSearch).");
    }

    double [][] final_rank = new double [m_rankedSoFar][2];
    for (int i=0;i<m_rankedSoFar;i++) {
      final_rank[i][0] = m_rankedAtts[i][0];
      final_rank[i][1] = m_rankedAtts[i][1];
    }

    // an explicit -N count takes precedence over the threshold
    if (m_numToSelect <= 0) {
      if (m_threshold == -Double.MAX_VALUE) {
	m_calculatedNumToSelect = final_rank.length;
      } else {
	determineNumToSelectFromThreshold(final_rank);
      }
    }

    return final_rank;
  }

  /** Counts how many ranked attributes score above m_threshold and
      stores the result in m_calculatedNumToSelect. */
  private void determineNumToSelectFromThreshold(double [][] ranking) {
    int count = 0;
    for (int i = 0; i < ranking.length; i++) {
      if (ranking[i][1] > m_threshold) {
	count++;
      }
    }
    m_calculatedNumToSelect = count;
  }

  /**
   * Print an attribute set.
   */
  private String printSets(char [][]raceSets) {
    StringBuffer temp = new StringBuffer();
    for (int i=0;i<raceSets.length;i++) {
      for (int j=0;j<m_numAttribs;j++) {
	temp.append(raceSets[i][j]);
      }
      temp.append('\n');
    }
    return temp.toString();
  }

  /**
   * Performs a schemata race---a series of races in parallel.
   * @param data the instances to estimate accuracy over.
   * @param random a random number generator
   * @return an array of selected attribute indices.
   */
  private int [] schemataRace(Instances data, Random random)
    throws Exception {
    // # races, 2 (competitors in each race), # attributes.
    // Each competitor is a schema: '1' = attribute in, '0' = out,
    // '*' = undecided (filled randomly at each evaluation).
    char [][][] parallelRaces;
    int numRaces = m_numAttribs-1;
    Random r = new Random(42);

    int numInstances = data.numInstances();
    Instances trainCV;
    Instances testCV;
    Instance testInstance;

    // statistics on the racers
    Stats [][] raceStats = new Stats[numRaces][2];

    parallelRaces = new char [numRaces][2][m_numAttribs-1];
    char [] base = new char [m_numAttribs];
    for (int i=0;i<m_numAttribs;i++) {
      base[i] = '*';
    }

    int count=0;
    // set up initial races: for every non-class attribute, an "in"
    // competitor versus an "out" competitor
    for (int i=0;i<m_numAttribs;i++) {
      if (i != m_classIndex) {
	parallelRaces[count][0] = (char [])base.clone();
	parallelRaces[count][1] = (char [])base.clone();
	parallelRaces[count][0][i] = '1';
	parallelRaces[count++][1][i] = '0';
      }
    }

    if (m_debug) {
      System.err.println("Initial sets:\n");
      for (int i=0;i<numRaces;i++) {
	System.err.print(printSets(parallelRaces[i])+"--------------\n");
      }
    }

    BitSet randomB = new BitSet(m_numAttribs);
    char [] randomBC = new char [m_numAttribs];
    // notes which bit positions have been decided
    boolean [] attributeConstraints = new boolean[m_numAttribs];
    double error;
    int evaluationCount = 0;

    raceSet: while (numRaces > 0) {
      boolean won = false;
      for (int i=0;i<numRaces;i++) {
	raceStats[i][0] = new Stats();
	raceStats[i][1] = new Stats();
      }
      // keep an eye on how many test instances have been randomly sampled
      int sampleCount = 0;

      // run the current set of races
      while (!won) {
	// generate a random binary string: undecided positions are
	// random, decided positions come from the current base set
	for (int i=0;i<m_numAttribs;i++) {
	  if (i != m_classIndex) {
	    if (!attributeConstraints[i]) {
	      if (r.nextDouble() < 0.5) {
		randomB.set(i);
	      } else {
		randomB.clear(i);
	      }
	    } else {
	      // this position has been decided from previous races
	      if (base[i] == '1') {
		randomB.set(i);
	      } else {
		randomB.clear(i);
	      }
	    }
	  }
	}

	// randomly select an instance to test on
	int testIndex = Math.abs(r.nextInt() % numInstances);
	// We want to randomize the data the same way for every
	// learning scheme.
	trainCV = data.trainCV(numInstances, testIndex, new Random (1));
	testCV = data.testCV(numInstances, testIndex);
	testInstance = testCV.instance(0);
	sampleCount++;
	/*	if (sampleCount > numInstances) {
	  throw new Exception("raceSchemata: No clear winner after sampling "
			      +sampleCount+" instances.");
			      } */

	m_theEvaluator.buildEvaluator(trainCV);
	// the evaluator must retrain for every test point
	error = -((HoldOutSubsetEvaluator)m_theEvaluator).
	  evaluateSubset(randomB, testInstance, true);
	evaluationCount++;

	// see which racers match this random subset
	for (int i=0;i<m_numAttribs;i++) {
	  if (randomB.get(i)) {
	    randomBC[i] = '1';
	  } else {
	    randomBC[i] = '0';
	  }
	}
	//	System.err.println("Random subset: "+(new String(randomBC)));

	checkRaces: for (int i=0;i<numRaces;i++) {
	  // if a pair of racers has evaluated more than num instances
	  // then bail out---unlikely that having any more atts is any
	  // better than the current base set.
	  if (((raceStats[i][0].count + raceStats[i][1].count) / 2) >
	      (numInstances)) {
	    break raceSet;
	  }
	  for (int j=0;j<2;j++) {
	    boolean matched = true;
	    for (int k =0;k<m_numAttribs;k++) {
	      if (parallelRaces[i][j][k] != '*') {
		if (parallelRaces[i][j][k] != randomBC[k]) {
		  matched = false;
		  break;
		}
	      }
	    }
	    if (matched) {
	      // update the stats for this racer
	      //  System.err.println("Matched "+i+" "+j);
	      raceStats[i][j].add(error);

	      // does this race have a clear winner, meaning we can
	      // terminate the whole set of parallel races?
	      if (raceStats[i][0].count > m_samples &&
		  raceStats[i][1].count > m_samples) {
		raceStats[i][0].calculateDerived();
		raceStats[i][1].calculateDerived();
		//		System.err.println(j+" : "+(new String(parallelRaces[i][j])));
		//		System.err.println(raceStats[i][0]);
		//		System.err.println(raceStats[i][1]);
		// check the ttest
		double prob = ttest(raceStats[i][0], raceStats[i][1]);
		//		System.err.println("Prob :"+prob);
		if (prob < m_sigLevel) {
		  // stop the races we have a winner! Lower mean error wins.
		  if (raceStats[i][0].mean < raceStats[i][1].mean) {
		    base = (char [])parallelRaces[i][0].clone();
		    m_bestMerit = raceStats[i][0].mean;
		    if (m_debug) {
		      System.err.println("contender 0 won ");
		    }
		  } else {
		    base = (char [])parallelRaces[i][1].clone();
		    m_bestMerit = raceStats[i][1].mean;
		    if (m_debug) {
		      System.err.println("contender 1 won");
		    }
		  }
		  if (m_debug) {
		    System.err.println((new String(parallelRaces[i][0]))
				       +" "+(new String(parallelRaces[i][1])));
		    System.err.println("Means : "+raceStats[i][0].mean
				       +" vs"+raceStats[i][1].mean);
		    System.err.println("Evaluations so far : "
				       +evaluationCount);
		  }
		  won = true;
		  break checkRaces;
		}
	      }
	    }
	  }
	}
      }
      numRaces--;

      // set up the next set of races if necessary: the winning schema
      // becomes the base and one more attribute position is fixed
      if (numRaces > 0 && won) {
	parallelRaces = new char [numRaces][2][m_numAttribs-1];
	raceStats = new Stats[numRaces][2];
	// update the attribute constraints
	for (int i=0;i<m_numAttribs;i++) {
	  if (i != m_classIndex &&
	      !attributeConstraints[i] && base[i] != '*') {
	    attributeConstraints[i] = true;
	    break;
	  }
	}
	count=0;
	for (int i=0;i<numRaces;i++) {
	  parallelRaces[i][0] = (char [])base.clone();
	  parallelRaces[i][1] = (char [])base.clone();
	  for (int j=count;j<m_numAttribs;j++) {
	    if (j != m_classIndex && parallelRaces[i][0][j] == '*') {
	      parallelRaces[i][0][j] = '1';
	      parallelRaces[i][1][j] = '0';
	      count = j+1;
	      break;
	    }
	  }
	}

	if (m_debug) {
	  System.err.println("Next sets:\n");
	  for (int i=0;i<numRaces;i++) {
	    System.err.print(printSets(parallelRaces[i])+"--------------\n");
	  }
	}
      }
    }

    if (m_debug) {
      System.err.println("Total evaluations : "
			 +evaluationCount);
    }
    return attributeList(base);
  }

  /**
   * t-test for unequal sample sizes and same variance. Returns probability
   * that observed difference in means is due to chance.
   */
  private double ttest(Stats c1, Stats c2) throws Exception {
    double n1 = c1.count; double n2 = c2.count;
    double v1 = c1.stdDev * c1.stdDev;
    double v2 = c2.stdDev * c2.stdDev;
    double av1 = c1.mean;
    double av2 = c2.mean;
    double df = n1 + n2 - 2;

    // pooled variance estimate (assumes both samples share a variance)
    double cv = (((n1 - 1) * v1) + ((n2 - 1) * v2)) /df;
    double t = (av1 - av2) / Math.sqrt(cv * ((1.0 / n1) + (1.0 / n2)));

    // two-tailed p-value via the incomplete beta function
    return Statistics.incompleteBeta(df / 2.0, 0.5, df / (df + (t * t)));
  }

  /**
   * Performs a rank race---race consisting of no attributes, the top
   * ranked attribute, the top two attributes etc. The initial ranking
   * is determined by an attribute evaluator.
   * @param data the instances to estimate accuracy over
   * @param random a random number generator
   * @return an array of selected attribute indices.
   */
  private int [] rankRace(Instances data, Random random) throws Exception {
    // subsets are encoded as char arrays: '1' in, '0' out, '-' class att
    char [] baseSet = new char [m_numAttribs];
    char [] bestSet;
    double bestSetError;
    for (int i=0;i<m_numAttribs;i++) {
      if (i == m_classIndex) {
	baseSet[i] = '-';
      } else {
	baseSet[i] = '0';
      }
    }

    int numCompetitors = m_numAttribs-1;
    char [][] raceSets = new char [numCompetitors+1][m_numAttribs];

    if (m_ASEval instanceof AttributeEvaluator) {
      // generate the attribute ranking first
      Ranker ranker = new Ranker();
      m_ASEval.buildEvaluator(data);
      m_Ranking = ranker.search(m_ASEval,data);
    } else {
      // subset evaluator: derive a ranking from a greedy forward search
      GreedyStepwise fs = new GreedyStepwise();
      double [][]rankres;
      fs.setGenerateRanking(true);
      ((ASEvaluation)m_ASEval).buildEvaluator(data);
      fs.search(m_ASEval, data);
      rankres = fs.rankedAttributes();
      m_Ranking = new int[rankres.length];
      for (int i=0;i<rankres.length;i++) {
	m_Ranking[i] = (int)rankres[i][0];
      }
    }

    // set up the race: competitor i+1 = top i+1 ranked attributes
    raceSets[0] = (char [])baseSet.clone();
    for (int i=0;i<m_Ranking.length;i++) {
      raceSets[i+1] = (char [])raceSets[i].clone();
      raceSets[i+1][m_Ranking[i]] = '1';
    }

    if (m_debug) {
      System.err.println("Initial sets:\n"+printSets(raceSets));
    }

    // run the race
    double [] winnerInfo = raceSubsets(raceSets, data, true, random);
    bestSetError = winnerInfo[1];
    bestSet = (char [])raceSets[(int)winnerInfo[0]].clone();
    m_bestMerit = bestSetError;
    return attributeList(bestSet);
  }

  /**
   * Performs a hill climbing race---all single attribute changes to a
   * base subset are raced in parallel. The winner is chosen and becomes
   * the new base subset and the process is repeated until there is no
   * improvement in error over the base subset.
   * @param data the instances to estimate accuracy over
   * @param random a random number generator
   * @return an array of selected attribute indices.
   * @throws Exception if something goes wrong
   */
  private int [] hillclimbRace(Instances data, Random random) throws Exception {
    double baseSetError;
    char [] baseSet = new char [m_numAttribs];

    // forward race starts empty; backward race starts with all attributes
    for (int i=0;i<m_numAttribs;i++) {
      if (i != m_classIndex) {
	if (m_raceType == FORWARD_RACE) {
	  baseSet[i] = '0';
	} else {
	  baseSet[i] = '1';
	}
      } else {
	baseSet[i] = '-';
      }
    }

    int numCompetitors = m_numAttribs-1;
    char [][] raceSets = new char [numCompetitors+1][m_numAttribs];

    raceSets[0] = (char [])baseSet.clone();
    int count = 1;
    // initialize each race set to 1 attribute
    for (int i=0;i<m_numAttribs;i++) {
      if (i != m_classIndex) {
	raceSets[count] = (char [])baseSet.clone();
	if (m_raceType == BACKWARD_RACE) {
	  raceSets[count++][i] = '0';
	} else {
	  raceSets[count++][i] = '1';
	}
      }
    }

    if (m_debug) {
      System.err.println("Initial sets:\n"+printSets(raceSets));
    }

    // race the initial sets (base set either no or all features)
    double [] winnerInfo = raceSubsets(raceSets, data, true, random);
    baseSetError = winnerInfo[1];
    m_bestMerit = baseSetError;
    baseSet = (char [])raceSets[(int)winnerInfo[0]].clone();
    if (m_rankingRequested) {
      // winner index minus one = attribute index (slot 0 is the base set)
      m_rankedAtts[m_rankedSoFar][0] = (int)(winnerInfo[0]-1);
      m_rankedAtts[m_rankedSoFar][1] = winnerInfo[1];
      m_rankedSoFar++;
    }

    boolean improved = true;
    int j;
    // now race until there is no improvement over the base set or only
    // one competitor remains
    while (improved) {
      // generate the next set of competitors
      numCompetitors--;
      if (numCompetitors == 0) { //race finished!
	break;
      }
      j=0;
      // +1. we'll race against the base set---might be able to bail out
      // of the race if none from the new set are statistically better
      // than the base set. Base set is stored in loc 0.
      raceSets = new char [numCompetitors+1][m_numAttribs];
      for (int i=0;i<numCompetitors+1;i++) {
	raceSets[i] = (char [])baseSet.clone();
	if (i > 0) {
	  // flip the next untried attribute (1 == BACKWARD_RACE here)
	  for (int k=j;k<m_numAttribs;k++) {
	    if (m_raceType == 1) {
	      if (k != m_classIndex && raceSets[i][k] != '0') {
		raceSets[i][k] = '0';
		j = k+1;
		break;
	      }
	    } else {
	      if (k != m_classIndex && raceSets[i][k] != '1') {
		raceSets[i][k] = '1';
		j = k+1;
		break;
	      }
	    }
	  }
	}
      }

      if (m_debug) {
	System.err.println("Next set : \n"+printSets(raceSets));
      }

      improved = false;
      winnerInfo = raceSubsets(raceSets, data, true, random);
      String bs = new String(baseSet);
      String win = new String(raceSets[(int)winnerInfo[0]]);
      if (bs.compareTo(win) == 0) {
	// race finished (the base set won)
      } else {
	if (winnerInfo[1] < baseSetError || m_rankingRequested) {
	  improved = true;
	  baseSetError = winnerInfo[1];
	  m_bestMerit = baseSetError;
	  // find which att is different
	  if (m_rankingRequested) {
	    for (int i = 0; i < baseSet.length; i++) {
	      if (win.charAt(i) != bs.charAt(i)) {
		m_rankedAtts[m_rankedSoFar][0] = i;
		m_rankedAtts[m_rankedSoFar][1] = winnerInfo[1];
		m_rankedSoFar++;
	      }
	    }
	  }
	  baseSet = (char [])raceSets[(int)winnerInfo[0]].clone();
	} else {
	  // Will get here for a subset whose error is outside the delta
	  // threshold but is not *significantly* worse than the base
	  // subset
	  //	  throw new Exception("RaceSearch: problem in hillClimbRace");
	}
      }
    }
    return attributeList(baseSet);
  }

  /**
   * Convert an attribute set to an array of indices
   */
  private int [] attributeList(char [] list) {
    int count = 0;
    for (int i=0;i<m_numAttribs;i++) {
      if (list[i] == '1') {
	count++;
      }
    }
    int [] rlist = new int[count];
    count = 0;
    for (int i=0;i<m_numAttribs;i++) {
      if (list[i] == '1') {
	rlist[count++] = i;
      }
    }
    return rlist;
  }

  /**
   * Races the leave-one-out cross validation errors of a set of
   * attribute
subsets on a set of instances. * @param raceSets a set of attribute subset specifications * @param data the instances to use when cross validating * @param baseSetIncluded true if the first attribute set is a * base set generated from the previous race * @param random a random number generator * @return the index of the winning subset * @throws Exception if an error occurs during cross validation */ private double [] raceSubsets(char [][]raceSets, Instances data, boolean baseSetIncluded, Random random) throws Exception { // the evaluators --- one for each subset ASEvaluation [] evaluators = ASEvaluation.makeCopies(m_theEvaluator, raceSets.length); // array of subsets eliminated from the race boolean [] eliminated = new boolean [raceSets.length]; // individual statistics Stats [] individualStats = new Stats [raceSets.length]; // pairwise statistics PairedStats [][] testers = new PairedStats[raceSets.length][raceSets.length]; /** do we ignore the base set or not? */ int startPt = m_rankingRequested ? 1 : 0; for (int i=0;i<raceSets.length;i++) { individualStats[i] = new Stats(); for (int j=i+1;j<raceSets.length;j++) { testers[i][j] = new PairedStats(m_sigLevel); } } BitSet [] raceBitSets = new BitSet[raceSets.length]; for (int i=0;i<raceSets.length;i++) { raceBitSets[i] = new BitSet(m_numAttribs); for (int j=0;j<m_numAttribs;j++) { if (raceSets[i][j] == '1') { raceBitSets[i].set(j); } } } // now loop over the data points collecting leave-one-out errors for // each attribute set Instances trainCV; Instances testCV; Instance testInst; double [] errors = new double [raceSets.length]; int eliminatedCount = 0; int processedCount = 0; // if there is one set left in the race then we need to continue to // evaluate it for the remaining instances in order to get an // accurate error estimate processedCount = 0; race: for (int i=0;i<m_numFolds;i++) { // We want to randomize the data the same way for every // learning scheme. 
trainCV = data.trainCV(m_numFolds, i, new Random (1)); testCV = data.testCV(m_numFolds, i); // loop over the surviving attribute sets building classifiers for this // training set for (int j=startPt;j<raceSets.length;j++) { if (!eliminated[j]) { evaluators[j].buildEvaluator(trainCV); } } for (int z=0;z<testCV.numInstances();z++) { testInst = testCV.instance(z); processedCount++; // loop over surviving attribute sets computing errors for this // test point for (int zz=startPt;zz<raceSets.length;zz++) { if (!eliminated[zz]) { if (z == 0) {// first test instance---make sure classifier is built errors[zz] = -((HoldOutSubsetEvaluator)evaluators[zz]). evaluateSubset(raceBitSets[zz], testInst, true); } else { // must be k fold rather than leave one out errors[zz] = -((HoldOutSubsetEvaluator)evaluators[zz]). evaluateSubset(raceBitSets[zz], testInst, false); } } } // now update the stats for (int j=startPt;j<raceSets.length;j++) { if (!eliminated[j]) { individualStats[j].add(errors[j]); for (int k=j+1;k<raceSets.length;k++) { if (!eliminated[k]) { testers[j][k].add(errors[j], errors[k]); } } } } // test for near identical models and models that are significantly // worse than some other model if (processedCount > m_samples-1 && (eliminatedCount < raceSets.length-1)) { for (int j=0;j<raceSets.length;j++) { if (!eliminated[j]) { for (int k=j+1;k<raceSets.length;k++) { if (!eliminated[k]) { testers[j][k].calculateDerived(); // near identical ? if ((testers[j][k].differencesSignificance == 0) && (Utils.eq(testers[j][k].differencesStats.mean, 0.0) || (Utils.gr(m_delta, Math.abs(testers[j][k]. differencesStats.mean))))) { // if they're exactly the same and there is a base set // in this race, make sure that the base set is NOT the // one eliminated. 
if (Utils.eq(testers[j][k].differencesStats.mean, 0.0)) { if (baseSetIncluded) { if (j != 0) { eliminated[j] = true; } else { eliminated[k] = true; } eliminatedCount++; } else { eliminated[j] = true; } if (m_debug) { System.err.println("Eliminating (identical) " +j+" "+raceBitSets[j].toString() +" vs "+k+" " +raceBitSets[k].toString() +" after " +processedCount +" evaluations\n" +"\nerror "+j+" : " +testers[j][k].xStats.mean +" vs "+k+" : " +testers[j][k].yStats.mean +" diff : " +testers[j][k].differencesStats .mean); } } else { // eliminate the one with the higer error if (testers[j][k].xStats.mean > testers[j][k].yStats.mean) { eliminated[j] = true; eliminatedCount++; if (m_debug) { System.err.println("Eliminating (near identical) " +j+" "+raceBitSets[j].toString() +" vs "+k+" " +raceBitSets[k].toString() +" after " +processedCount +" evaluations\n" +"\nerror "+j+" : " +testers[j][k].xStats.mean +" vs "+k+" : " +testers[j][k].yStats.mean +" diff : " +testers[j][k].differencesStats .mean); } break; } else { eliminated[k] = true; eliminatedCount++; if (m_debug) { System.err.println("Eliminating (near identical) " +k+" "+raceBitSets[k].toString() +" vs "+j+" " +raceBitSets[j].toString() +" after " +processedCount +" evaluations\n" +"\nerror "+k+" : " +testers[j][k].yStats.mean +" vs "+j+" : " +testers[j][k].xStats.mean +" diff : " +testers[j][k].differencesStats .mean); } } } } else { // significantly worse ? 
if (testers[j][k].differencesSignificance != 0) { if (testers[j][k].differencesSignificance > 0) { eliminated[j] = true; eliminatedCount++; if (m_debug) { System.err.println("Eliminating (-worse) " +j+" "+raceBitSets[j].toString() +" vs "+k+" " +raceBitSets[k].toString() +" after " +processedCount +" evaluations" +"\nerror "+j+" : " +testers[j][k].xStats.mean +" vs "+k+" : " +testers[j][k].yStats.mean); } break; } else { eliminated[k] = true; eliminatedCount++; if (m_debug) { System.err.println("Eliminating (worse) " +k+" "+raceBitSets[k].toString() +" vs "+j+" " +raceBitSets[j].toString() +" after " +processedCount +" evaluations" +"\nerror "+k+" : " +testers[j][k].yStats.mean +" vs "+j+" : " +testers[j][k].xStats.mean); } } } } } } } } } // if there is a base set from the previous race and it's the // only remaining subset then terminate the race. if (eliminatedCount == raceSets.length-1 && baseSetIncluded && !eliminated[0] && !m_rankingRequested) { break race; } } } if (m_debug) { System.err.println("*****eliminated count: "+eliminatedCount); } double bestError = Double.MAX_VALUE; int bestIndex=0; // return the index of the winner for (int i=startPt;i<raceSets.length;i++) { if (!eliminated[i]) { individualStats[i].calculateDerived(); if (m_debug) { System.err.println("Remaining error: "+raceBitSets[i].toString() +" "+individualStats[i].mean); } if (individualStats[i].mean < bestError) { bestError = individualStats[i].mean; bestIndex = i; } } } double [] retInfo = new double[2]; retInfo[0] = bestIndex; retInfo[1] = bestError; if (m_debug) { System.err.print("Best set from race : "); for (int i=0;i<m_numAttribs;i++) { if (raceSets[bestIndex][i] == '1') { System.err.print('1'); } else { System.err.print('0'); } } System.err.println(" :"+bestError+" Processed : "+(processedCount) +"\n"+individualStats[bestIndex].toString()); } return retInfo; } /** * Returns a string represenation * * @return a string representation */ public String toString() { StringBuffer text = 
new StringBuffer(); text.append("\tRaceSearch.\n\tRace type : "); switch (m_raceType) { case FORWARD_RACE: text.append("forward selection race\n\tBase set : no attributes"); break; case BACKWARD_RACE: text.append("backward elimination race\n\tBase set : all attributes"); break; case SCHEMATA_RACE: text.append("schemata race\n\tBase set : no attributes"); break; case RANK_RACE: text.append("rank race\n\tBase set : no attributes\n\t"); text.append("Attribute evaluator : " + getAttributeEvaluator().getClass().getName() +" "); if (m_ASEval instanceof OptionHandler) { String[] evaluatorOptions = new String[0]; evaluatorOptions = ((OptionHandler)m_ASEval).getOptions(); for (int i=0;i<evaluatorOptions.length;i++) { text.append(evaluatorOptions[i]+' '); } } text.append("\n"); text.append("\tAttribute ranking : \n"); int rlength = (int)(Math.log(m_Ranking.length) / Math.log(10) + 1); for (int i=0;i<m_Ranking.length;i++) { text.append("\t "+Utils.doubleToString((double)(m_Ranking[i]+1), rlength,0) +" "+m_Instances.attribute(m_Ranking[i]).name()+'\n'); } break; } text.append("\n\tCross validation mode : "); if (m_xvalType == TEN_FOLD) { text.append("10 fold"); } else { text.append("Leave-one-out"); } text.append("\n\tMerit of best subset found : "); int fieldwidth = 3; double precision = (m_bestMerit - (int)m_bestMerit); if (Math.abs(m_bestMerit) > 0) { fieldwidth = (int)Math.abs((Math.log(Math.abs(m_bestMerit)) / Math.log(10)))+2; } if (Math.abs(precision) > 0) { precision = Math.abs((Math.log(Math.abs(precision)) / Math.log(10)))+3; } else { precision = 2; } text.append(Utils.doubleToString(Math.abs(m_bestMerit), fieldwidth+(int)precision, (int)precision)+"\n"); return text.toString(); } /** * Reset the search method. 
*/ protected void resetOptions () { m_sigLevel = 0.001; m_delta = 0.001; m_ASEval = new GainRatioAttributeEval(); m_Ranking = null; m_raceType = FORWARD_RACE; m_debug = false; m_theEvaluator = null; m_bestMerit = -Double.MAX_VALUE; m_numFolds = 10; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.26 $"); } }
62,490
34.107303
577
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/RandomSearch.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * RandomSearch.java * Copyright (C) 1999 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.Range; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import java.util.BitSet; import java.util.Enumeration; import java.util.Random; import java.util.Vector; /** <!-- globalinfo-start --> * RandomSearch : <br/> * <br/> * Performs a Random search in the space of attribute subsets. If no start set is supplied, Random search starts from a random point and reports the best subset found. If a start set is supplied, Random searches randomly for subsets that are as good or better than the start point with the same or or fewer attributes. Using RandomSearch in conjunction with a start set containing all attributes equates to the LVF algorithm of Liu and Setiono (ICML-96).<br/> * <br/> * For more information see:<br/> * <br/> * H. Liu, R. Setiono: A probabilistic approach to feature selection - A filter solution. In: 13th International Conference on Machine Learning, 319-327, 1996. 
* <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Liu1996, * author = {H. Liu and R. Setiono}, * booktitle = {13th International Conference on Machine Learning}, * pages = {319-327}, * title = {A probabilistic approach to feature selection - A filter solution}, * year = {1996} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P &lt;start set&gt; * Specify a starting set of attributes. * Eg. 1,3,5-7. * If a start point is supplied, * random search evaluates the start * point and then randomly looks for * subsets that are as good as or better * than the start point with the same * or lower cardinality.</pre> * * <pre> -F &lt;percent&gt; * Percent of search space to consider. * (default = 25%).</pre> * * <pre> -V * Output subsets as the search progresses. * (default = false).</pre> * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 1.18 $ */ public class RandomSearch extends ASSearch implements StartSetHandler, OptionHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 7479392617377425484L; /** * holds a starting set as an array of attributes. */ private int[] m_starting; /** holds the start set as a range */ private Range m_startRange; /** the best feature set found during the search */ private BitSet m_bestGroup; /** the merit of the best subset found */ private double m_bestMerit; /** * only accept a feature set as being "better" than the best if its * merit is better or equal to the best, and it contains fewer * features than the best (this allows LVF to be implimented). 
*/ private boolean m_onlyConsiderBetterAndSmaller; /** does the data have a class */ private boolean m_hasClass; /** holds the class index */ private int m_classIndex; /** number of attributes in the data */ private int m_numAttribs; /** seed for random number generation */ private int m_seed; /** percentage of the search space to consider */ private double m_searchSize; /** the number of iterations performed */ private int m_iterations; /** random number object */ private Random m_random; /** output new best subsets as the search progresses */ private boolean m_verbose; /** * Returns a string describing this search method * @return a description of the search suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "RandomSearch : \n\nPerforms a Random search in " +"the space of attribute subsets. If no start set is supplied, Random " +"search starts from a random point and reports the best subset found. " +"If a start set is supplied, Random searches randomly for subsets " +"that are as good or better than the start point with the same or " +"or fewer attributes. Using RandomSearch in conjunction with a start " +"set containing all attributes equates to the LVF algorithm of Liu " +"and Setiono (ICML-96).\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "H. Liu and R. 
Setiono"); result.setValue(Field.TITLE, "A probabilistic approach to feature selection - A filter solution"); result.setValue(Field.BOOKTITLE, "13th International Conference on Machine Learning"); result.setValue(Field.YEAR, "1996"); result.setValue(Field.PAGES, "319-327"); return result; } /** * Constructor */ public RandomSearch () { resetOptions(); } /** * Returns an enumeration describing the available options. * @return an enumeration of all the available options. **/ public Enumeration listOptions () { Vector newVector = new Vector(3); newVector.addElement(new Option("\tSpecify a starting set of attributes." + "\n\tEg. 1,3,5-7." +"\n\tIf a start point is supplied," +"\n\trandom search evaluates the start" +"\n\tpoint and then randomly looks for" +"\n\tsubsets that are as good as or better" +"\n\tthan the start point with the same" +"\n\tor lower cardinality." ,"P",1 , "-P <start set>")); newVector.addElement(new Option("\tPercent of search space to consider." +"\n\t(default = 25%)." , "F", 1 , "-F <percent> ")); newVector.addElement(new Option("\tOutput subsets as the search progresses." +"\n\t(default = false)." , "V", 0 , "-V")); return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P &lt;start set&gt; * Specify a starting set of attributes. * Eg. 1,3,5-7. * If a start point is supplied, * random search evaluates the start * point and then randomly looks for * subsets that are as good as or better * than the start point with the same * or lower cardinality.</pre> * * <pre> -F &lt;percent&gt; * Percent of search space to consider. * (default = 25%).</pre> * * <pre> -V * Output subsets as the search progresses. 
* (default = false).</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported * **/ public void setOptions (String[] options) throws Exception { String optionString; resetOptions(); optionString = Utils.getOption('P', options); if (optionString.length() != 0) { setStartSet(optionString); } optionString = Utils.getOption('F',options); if (optionString.length() != 0) { setSearchPercent((new Double(optionString)).doubleValue()); } setVerbose(Utils.getFlag('V',options)); } /** * Gets the current settings of RandomSearch. * @return an array of strings suitable for passing to setOptions() */ public String[] getOptions () { String[] options = new String[5]; int current = 0; if (m_verbose) { options[current++] = "-V"; } if (!(getStartSet().equals(""))) { options[current++] = "-P"; options[current++] = "" + startSetToString(); } options[current++] = "-F"; options[current++] = "" + getSearchPercent(); while (current < options.length) { options[current++] = ""; } return options; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String startSetTipText() { return "Set the start point for the search. This is specified as a comma " +"seperated list off attribute indexes starting at 1. It can include " +"ranges. Eg. 1,2,5-9,17. If specified, Random searches for subsets " +"of attributes that are as good as or better than the start set with " +"the same or lower cardinality."; } /** * Sets a starting set of attributes for the search. It is the * search method's responsibility to report this start set (if any) * in its toString() method. * @param startSet a string containing a list of attributes (and or ranges), * eg. 1,2,6,10-15. "" indicates no start point. 
* If a start point is supplied, random search evaluates the * start point and then looks for subsets that are as good as or better * than the start point with the same or lower cardinality. * @throws Exception if start set can't be set. */ public void setStartSet (String startSet) throws Exception { m_startRange.setRanges(startSet); } /** * Returns a list of attributes (and or attribute ranges) as a String * @return a list of attributes (and or attribute ranges) */ public String getStartSet () { return m_startRange.getRanges(); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String verboseTipText() { return "Print progress information. Sends progress info to the terminal " +"as the search progresses."; } /** * set whether or not to output new best subsets as the search proceeds * @param v true if output is to be verbose */ public void setVerbose(boolean v) { m_verbose = v; } /** * get whether or not output is verbose * @return true if output is set to verbose */ public boolean getVerbose() { return m_verbose; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String searchPercentTipText() { return "Percentage of the search space to explore."; } /** * set the percentage of the search space to consider * @param p percent of the search space ( 0 < p <= 100) */ public void setSearchPercent(double p) { p = Math.abs(p); if (p == 0) { p = 25; } if (p > 100.0) { p = 100; } m_searchSize = (p/100.0); } /** * get the percentage of the search space to consider * @return the percent of the search space explored */ public double getSearchPercent() { return m_searchSize * 100; } /** * converts the array of starting attributes to a string. This is * used by getOptions to return the actual attributes specified * as the starting set. 
This is better than using m_startRanges.getRanges() * as the same start set can be specified in different ways from the * command line---eg 1,2,3 == 1-3. This is to ensure that stuff that * is stored in a database is comparable. * @return a comma seperated list of individual attribute numbers as a String */ private String startSetToString() { StringBuffer FString = new StringBuffer(); boolean didPrint; if (m_starting == null) { return getStartSet(); } for (int i = 0; i < m_starting.length; i++) { didPrint = false; if ((m_hasClass == false) || (m_hasClass == true && i != m_classIndex)) { FString.append((m_starting[i] + 1)); didPrint = true; } if (i == (m_starting.length - 1)) { FString.append(""); } else { if (didPrint) { FString.append(","); } } } return FString.toString(); } /** * prints a description of the search * @return a description of the search as a string */ public String toString() { StringBuffer text = new StringBuffer(); text.append("\tRandom search.\n\tStart set: "); if (m_starting == null) { text.append("no attributes\n"); } else { text.append(startSetToString()+"\n"); } text.append("\tNumber of iterations: "+m_iterations+" (" +(m_searchSize * 100.0)+"% of the search space)\n"); text.append("\tMerit of best subset found: " +Utils.doubleToString(Math.abs(m_bestMerit),8,3)+"\n"); return text.toString(); } /** * Searches the attribute subset space randomly. * * @param ASEval the attribute evaluator to guide the search * @param data the training instances. 
* @return an array (not necessarily ordered) of selected attribute indexes * @throws Exception if the search can't be completed */ public int[] search (ASEvaluation ASEval, Instances data) throws Exception { double best_merit; int sizeOfBest = m_numAttribs; BitSet temp; m_bestGroup = new BitSet(m_numAttribs); m_onlyConsiderBetterAndSmaller = false; if (!(ASEval instanceof SubsetEvaluator)) { throw new Exception(ASEval.getClass().getName() + " is not a " + "Subset evaluator!"); } m_random = new Random(m_seed); if (ASEval instanceof UnsupervisedSubsetEvaluator) { m_hasClass = false; } else { m_hasClass = true; m_classIndex = data.classIndex(); } SubsetEvaluator ASEvaluator = (SubsetEvaluator)ASEval; m_numAttribs = data.numAttributes(); m_startRange.setUpper(m_numAttribs-1); if (!(getStartSet().equals(""))) { m_starting = m_startRange.getSelection(); } // If a starting subset has been supplied, then initialise the bitset if (m_starting != null) { for (int i = 0; i < m_starting.length; i++) { if ((m_starting[i]) != m_classIndex) { m_bestGroup.set(m_starting[i]); } } m_onlyConsiderBetterAndSmaller = true; best_merit = ASEvaluator.evaluateSubset(m_bestGroup); sizeOfBest = countFeatures(m_bestGroup); } else { // do initial random subset m_bestGroup = generateRandomSubset(); best_merit = ASEvaluator.evaluateSubset(m_bestGroup); } if (m_verbose) { System.out.println("Initial subset (" +Utils.doubleToString(Math. 
abs(best_merit),8,5) +"): "+printSubset(m_bestGroup)); } int i; if (m_hasClass) { i = m_numAttribs -1; } else { i = m_numAttribs; } m_iterations = (int)((m_searchSize * Math.pow(2, i))); int tempSize; double tempMerit; // main loop for (i=0;i<m_iterations;i++) { temp = generateRandomSubset(); if (m_onlyConsiderBetterAndSmaller) { tempSize = countFeatures(temp); if (tempSize <= sizeOfBest) { tempMerit = ASEvaluator.evaluateSubset(temp); if (tempMerit >= best_merit) { sizeOfBest = tempSize; m_bestGroup = temp; best_merit = tempMerit; if (m_verbose) { System.out.print("New best subset (" +Utils.doubleToString(Math. abs(best_merit),8,5) +"): "+printSubset(m_bestGroup) + " :"); System.out.println(Utils. doubleToString((((double)i)/ ((double)m_iterations)* 100.0),5,1) +"% done"); } } } } else { tempMerit = ASEvaluator.evaluateSubset(temp); if (tempMerit > best_merit) { m_bestGroup = temp; best_merit = tempMerit; if (m_verbose) { System.out.print("New best subset (" +Utils.doubleToString(Math.abs(best_merit),8,5) +"): "+printSubset(m_bestGroup) + " :"); System.out.println(Utils. 
doubleToString((((double)i)/ ((double)m_iterations) *100.0),5,1) +"% done"); } } } } m_bestMerit = best_merit; return attributeList(m_bestGroup); } /** * prints a subset as a series of attribute numbers * @param temp the subset to print * @return a subset as a String of attribute numbers */ private String printSubset(BitSet temp) { StringBuffer text = new StringBuffer(); for (int j=0;j<m_numAttribs;j++) { if (temp.get(j)) { text.append((j+1)+" "); } } return text.toString(); } /** * converts a BitSet into a list of attribute indexes * @param group the BitSet to convert * @return an array of attribute indexes **/ private int[] attributeList (BitSet group) { int count = 0; // count how many were selected for (int i = 0; i < m_numAttribs; i++) { if (group.get(i)) { count++; } } int[] list = new int[count]; count = 0; for (int i = 0; i < m_numAttribs; i++) { if (group.get(i)) { list[count++] = i; } } return list; } /** * generates a random subset * @return a random subset as a BitSet */ private BitSet generateRandomSubset() { BitSet temp = new BitSet(m_numAttribs); double r; for (int i=0;i<m_numAttribs;i++) { r = m_random.nextDouble(); if (r <= 0.5) { if (m_hasClass && i == m_classIndex) { } else { temp.set(i); } } } return temp; } /** * counts the number of features in a subset * @param featureSet the feature set for which to count the features * @return the number of features in the subset */ private int countFeatures(BitSet featureSet) { int count = 0; for (int i=0;i<m_numAttribs;i++) { if (featureSet.get(i)) { count++; } } return count; } /** * resets to defaults */ private void resetOptions() { m_starting = null; m_startRange = new Range(); m_searchSize = 0.25; m_seed = 1; m_onlyConsiderBetterAndSmaller = false; m_verbose = false; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.18 $"); } }
19,191
27.687593
459
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/RankSearch.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * RankSearch.java * Copyright (C) 1999 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.util.BitSet; import java.util.Enumeration; import java.util.Vector; /** <!-- globalinfo-start --> * RankSearch : <br/> * <br/> * Uses an attribute/subset evaluator to rank all attributes. If a subset evaluator is specified, * then a forward selection search is used to generate a ranked list. From the * ranked list of attributes, subsets of increasing size are evaluated, ie. * The best attribute, the best attribute plus the next best attribute, etc.... * The best attribute set is reported. RankSearch is linear in the number of * attributes if a simple attribute evaluator is used such as GainRatioAttributeEval. * For more information see:<br/> * <br/> * Mark Hall, Geoffrey Holmes (2003). Benchmarking attribute selection techniques * for discrete class data mining. IEEE Transactions on Knowledge and Data Engineering. 15(6):1437-1447. 
* <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -A &lt;attribute evaluator&gt; * class name of attribute evaluator to use for ranking. Place any * evaluator options LAST on the command line following a "--". * eg.: * -A weka.attributeSelection.GainRatioAttributeEval ... -- -M * (default: weka.attributeSelection.GainRatioAttributeEval)</pre> * * <pre> -S &lt;step size&gt; * number of attributes to be added from the * ranking in each iteration (default = 1).</pre> * * <pre> -R &lt;start point&gt; * point in the ranking to start evaluating from. * (default = 0, ie. the head of the ranking).</pre> * * <pre> * Options specific to evaluator weka.attributeSelection.GainRatioAttributeEval: * </pre> * * <pre> -M * treat missing values as a seperate value.</pre> * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 6253 $ */ public class RankSearch extends ASSearch implements OptionHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -7992268736874353755L; /** does the data have a class */ private boolean m_hasClass; /** holds the class index */ private int m_classIndex; /** number of attributes in the data */ private int m_numAttribs; /** the best subset found */ private BitSet m_best_group; /** the attribute evaluator to use for generating the ranking */ private ASEvaluation m_ASEval; /** the subset evaluator with which to evaluate the ranking */ private ASEvaluation m_SubsetEval; /** the training instances */ private Instances m_Instances; /** the merit of the best subset found */ private double m_bestMerit; /** will hold the attribute ranking */ private int [] m_Ranking; /** add this many attributes in each iteration from the ranking */ protected int m_add = 1; /** start from this point in the ranking */ protected int m_startPoint = 0; /** * Returns a string describing this search method * @return a description of the search method suitable 
for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "RankSearch : \n\n" +"Uses an attribute/subset evaluator to rank all attributes. " +"If a subset evaluator is specified, then a forward selection " +"search is used to generate a ranked list. From the ranked " +"list of attributes, subsets of increasing size are evaluated, ie. " +"The best attribute, the best attribute plus the next best attribute, " +"etc.... The best attribute set is reported. RankSearch is linear in " +"the number of attributes if a simple attribute evaluator is used " +"such as GainRatioAttributeEval. For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "Mark Hall and Geoffrey Holmes"); result.setValue(Field.YEAR, "2003"); result.setValue(Field.TITLE, "Benchmarking attribute selection techniques for " + "discrete class data mining"); result.setValue(Field.JOURNAL, "IEEE Transactions on Knowledge and Data Engineering"); result.setValue(Field.VOLUME, "15"); result.setValue(Field.NUMBER, "6"); result.setValue(Field.PAGES, "1437-1447"); result.setValue(Field.PUBLISHER, "IEEE Computer Society"); return result; } /** * Constructor */ public RankSearch () { resetOptions(); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String attributeEvaluatorTipText() { return "Attribute evaluator to use for generating a ranking."; } /** * Set the attribute evaluator to use for generating the ranking. 
* @param newEvaluator the attribute evaluator to use. */ public void setAttributeEvaluator(ASEvaluation newEvaluator) { m_ASEval = newEvaluator; } /** * Get the attribute evaluator used to generate the ranking. * @return the evaluator used to generate the ranking. */ public ASEvaluation getAttributeEvaluator() { return m_ASEval; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String stepSizeTipText() { return "Add this many attributes from the ranking in each iteration."; } /** * Set the number of attributes to add from the rankining * in each iteration * @param ss the number of attribes to add. */ public void setStepSize(int ss) { if (ss > 0) { m_add = ss; } } /** * Get the number of attributes to add from the rankining * in each iteration * @return the number of attributes to add. */ public int getStepSize() { return m_add; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String startPointTipText() { return "Start evaluating from this point in the ranking."; } /** * Set the point at which to start evaluating the ranking * @param sp the position in the ranking to start at */ public void setStartPoint(int sp) { if (sp >= 0) { m_startPoint = sp; } } /** * Get the point at which to start evaluating the ranking * @return the position in the ranking to start at */ public int getStartPoint() { return m_startPoint; } /** * Returns an enumeration describing the available options. * @return an enumeration of all the available options. **/ public Enumeration listOptions () { Vector newVector = new Vector(4); newVector.addElement(new Option( "\tclass name of attribute evaluator to use for ranking. Place any\n" + "\tevaluator options LAST on the command line following a \"--\".\n" + "\teg.:\n" + "\t\t-A weka.attributeSelection.GainRatioAttributeEval ... 
-- -M\n" + "\t(default: weka.attributeSelection.GainRatioAttributeEval)", "A", 1, "-A <attribute evaluator>")); newVector.addElement(new Option( "\tnumber of attributes to be added from the" +"\n\tranking in each iteration (default = 1).", "S", 1,"-S <step size>")); newVector.addElement(new Option( "\tpoint in the ranking to start evaluating from. " +"\n\t(default = 0, ie. the head of the ranking).", "R", 1,"-R <start point>")); if ((m_ASEval != null) && (m_ASEval instanceof OptionHandler)) { newVector.addElement(new Option("", "", 0, "\nOptions specific to " + "evaluator " + m_ASEval.getClass().getName() + ":")); Enumeration enu = ((OptionHandler)m_ASEval).listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } } return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -A &lt;attribute evaluator&gt; * class name of attribute evaluator to use for ranking. Place any * evaluator options LAST on the command line following a "--". * eg.: * -A weka.attributeSelection.GainRatioAttributeEval ... -- -M * (default: weka.attributeSelection.GainRatioAttributeEval)</pre> * * <pre> -S &lt;step size&gt; * number of attributes to be added from the * ranking in each iteration (default = 1).</pre> * * <pre> -R &lt;start point&gt; * point in the ranking to start evaluating from. * (default = 0, ie. 
the head of the ranking).</pre> * * <pre> * Options specific to evaluator weka.attributeSelection.GainRatioAttributeEval: * </pre> * * <pre> -M * treat missing values as a seperate value.</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions (String[] options) throws Exception { String optionString; resetOptions(); optionString = Utils.getOption('S', options); if (optionString.length() != 0) { setStepSize(Integer.parseInt(optionString)); } optionString = Utils.getOption('R', options); if (optionString.length() != 0) { setStartPoint(Integer.parseInt(optionString)); } optionString = Utils.getOption('A', options); if (optionString.length() == 0) optionString = GainRatioAttributeEval.class.getName(); setAttributeEvaluator(ASEvaluation.forName(optionString, Utils.partitionOptions(options))); } /** * Gets the current settings of WrapperSubsetEval. * * @return an array of strings suitable for passing to setOptions() */ public String[] getOptions () { String[] evaluatorOptions = new String[0]; if ((m_ASEval != null) && (m_ASEval instanceof OptionHandler)) { evaluatorOptions = ((OptionHandler)m_ASEval).getOptions(); } String[] options = new String[8 + evaluatorOptions.length]; int current = 0; options[current++] = "-S"; options[current++] = ""+getStepSize(); options[current++] = "-R"; options[current++] = ""+getStartPoint(); if (getAttributeEvaluator() != null) { options[current++] = "-A"; options[current++] = getAttributeEvaluator().getClass().getName(); } if (evaluatorOptions.length > 0) { options[current++] = "--"; System.arraycopy(evaluatorOptions, 0, options, current, evaluatorOptions.length); current += evaluatorOptions.length; } while (current < options.length) { options[current++] = ""; } return options; } /** * Reset the search method. 
*/ protected void resetOptions () { m_ASEval = new GainRatioAttributeEval(); m_Ranking = null; } /** * Ranks attributes using the specified attribute evaluator and then * searches the ranking using the supplied subset evaluator. * * @param ASEval the subset evaluator to guide the search * @param data the training instances. * @return an array (not necessarily ordered) of selected attribute indexes * @throws Exception if the search can't be completed */ public int[] search (ASEvaluation ASEval, Instances data) throws Exception { double best_merit = -Double.MAX_VALUE; double temp_merit; BitSet temp_group, best_group=null; if (!(ASEval instanceof SubsetEvaluator)) { throw new Exception(ASEval.getClass().getName() + " is not a " + "Subset evaluator!"); } m_SubsetEval = ASEval; m_Instances = data; m_numAttribs = m_Instances.numAttributes(); /* if (m_ASEval instanceof AttributeTransformer) { throw new Exception("Can't use an attribute transformer " +"with RankSearch"); } */ if (m_ASEval instanceof UnsupervisedAttributeEvaluator || m_ASEval instanceof UnsupervisedSubsetEvaluator) { m_hasClass = false; /* if (!(m_SubsetEval instanceof UnsupervisedSubsetEvaluator)) { throw new Exception("Must use an unsupervised subset evaluator."); } */ } else { m_hasClass = true; m_classIndex = m_Instances.classIndex(); } if (m_ASEval instanceof AttributeEvaluator) { // generate the attribute ranking first Ranker ranker = new Ranker(); m_ASEval.buildEvaluator(m_Instances); if (m_ASEval instanceof AttributeTransformer) { // get the transformed data a rebuild the subset evaluator m_Instances = ((AttributeTransformer)m_ASEval). 
transformedData(m_Instances); ((ASEvaluation)m_SubsetEval).buildEvaluator(m_Instances); } m_Ranking = ranker.search(m_ASEval, m_Instances); } else { GreedyStepwise fs = new GreedyStepwise(); double [][]rankres; fs.setGenerateRanking(true); ((ASEvaluation)m_ASEval).buildEvaluator(m_Instances); fs.search(m_ASEval, m_Instances); rankres = fs.rankedAttributes(); m_Ranking = new int[rankres.length]; for (int i=0;i<rankres.length;i++) { m_Ranking[i] = (int)rankres[i][0]; } } // now evaluate the attribute ranking for (int i=m_startPoint;i<m_Ranking.length;i+=m_add) { temp_group = new BitSet(m_numAttribs); for (int j=0;j<=i;j++) { temp_group.set(m_Ranking[j]); } temp_merit = ((SubsetEvaluator)m_SubsetEval).evaluateSubset(temp_group); if (temp_merit > best_merit) { best_merit = temp_merit;; best_group = temp_group; } } m_bestMerit = best_merit; return attributeList(best_group); } /** * converts a BitSet into a list of attribute indexes * @param group the BitSet to convert * @return an array of attribute indexes **/ private int[] attributeList (BitSet group) { int count = 0; // count how many were selected for (int i = 0; i < m_numAttribs; i++) { if (group.get(i)) { count++; } } int[] list = new int[count]; count = 0; for (int i = 0; i < m_numAttribs; i++) { if (group.get(i)) { list[count++] = i; } } return list; } /** * returns a description of the search as a String * @return a description of the search */ public String toString () { StringBuffer text = new StringBuffer(); text.append("\tRankSearch :\n"); text.append("\tAttribute evaluator : " + getAttributeEvaluator().getClass().getName() +" "); if (m_ASEval instanceof OptionHandler) { String[] evaluatorOptions = new String[0]; evaluatorOptions = ((OptionHandler)m_ASEval).getOptions(); for (int i=0;i<evaluatorOptions.length;i++) { text.append(evaluatorOptions[i]+' '); } } text.append("\n"); text.append("\tAttribute ranking : \n"); int rlength = (int)(Math.log(m_Ranking.length) / Math.log(10) + 1); for (int 
i=0;i<m_Ranking.length;i++) { text.append("\t "+Utils.doubleToString((double)(m_Ranking[i]+1), rlength,0) +" "+m_Instances.attribute(m_Ranking[i]).name()+'\n'); } text.append("\tMerit of best subset found : "); int fieldwidth = 3; double precision = (m_bestMerit - (int)m_bestMerit); if (Math.abs(m_bestMerit) > 0) { fieldwidth = (int)Math.abs((Math.log(Math.abs(m_bestMerit)) / Math.log(10)))+2; } if (Math.abs(precision) > 0) { precision = Math.abs((Math.log(Math.abs(precision)) / Math.log(10)))+3; } else { precision = 2; } text.append(Utils.doubleToString(Math.abs(m_bestMerit), fieldwidth+(int)precision, (int)precision)+"\n"); return text.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 6253 $"); } }
17,914
30.877224
104
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/RankedOutputSearch.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RankedOutputSearch.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; /** * Interface for search methods capable of producing a * ranked list of attributes. * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public interface RankedOutputSearch { // =============== // Public methods. // =============== /** * Returns a X by 2 list of attribute indexes and corresponding * evaluations from best (highest) to worst. * @return the ranked list of attribute indexes in an array of ints * @exception Exception if the ranking can't be produced */ double[][] rankedAttributes() throws Exception; /** * Sets a threshold by which attributes can be discarded from the * ranking. This threshold is used by the AttributeSelection module * which does the actual discarding of attributes---the implementer * of this method needs only to provide a variable in which to store the * supplied threshold. -Double.MAX_VALUE is reserved to mean no threshold, * ie, retain all attributes. * @param threshold the threshold. */ void setThreshold(double threshold); /** * Gets the threshold by which attributes can be discarded. Discarding * of attributes is done by the AttributeSelection module using the * threshold returned by this method. 
* @return a threshold by which to discard attributes */ double getThreshold(); /** * Specify the number of attributes to select from the ranked list. < 0 * indicates that all attributes are to be retained. NumToSelect has * precedence over threshold, ie. if there is a non -1 value for NumToSelect * then this will take precedence over any threshold value. * @param numToSelect the number of attributes to retain */ void setNumToSelect(int numToSelect); /** * Gets the user specified number of attributes to be retained. * @return the number of attributes to retain */ int getNumToSelect(); /** * Gets the calculated number of attributes to retain. This is the * actual number of attributes to retain. This is the same as * getNumToSelect if the user specifies a number which is not less * than zero. Otherwise it should be the number of attributes in the * (potentially transformed) data. */ int getCalculatedNumToSelect(); /** * Sets whether or not ranking is to be performed. * When a search method is capable of producing a ranked list * of attributes, the user has the choice of seeing the results of a * normal search or seeing a ranked list. * @param doRanking true if ranked list is to be produced */ void setGenerateRanking(boolean doRanking); /** * Gets whether the user has opted to see a ranked list of * attributes rather than the normal result of the search * @return true if a ranked list has been requested. */ boolean getGenerateRanking(); }
3,650
32.805556
78
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/Ranker.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Ranker.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import java.util.Enumeration; import java.util.Vector; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.Range; import weka.core.RevisionUtils; import weka.core.Utils; /** <!-- globalinfo-start --> * Ranker : <br/> * <br/> * Ranks attributes by their individual evaluations. Use in conjunction with attribute evaluators (ReliefF, GainRatio, Entropy etc).<br/> * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P &lt;start set&gt; * Specify a starting set of attributes. * Eg. 1,3,5-7. 
* Any starting attributes specified are * ignored during the ranking.</pre> * * <pre> -T &lt;threshold&gt; * Specify a theshold by which attributes * may be discarded from the ranking.</pre> * * <pre> -N &lt;num to select&gt; * Specify number of attributes to select</pre> * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class Ranker extends ASSearch implements RankedOutputSearch, StartSetHandler, OptionHandler { /** for serialization */ static final long serialVersionUID = -9086714848510751934L; /** Holds the starting set as an array of attributes */ private int[] m_starting; /** Holds the start set for the search as a range */ private Range m_startRange; /** Holds the ordered list of attributes */ private int[] m_attributeList; /** Holds the list of attribute merit scores */ private double[] m_attributeMerit; /** Data has class attribute---if unsupervised evaluator then no class */ private boolean m_hasClass; /** Class index of the data if supervised evaluator */ private int m_classIndex; /** The number of attribtes */ private int m_numAttribs; /** * A threshold by which to discard attributes---used by the * AttributeSelection module */ private double m_threshold; /** The number of attributes to select. -1 indicates that all attributes are to be retained. Has precedence over m_threshold */ private int m_numToSelect = -1; /** Used to compute the number to select */ private int m_calculatedNumToSelect = -1; /** * Returns a string describing this search method * @return a description of the search suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Ranker : \n\nRanks attributes by their individual evaluations. 
" +"Use in conjunction with attribute evaluators (ReliefF, GainRatio, " +"Entropy etc).\n"; } /** * Constructor */ public Ranker () { resetOptions(); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numToSelectTipText() { return "Specify the number of attributes to retain. The default value " +"(-1) indicates that all attributes are to be retained. Use either " +"this option or a threshold to reduce the attribute set."; } /** * Specify the number of attributes to select from the ranked list. -1 * indicates that all attributes are to be retained. * @param n the number of attributes to retain */ public void setNumToSelect(int n) { m_numToSelect = n; } /** * Gets the number of attributes to be retained. * @return the number of attributes to retain */ public int getNumToSelect() { return m_numToSelect; } /** * Gets the calculated number to select. This might be computed * from a threshold, or if < 0 is set as the number to select then * it is set to the number of attributes in the (transformed) data. * @return the calculated number of attributes to select */ public int getCalculatedNumToSelect() { if (m_numToSelect >= 0) { m_calculatedNumToSelect = m_numToSelect; } return m_calculatedNumToSelect; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String thresholdTipText() { return "Set threshold by which attributes can be discarded. Default value " + "results in no attributes being discarded. Use either this option or " +"numToSelect to reduce the attribute set."; } /** * Set the threshold by which the AttributeSelection module can discard * attributes. * @param threshold the threshold. 
*/ public void setThreshold(double threshold) { m_threshold = threshold; } /** * Returns the threshold so that the AttributeSelection module can * discard attributes from the ranking. */ public double getThreshold() { return m_threshold; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String generateRankingTipText() { return "A constant option. Ranker is only capable of generating " +" attribute rankings."; } /** * This is a dummy set method---Ranker is ONLY capable of producing * a ranked list of attributes for attribute evaluators. * @param doRank this parameter is N/A and is ignored */ public void setGenerateRanking(boolean doRank) { } /** * This is a dummy method. Ranker can ONLY be used with attribute * evaluators and as such can only produce a ranked list of attributes * @return true all the time. */ public boolean getGenerateRanking() { return true; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String startSetTipText() { return "Specify a set of attributes to ignore. " +" When generating the ranking, Ranker will not evaluate the attributes " +" in this list. " +"This is specified as a comma " +"seperated list off attribute indexes starting at 1. It can include " +"ranges. Eg. 1,2,5-9,17."; } /** * Sets a starting set of attributes for the search. It is the * search method's responsibility to report this start set (if any) * in its toString() method. * @param startSet a string containing a list of attributes (and or ranges), * eg. 1,2,6,10-15. * @throws Exception if start set can't be set. 
*/ public void setStartSet (String startSet) throws Exception { m_startRange.setRanges(startSet); } /** * Returns a list of attributes (and or attribute ranges) as a String * @return a list of attributes (and or attribute ranges) */ public String getStartSet () { return m_startRange.getRanges(); } /** * Returns an enumeration describing the available options. * @return an enumeration of all the available options. **/ public Enumeration listOptions () { Vector newVector = new Vector(3); newVector .addElement(new Option("\tSpecify a starting set of attributes.\n" + "\tEg. 1,3,5-7.\n" +"\tAny starting attributes specified are\n" +"\tignored during the ranking." ,"P",1 , "-P <start set>")); newVector .addElement(new Option("\tSpecify a theshold by which attributes\n" + "\tmay be discarded from the ranking.","T",1 , "-T <threshold>")); newVector .addElement(new Option("\tSpecify number of attributes to select" ,"N",1 , "-N <num to select>")); return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P &lt;start set&gt; * Specify a starting set of attributes. * Eg. 1,3,5-7. 
* Any starting attributes specified are * ignored during the ranking.</pre> * * <pre> -T &lt;threshold&gt; * Specify a theshold by which attributes * may be discarded from the ranking.</pre> * * <pre> -N &lt;num to select&gt; * Specify number of attributes to select</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions (String[] options) throws Exception { String optionString; resetOptions(); optionString = Utils.getOption('P', options); if (optionString.length() != 0) { setStartSet(optionString); } optionString = Utils.getOption('T', options); if (optionString.length() != 0) { Double temp; temp = Double.valueOf(optionString); setThreshold(temp.doubleValue()); } optionString = Utils.getOption('N', options); if (optionString.length() != 0) { setNumToSelect(Integer.parseInt(optionString)); } } /** * Gets the current settings of ReliefFAttributeEval. * * @return an array of strings suitable for passing to setOptions() */ public String[] getOptions () { String[] options = new String[6]; int current = 0; if (!(getStartSet().equals(""))) { options[current++] = "-P"; options[current++] = ""+startSetToString(); } options[current++] = "-T"; options[current++] = "" + getThreshold(); options[current++] = "-N"; options[current++] = ""+getNumToSelect(); while (current < options.length) { options[current++] = ""; } return options; } /** * converts the array of starting attributes to a string. This is * used by getOptions to return the actual attributes specified * as the starting set. This is better than using m_startRanges.getRanges() * as the same start set can be specified in different ways from the * command line---eg 1,2,3 == 1-3. This is to ensure that stuff that * is stored in a database is comparable. 
* @return a comma seperated list of individual attribute numbers as a String */ private String startSetToString() { StringBuffer FString = new StringBuffer(); boolean didPrint; if (m_starting == null) { return getStartSet(); } for (int i = 0; i < m_starting.length; i++) { didPrint = false; if ((m_hasClass == false) || (m_hasClass == true && i != m_classIndex)) { FString.append((m_starting[i] + 1)); didPrint = true; } if (i == (m_starting.length - 1)) { FString.append(""); } else { if (didPrint) { FString.append(","); } } } return FString.toString(); } /** * Kind of a dummy search algorithm. Calls a Attribute evaluator to * evaluate each attribute not included in the startSet and then sorts * them to produce a ranked list of attributes. * * @param ASEval the attribute evaluator to guide the search * @param data the training instances. * @return an array (not necessarily ordered) of selected attribute indexes * @throws Exception if the search can't be completed */ public int[] search (ASEvaluation ASEval, Instances data) throws Exception { int i, j; if (!(ASEval instanceof AttributeEvaluator)) { throw new Exception(ASEval.getClass().getName() + " is not a" + "Attribute evaluator!"); } m_numAttribs = data.numAttributes(); if (ASEval instanceof UnsupervisedAttributeEvaluator) { m_hasClass = false; } else { m_classIndex = data.classIndex(); if (m_classIndex >= 0) { m_hasClass = true; } else { m_hasClass = false; } } // get the transformed data and check to see if the transformer // preserves a class index if (ASEval instanceof AttributeTransformer) { data = ((AttributeTransformer)ASEval).transformedHeader(); if (m_classIndex >= 0 && data.classIndex() >= 0) { m_classIndex = data.classIndex(); m_hasClass = true; } } m_startRange.setUpper(m_numAttribs - 1); if (!(getStartSet().equals(""))) { m_starting = m_startRange.getSelection(); } int sl=0; if (m_starting != null) { sl = m_starting.length; } if ((m_starting != null) && (m_hasClass == true)) { // see if the supplied 
list contains the class index boolean ok = false; for (i = 0; i < sl; i++) { if (m_starting[i] == m_classIndex) { ok = true; break; } } if (ok == false) { sl++; } } else { if (m_hasClass == true) { sl++; } } m_attributeList = new int[m_numAttribs - sl]; m_attributeMerit = new double[m_numAttribs - sl]; // add in those attributes not in the starting (omit list) for (i = 0, j = 0; i < m_numAttribs; i++) { if (!inStarting(i)) { m_attributeList[j++] = i; } } AttributeEvaluator ASEvaluator = (AttributeEvaluator)ASEval; for (i = 0; i < m_attributeList.length; i++) { m_attributeMerit[i] = ASEvaluator.evaluateAttribute(m_attributeList[i]); } double[][] tempRanked = rankedAttributes(); int[] rankedAttributes = new int[m_attributeList.length]; for (i = 0; i < m_attributeList.length; i++) { rankedAttributes[i] = (int)tempRanked[i][0]; } return rankedAttributes; } /** * Sorts the evaluated attribute list * * @return an array of sorted (highest eval to lowest) attribute indexes * @throws Exception of sorting can't be done. 
*/ public double[][] rankedAttributes () throws Exception { int i, j; if (m_attributeList == null || m_attributeMerit == null) { throw new Exception("Search must be performed before a ranked " + "attribute list can be obtained"); } int[] ranked = Utils.sort(m_attributeMerit); // reverse the order of the ranked indexes double[][] bestToWorst = new double[ranked.length][2]; for (i = ranked.length - 1, j = 0; i >= 0; i--) { bestToWorst[j++][0] = ranked[i]; } // convert the indexes to attribute indexes for (i = 0; i < bestToWorst.length; i++) { int temp = ((int)bestToWorst[i][0]); bestToWorst[i][0] = m_attributeList[temp]; bestToWorst[i][1] = m_attributeMerit[temp]; } if (m_numToSelect > bestToWorst.length) { throw new Exception("More attributes requested than exist in the data"); } if (m_numToSelect <= 0) { if (m_threshold == -Double.MAX_VALUE) { m_calculatedNumToSelect = bestToWorst.length; } else { determineNumToSelectFromThreshold(bestToWorst); } } /* if (m_numToSelect > 0) { determineThreshFromNumToSelect(bestToWorst); } */ return bestToWorst; } private void determineNumToSelectFromThreshold(double [][] ranking) { int count = 0; for (int i = 0; i < ranking.length; i++) { if (ranking[i][1] > m_threshold) { count++; } } m_calculatedNumToSelect = count; } private void determineThreshFromNumToSelect(double [][] ranking) throws Exception { if (m_numToSelect > ranking.length) { throw new Exception("More attributes requested than exist in the data"); } if (m_numToSelect == ranking.length) { return; } m_threshold = (ranking[m_numToSelect-1][1] + ranking[m_numToSelect][1]) / 2.0; } /** * returns a description of the search as a String * @return a description of the search */ public String toString () { StringBuffer BfString = new StringBuffer(); BfString.append("\tAttribute ranking.\n"); if (m_starting != null) { BfString.append("\tIgnored attributes: "); BfString.append(startSetToString()); BfString.append("\n"); } if (m_threshold != -Double.MAX_VALUE) { 
BfString.append("\tThreshold for discarding attributes: " + Utils.doubleToString(m_threshold,8,4)+"\n"); } return BfString.toString(); } /** * Resets stuff to default values */ protected void resetOptions () { m_starting = null; m_startRange = new Range(); m_attributeList = null; m_attributeMerit = null; m_threshold = -Double.MAX_VALUE; } private boolean inStarting (int feat) { // omit the class from the evaluation if ((m_hasClass == true) && (feat == m_classIndex)) { return true; } if (m_starting == null) { return false; } for (int i = 0; i < m_starting.length; i++) { if (m_starting[i] == feat) { return true; } } return false; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
17,858
27.302694
137
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/ReliefFAttributeEval.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * ReliefFAttributeEval.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** <!-- globalinfo-start --> * ReliefFAttributeEval :<br/> * <br/> * Evaluates the worth of an attribute by repeatedly sampling an instance and considering the value of the given attribute for the nearest instance of the same and different class. Can operate on both discrete and continuous class data.<br/> * <br/> * For more information see:<br/> * <br/> * Kenji Kira, Larry A. Rendell: A Practical Approach to Feature Selection. In: Ninth International Workshop on Machine Learning, 249-256, 1992.<br/> * <br/> * Igor Kononenko: Estimating Attributes: Analysis and Extensions of RELIEF. 
In: European Conference on Machine Learning, 171-182, 1994.<br/> * <br/> * Marko Robnik-Sikonja, Igor Kononenko: An adaptation of Relief for attribute estimation in regression. In: Fourteenth International Conference on Machine Learning, 296-304, 1997. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Kira1992, * author = {Kenji Kira and Larry A. Rendell}, * booktitle = {Ninth International Workshop on Machine Learning}, * editor = {Derek H. Sleeman and Peter Edwards}, * pages = {249-256}, * publisher = {Morgan Kaufmann}, * title = {A Practical Approach to Feature Selection}, * year = {1992} * } * * &#64;inproceedings{Kononenko1994, * author = {Igor Kononenko}, * booktitle = {European Conference on Machine Learning}, * editor = {Francesco Bergadano and Luc De Raedt}, * pages = {171-182}, * publisher = {Springer}, * title = {Estimating Attributes: Analysis and Extensions of RELIEF}, * year = {1994} * } * * &#64;inproceedings{Robnik-Sikonja1997, * author = {Marko Robnik-Sikonja and Igor Kononenko}, * booktitle = {Fourteenth International Conference on Machine Learning}, * editor = {Douglas H. Fisher}, * pages = {296-304}, * publisher = {Morgan Kaufmann}, * title = {An adaptation of Relief for attribute estimation in regression}, * year = {1997} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -M &lt;num instances&gt; * Specify the number of instances to * sample when estimating attributes. * If not specified, then all instances * will be used.</pre> * * <pre> -D &lt;seed&gt; * Seed for randomly sampling instances. 
* (Default = 1)</pre> * * <pre> -K &lt;number of neighbours&gt; * Number of nearest neighbours (k) used * to estimate attribute relevances * (Default = 10).</pre> * * <pre> -W * Weight nearest neighbours by distance</pre> * * <pre> -A &lt;num&gt; * Specify sigma value (used in an exp * function to control how quickly * weights for more distant instances * decrease. Use in conjunction with -W. * Sensible value=1/5 to 1/10 of the * number of nearest neighbours. * (Default = 2)</pre> * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class ReliefFAttributeEval extends ASEvaluation implements AttributeEvaluator, OptionHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -8422186665795839379L; /** The training instances */ private Instances m_trainInstances; /** The class index */ private int m_classIndex; /** The number of attributes */ private int m_numAttribs; /** The number of instances */ private int m_numInstances; /** Numeric class */ private boolean m_numericClass; /** The number of classes if class is nominal */ private int m_numClasses; /** * Used to hold the probability of a different class val given nearest * instances (numeric class) */ private double m_ndc; /** * Used to hold the prob of different value of an attribute given * nearest instances (numeric class case) */ private double[] m_nda; /** * Used to hold the prob of a different class val and different att * val given nearest instances (numeric class case) */ private double[] m_ndcda; /** Holds the weights that relief assigns to attributes */ private double[] m_weights; /** Prior class probabilities (discrete class case) */ private double[] m_classProbs; /** * The number of instances to sample when estimating attributes * default == -1, use all instances */ private int m_sampleM; /** The number of nearest hits/misses */ private int m_Knn; /** k nearest scores + instance indexes for n classes */ 
private double[][][] m_karray; /** Upper bound for numeric attributes */ private double[] m_maxArray; /** Lower bound for numeric attributes */ private double[] m_minArray; /** Keep track of the farthest instance for each class */ private double[] m_worst; /** Index in the m_karray of the farthest instance for each class */ private int[] m_index; /** Number of nearest neighbours stored of each class */ private int[] m_stored; /** Random number seed used for sampling instances */ private int m_seed; /** * used to (optionally) weight nearest neighbours by their distance * from the instance in question. Each entry holds * exp(-((rank(r_i, i_j)/sigma)^2)) where rank(r_i,i_j) is the rank of * instance i_j in a sequence of instances ordered by the distance * from r_i. sigma is a user defined parameter, default=20 **/ private double[] m_weightsByRank; private int m_sigma; /** Weight by distance rather than equal weights */ private boolean m_weightByDistance; /** * Constructor */ public ReliefFAttributeEval () { resetOptions(); } /** * Returns a string describing this attribute evaluator * @return a description of the evaluator suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "ReliefFAttributeEval :\n\nEvaluates the worth of an attribute by " +"repeatedly sampling an instance and considering the value of the " +"given attribute for the nearest instance of the same and different " +"class. Can operate on both discrete and continuous class data.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. 
* * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Kenji Kira and Larry A. Rendell"); result.setValue(Field.TITLE, "A Practical Approach to Feature Selection"); result.setValue(Field.BOOKTITLE, "Ninth International Workshop on Machine Learning"); result.setValue(Field.EDITOR, "Derek H. Sleeman and Peter Edwards"); result.setValue(Field.YEAR, "1992"); result.setValue(Field.PAGES, "249-256"); result.setValue(Field.PUBLISHER, "Morgan Kaufmann"); additional = result.add(Type.INPROCEEDINGS); additional.setValue(Field.AUTHOR, "Igor Kononenko"); additional.setValue(Field.TITLE, "Estimating Attributes: Analysis and Extensions of RELIEF"); additional.setValue(Field.BOOKTITLE, "European Conference on Machine Learning"); additional.setValue(Field.EDITOR, "Francesco Bergadano and Luc De Raedt"); additional.setValue(Field.YEAR, "1994"); additional.setValue(Field.PAGES, "171-182"); additional.setValue(Field.PUBLISHER, "Springer"); additional = result.add(Type.INPROCEEDINGS); additional.setValue(Field.AUTHOR, "Marko Robnik-Sikonja and Igor Kononenko"); additional.setValue(Field.TITLE, "An adaptation of Relief for attribute estimation in regression"); additional.setValue(Field.BOOKTITLE, "Fourteenth International Conference on Machine Learning"); additional.setValue(Field.EDITOR, "Douglas H. Fisher"); additional.setValue(Field.YEAR, "1997"); additional.setValue(Field.PAGES, "296-304"); additional.setValue(Field.PUBLISHER, "Morgan Kaufmann"); return result; } /** * Returns an enumeration describing the available options. * @return an enumeration of all the available options. 
**/ public Enumeration listOptions () { Vector newVector = new Vector(4); newVector .addElement(new Option("\tSpecify the number of instances to\n" + "\tsample when estimating attributes.\n" + "\tIf not specified, then all instances\n" + "\twill be used.", "M", 1 , "-M <num instances>")); newVector. addElement(new Option("\tSeed for randomly sampling instances.\n" + "\t(Default = 1)", "D", 1 , "-D <seed>")); newVector. addElement(new Option("\tNumber of nearest neighbours (k) used\n" + "\tto estimate attribute relevances\n" + "\t(Default = 10).", "K", 1 , "-K <number of neighbours>")); newVector. addElement(new Option("\tWeight nearest neighbours by distance", "W" , 0, "-W")); newVector. addElement(new Option("\tSpecify sigma value (used in an exp\n" + "\tfunction to control how quickly\n" + "\tweights for more distant instances\n" + "\tdecrease. Use in conjunction with -W.\n" + "\tSensible value=1/5 to 1/10 of the\n" + "\tnumber of nearest neighbours.\n" + "\t(Default = 2)", "A", 1, "-A <num>")); return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -M &lt;num instances&gt; * Specify the number of instances to * sample when estimating attributes. * If not specified, then all instances * will be used.</pre> * * <pre> -D &lt;seed&gt; * Seed for randomly sampling instances. * (Default = 1)</pre> * * <pre> -K &lt;number of neighbours&gt; * Number of nearest neighbours (k) used * to estimate attribute relevances * (Default = 10).</pre> * * <pre> -W * Weight nearest neighbours by distance</pre> * * <pre> -A &lt;num&gt; * Specify sigma value (used in an exp * function to control how quickly * weights for more distant instances * decrease. Use in conjunction with -W. * Sensible value=1/5 to 1/10 of the * number of nearest neighbours. 
* (Default = 2)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions (String[] options) throws Exception { String optionString; resetOptions(); setWeightByDistance(Utils.getFlag('W', options)); optionString = Utils.getOption('M', options); if (optionString.length() != 0) { setSampleSize(Integer.parseInt(optionString)); } optionString = Utils.getOption('D', options); if (optionString.length() != 0) { setSeed(Integer.parseInt(optionString)); } optionString = Utils.getOption('K', options); if (optionString.length() != 0) { setNumNeighbours(Integer.parseInt(optionString)); } optionString = Utils.getOption('A', options); if (optionString.length() != 0) { setWeightByDistance(true); // turn on weighting by distance setSigma(Integer.parseInt(optionString)); } } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String sigmaTipText() { return "Set influence of nearest neighbours. Used in an exp function to " +"control how quickly weights decrease for more distant instances. " +"Use in conjunction with weightByDistance. Sensible values = 1/5 to " +"1/10 the number of nearest neighbours."; } /** * Sets the sigma value. * * @param s the value of sigma (> 0) * @throws Exception if s is not positive */ public void setSigma (int s) throws Exception { if (s <= 0) { throw new Exception("value of sigma must be > 0!"); } m_sigma = s; } /** * Get the value of sigma. * * @return the sigma value. */ public int getSigma () { return m_sigma; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numNeighboursTipText() { return "Number of nearest neighbours for attribute estimation."; } /** * Set the number of nearest neighbours * * @param n the number of nearest neighbours. 
*/ public void setNumNeighbours (int n) { m_Knn = n; } /** * Get the number of nearest neighbours * * @return the number of nearest neighbours */ public int getNumNeighbours () { return m_Knn; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String seedTipText() { return "Random seed for sampling instances."; } /** * Set the random number seed for randomly sampling instances. * * @param s the random number seed. */ public void setSeed (int s) { m_seed = s; } /** * Get the seed used for randomly sampling instances. * * @return the random number seed. */ public int getSeed () { return m_seed; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String sampleSizeTipText() { return "Number of instances to sample. Default (-1) indicates that all " +"instances will be used for attribute estimation."; } /** * Set the number of instances to sample for attribute estimation * * @param s the number of instances to sample. */ public void setSampleSize (int s) { m_sampleM = s; } /** * Get the number of instances used for estimating attributes * * @return the number of instances. */ public int getSampleSize () { return m_sampleM; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String weightByDistanceTipText() { return "Weight nearest neighbours by their distance."; } /** * Set the nearest neighbour weighting method * * @param b true nearest neighbours are to be weighted by distance. 
*/ public void setWeightByDistance (boolean b) { m_weightByDistance = b; } /** * Get whether nearest neighbours are being weighted by distance * * @return m_weightByDiffernce */ public boolean getWeightByDistance () { return m_weightByDistance; } /** * Gets the current settings of ReliefFAttributeEval. * * @return an array of strings suitable for passing to setOptions() */ public String[] getOptions () { String[] options = new String[9]; int current = 0; if (getWeightByDistance()) { options[current++] = "-W"; } options[current++] = "-M"; options[current++] = "" + getSampleSize(); options[current++] = "-D"; options[current++] = "" + getSeed(); options[current++] = "-K"; options[current++] = "" + getNumNeighbours(); if (getWeightByDistance()) { options[current++] = "-A"; options[current++] = "" + getSigma(); } while (current < options.length) { options[current++] = ""; } return options; } /** * Return a description of the ReliefF attribute evaluator. * * @return a description of the evaluator as a String. */ public String toString () { StringBuffer text = new StringBuffer(); if (m_trainInstances == null) { text.append("ReliefF feature evaluator has not been built yet\n"); } else { text.append("\tReliefF Ranking Filter"); text.append("\n\tInstances sampled: "); if (m_sampleM == -1) { text.append("all\n"); } else { text.append(m_sampleM + "\n"); } text.append("\tNumber of nearest neighbours (k): " + m_Knn + "\n"); if (m_weightByDistance) { text.append("\tExponentially decreasing (with distance) " + "influence for\n" + "\tnearest neighbours. Sigma: " + m_sigma + "\n"); } else { text.append("\tEqual influence nearest neighbours\n"); } } return text.toString(); } /** * Returns the capabilities of this evaluator. 
* * @return the capabilities of this evaluator * @see Capabilities */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Initializes a ReliefF attribute evaluator. * * @param data set of instances serving as training data * @throws Exception if the evaluator has not been * generated successfully */ public void buildEvaluator (Instances data) throws Exception { int z, totalInstances; Random r = new Random(m_seed); // can evaluator handle data? getCapabilities().testWithFail(data); m_trainInstances = data; m_classIndex = m_trainInstances.classIndex(); m_numAttribs = m_trainInstances.numAttributes(); m_numInstances = m_trainInstances.numInstances(); if (m_trainInstances.attribute(m_classIndex).isNumeric()) { m_numericClass = true; } else { m_numericClass = false; } if (!m_numericClass) { m_numClasses = m_trainInstances.attribute(m_classIndex).numValues(); } else { m_ndc = 0; m_numClasses = 1; m_nda = new double[m_numAttribs]; m_ndcda = new double[m_numAttribs]; } if (m_weightByDistance) // set up the rank based weights { m_weightsByRank = new double[m_Knn]; for (int i = 0; i < m_Knn; i++) { m_weightsByRank[i] = Math.exp(-((i/(double)m_sigma)*(i/(double)m_sigma))); } } // the final attribute weights m_weights = new double[m_numAttribs]; // num classes (1 for numeric class) knn neighbours, // and 0 = distance, 1 = instance index m_karray = new double[m_numClasses][m_Knn][2]; if (!m_numericClass) { m_classProbs = new double[m_numClasses]; for (int i = 0; i < m_numInstances; i++) { 
m_classProbs[(int)m_trainInstances.instance(i).value(m_classIndex)]++; } for (int i = 0; i < m_numClasses; i++) { m_classProbs[i] /= m_numInstances; } } m_worst = new double[m_numClasses]; m_index = new int[m_numClasses]; m_stored = new int[m_numClasses]; m_minArray = new double[m_numAttribs]; m_maxArray = new double[m_numAttribs]; for (int i = 0; i < m_numAttribs; i++) { m_minArray[i] = m_maxArray[i] = Double.NaN; } for (int i = 0; i < m_numInstances; i++) { updateMinMax(m_trainInstances.instance(i)); } if ((m_sampleM > m_numInstances) || (m_sampleM < 0)) { totalInstances = m_numInstances; } else { totalInstances = m_sampleM; } // process each instance, updating attribute weights for (int i = 0; i < totalInstances; i++) { if (totalInstances == m_numInstances) { z = i; } else { z = r.nextInt()%m_numInstances; } if (z < 0) { z *= -1; } if (!(m_trainInstances.instance(z).isMissing(m_classIndex))) { // first clear the knn and worst index stuff for the classes for (int j = 0; j < m_numClasses; j++) { m_index[j] = m_stored[j] = 0; for (int k = 0; k < m_Knn; k++) { m_karray[j][k][0] = m_karray[j][k][1] = 0; } } findKHitMiss(z); if (m_numericClass) { updateWeightsNumericClass(z); } else { updateWeightsDiscreteClass(z); } } } // now scale weights by 1/m_numInstances (nominal class) or // calculate weights numeric class // System.out.println("num inst:"+m_numInstances+" r_ndc:"+r_ndc); for (int i = 0; i < m_numAttribs; i++) {if (i != m_classIndex) { if (m_numericClass) { m_weights[i] = m_ndcda[i]/m_ndc - ((m_nda[i] - m_ndcda[i])/((double)totalInstances - m_ndc)); } else { m_weights[i] *= (1.0/(double)totalInstances); } // System.out.println(r_weights[i]); } } } /** * Evaluates an individual attribute using ReliefF's instance based approach. * The actual work is done by buildEvaluator which evaluates all features. 
* * @param attribute the index of the attribute to be evaluated * @throws Exception if the attribute could not be evaluated */ public double evaluateAttribute (int attribute) throws Exception { return m_weights[attribute]; } /** * Reset options to their default values */ protected void resetOptions () { m_trainInstances = null; m_sampleM = -1; m_Knn = 10; m_sigma = 2; m_weightByDistance = false; m_seed = 1; } /** * Normalizes a given value of a numeric attribute. * * @param x the value to be normalized * @param i the attribute's index * @return the normalized value */ private double norm (double x, int i) { if (Double.isNaN(m_minArray[i]) || Utils.eq(m_maxArray[i], m_minArray[i])) { return 0; } else { return (x - m_minArray[i])/(m_maxArray[i] - m_minArray[i]); } } /** * Updates the minimum and maximum values for all the attributes * based on a new instance. * * @param instance the new instance */ private void updateMinMax (Instance instance) { // for (int j = 0; j < m_numAttribs; j++) { try { for (int j = 0; j < instance.numValues(); j++) { if ((instance.attributeSparse(j).isNumeric()) && (!instance.isMissingSparse(j))) { if (Double.isNaN(m_minArray[instance.index(j)])) { m_minArray[instance.index(j)] = instance.valueSparse(j); m_maxArray[instance.index(j)] = instance.valueSparse(j); } else { if (instance.valueSparse(j) < m_minArray[instance.index(j)]) { m_minArray[instance.index(j)] = instance.valueSparse(j); } else { if (instance.valueSparse(j) > m_maxArray[instance.index(j)]) { m_maxArray[instance.index(j)] = instance.valueSparse(j); } } } } } } catch (Exception ex) { System.err.println(ex); ex.printStackTrace(); } } /** * Computes the difference between two given attribute * values. */ private double difference(int index, double val1, double val2) { switch (m_trainInstances.attribute(index).type()) { case Attribute.NOMINAL: // If attribute is nominal if (Utils.isMissingValue(val1) || Utils.isMissingValue(val2)) { return (1.0 - (1.0/((double)m_trainInstances. 
attribute(index).numValues()))); } else if ((int)val1 != (int)val2) { return 1; } else { return 0; } case Attribute.NUMERIC: // If attribute is numeric if (Utils.isMissingValue(val1) || Utils.isMissingValue(val2)) { if (Utils.isMissingValue(val1) && Utils.isMissingValue(val2)) { return 1; } else { double diff; if (Utils.isMissingValue(val2)) { diff = norm(val1, index); } else { diff = norm(val2, index); } if (diff < 0.5) { diff = 1.0 - diff; } return diff; } } else { return Math.abs(norm(val1, index) - norm(val2, index)); } default: return 0; } } /** * Calculates the distance between two instances * * @param first the first instance * @param second the second instance * @return the distance between the two given instances, between 0 and 1 */ private double distance(Instance first, Instance second) { double distance = 0; int firstI, secondI; for (int p1 = 0, p2 = 0; p1 < first.numValues() || p2 < second.numValues();) { if (p1 >= first.numValues()) { firstI = m_trainInstances.numAttributes(); } else { firstI = first.index(p1); } if (p2 >= second.numValues()) { secondI = m_trainInstances.numAttributes(); } else { secondI = second.index(p2); } if (firstI == m_trainInstances.classIndex()) { p1++; continue; } if (secondI == m_trainInstances.classIndex()) { p2++; continue; } double diff; if (firstI == secondI) { diff = difference(firstI, first.valueSparse(p1), second.valueSparse(p2)); p1++; p2++; } else if (firstI > secondI) { diff = difference(secondI, 0, second.valueSparse(p2)); p2++; } else { diff = difference(firstI, first.valueSparse(p1), 0); p1++; } // distance += diff * diff; distance += diff; } // return Math.sqrt(distance / m_NumAttributesUsed); return distance; } /** * update attribute weights given an instance when the class is numeric * * @param instNum the index of the instance to use when updating weights */ private void updateWeightsNumericClass (int instNum) { int i, j; double temp,temp2; int[] tempSorted = null; double[] tempDist = null; double distNorm = 
1.0; int firstI, secondI; Instance inst = m_trainInstances.instance(instNum); // sort nearest neighbours and set up normalization variable if (m_weightByDistance) { tempDist = new double[m_stored[0]]; for (j = 0, distNorm = 0; j < m_stored[0]; j++) { // copy the distances tempDist[j] = m_karray[0][j][0]; // sum normalizer distNorm += m_weightsByRank[j]; } tempSorted = Utils.sort(tempDist); } for (i = 0; i < m_stored[0]; i++) { // P diff prediction (class) given nearest instances if (m_weightByDistance) { temp = difference(m_classIndex, inst.value(m_classIndex), m_trainInstances. instance((int)m_karray[0][tempSorted[i]][1]). value(m_classIndex)); temp *= (m_weightsByRank[i]/distNorm); } else { temp = difference(m_classIndex, inst.value(m_classIndex), m_trainInstances. instance((int)m_karray[0][i][1]). value(m_classIndex)); temp *= (1.0/(double)m_stored[0]); // equal influence } m_ndc += temp; Instance cmp; cmp = (m_weightByDistance) ? m_trainInstances.instance((int)m_karray[0][tempSorted[i]][1]) : m_trainInstances.instance((int)m_karray[0][i][1]); double temp_diffP_diffA_givNearest = difference(m_classIndex, inst.value(m_classIndex), cmp.value(m_classIndex)); // now the attributes for (int p1 = 0, p2 = 0; p1 < inst.numValues() || p2 < cmp.numValues();) { if (p1 >= inst.numValues()) { firstI = m_trainInstances.numAttributes(); } else { firstI = inst.index(p1); } if (p2 >= cmp.numValues()) { secondI = m_trainInstances.numAttributes(); } else { secondI = cmp.index(p2); } if (firstI == m_trainInstances.classIndex()) { p1++; continue; } if (secondI == m_trainInstances.classIndex()) { p2++; continue; } temp = 0.0; temp2 = 0.0; if (firstI == secondI) { j = firstI; temp = difference(j, inst.valueSparse(p1), cmp.valueSparse(p2)); p1++;p2++; } else if (firstI > secondI) { j = secondI; temp = difference(j, 0, cmp.valueSparse(p2)); p2++; } else { j = firstI; temp = difference(j, inst.valueSparse(p1), 0); p1++; } temp2 = temp_diffP_diffA_givNearest * temp; // P of different 
prediction and different att value given // nearest instances if (m_weightByDistance) { temp2 *= (m_weightsByRank[i]/distNorm); } else { temp2 *= (1.0/(double)m_stored[0]); // equal influence } m_ndcda[j] += temp2; // P of different attribute val given nearest instances if (m_weightByDistance) { temp *= (m_weightsByRank[i]/distNorm); } else { temp *= (1.0/(double)m_stored[0]); // equal influence } m_nda[j] += temp; } } } /** * update attribute weights given an instance when the class is discrete * * @param instNum the index of the instance to use when updating weights */ private void updateWeightsDiscreteClass (int instNum) { int i, j, k; int cl; double temp_diff, w_norm = 1.0; double[] tempDistClass; int[] tempSortedClass = null; double distNormClass = 1.0; double[] tempDistAtt; int[][] tempSortedAtt = null; double[] distNormAtt = null; int firstI, secondI; // store the indexes (sparse instances) of non-zero elements Instance inst = m_trainInstances.instance(instNum); // get the class of this instance cl = (int)m_trainInstances.instance(instNum).value(m_classIndex); // sort nearest neighbours and set up normalization variables if (m_weightByDistance) { // do class (hits) first // sort the distances tempDistClass = new double[m_stored[cl]]; for (j = 0, distNormClass = 0; j < m_stored[cl]; j++) { // copy the distances tempDistClass[j] = m_karray[cl][j][0]; // sum normalizer distNormClass += m_weightsByRank[j]; } tempSortedClass = Utils.sort(tempDistClass); // do misses (other classes) tempSortedAtt = new int[m_numClasses][1]; distNormAtt = new double[m_numClasses]; for (k = 0; k < m_numClasses; k++) { if (k != cl) // already done cl { // sort the distances tempDistAtt = new double[m_stored[k]]; for (j = 0, distNormAtt[k] = 0; j < m_stored[k]; j++) { // copy the distances tempDistAtt[j] = m_karray[k][j][0]; // sum normalizer distNormAtt[k] += m_weightsByRank[j]; } tempSortedAtt[k] = Utils.sort(tempDistAtt); } } } if (m_numClasses > 2) { // the amount of probability 
space left after removing the // probability of this instance's class value w_norm = (1.0 - m_classProbs[cl]); } // do the k nearest hits of the same class for (j = 0, temp_diff = 0.0; j < m_stored[cl]; j++) { Instance cmp; cmp = (m_weightByDistance) ? m_trainInstances. instance((int)m_karray[cl][tempSortedClass[j]][1]) : m_trainInstances.instance((int)m_karray[cl][j][1]); for (int p1 = 0, p2 = 0; p1 < inst.numValues() || p2 < cmp.numValues();) { if (p1 >= inst.numValues()) { firstI = m_trainInstances.numAttributes(); } else { firstI = inst.index(p1); } if (p2 >= cmp.numValues()) { secondI = m_trainInstances.numAttributes(); } else { secondI = cmp.index(p2); } if (firstI == m_trainInstances.classIndex()) { p1++; continue; } if (secondI == m_trainInstances.classIndex()) { p2++; continue; } if (firstI == secondI) { i = firstI; temp_diff = difference(i, inst.valueSparse(p1), cmp.valueSparse(p2)); p1++;p2++; } else if (firstI > secondI) { i = secondI; temp_diff = difference(i, 0, cmp.valueSparse(p2)); p2++; } else { i = firstI; temp_diff = difference(i, inst.valueSparse(p1), 0); p1++; } if (m_weightByDistance) { temp_diff *= (m_weightsByRank[j]/distNormClass); } else { if (m_stored[cl] > 0) { temp_diff /= (double)m_stored[cl]; } } m_weights[i] -= temp_diff; } } // now do k nearest misses from each of the other classes temp_diff = 0.0; for (k = 0; k < m_numClasses; k++) { if (k != cl) // already done cl { for (j = 0; j < m_stored[k]; j++) { Instance cmp; cmp = (m_weightByDistance) ? m_trainInstances. 
instance((int)m_karray[k][tempSortedAtt[k][j]][1]) : m_trainInstances.instance((int)m_karray[k][j][1]); for (int p1 = 0, p2 = 0; p1 < inst.numValues() || p2 < cmp.numValues();) { if (p1 >= inst.numValues()) { firstI = m_trainInstances.numAttributes(); } else { firstI = inst.index(p1); } if (p2 >= cmp.numValues()) { secondI = m_trainInstances.numAttributes(); } else { secondI = cmp.index(p2); } if (firstI == m_trainInstances.classIndex()) { p1++; continue; } if (secondI == m_trainInstances.classIndex()) { p2++; continue; } if (firstI == secondI) { i = firstI; temp_diff = difference(i, inst.valueSparse(p1), cmp.valueSparse(p2)); p1++;p2++; } else if (firstI > secondI) { i = secondI; temp_diff = difference(i, 0, cmp.valueSparse(p2)); p2++; } else { i = firstI; temp_diff = difference(i, inst.valueSparse(p1), 0); p1++; } if (m_weightByDistance) { temp_diff *= (m_weightsByRank[j]/distNormAtt[k]); } else { if (m_stored[k] > 0) { temp_diff /= (double)m_stored[k]; } } if (m_numClasses > 2) { m_weights[i] += ((m_classProbs[k]/w_norm)*temp_diff); } else { m_weights[i] += temp_diff; } } } } } } /** * Find the K nearest instances to supplied instance if the class is numeric, * or the K nearest Hits (same class) and Misses (K from each of the other * classes) if the class is discrete. 
* * @param instNum the index of the instance to find nearest neighbours of */ private void findKHitMiss (int instNum) { int i, j; int cl; double ww; double temp_diff = 0.0; Instance thisInst = m_trainInstances.instance(instNum); for (i = 0; i < m_numInstances; i++) { if (i != instNum) { Instance cmpInst = m_trainInstances.instance(i); temp_diff = distance(cmpInst, thisInst); // class of this training instance or 0 if numeric if (m_numericClass) { cl = 0; } else { cl = (int)m_trainInstances.instance(i).value(m_classIndex); } // add this diff to the list for the class of this instance if (m_stored[cl] < m_Knn) { m_karray[cl][m_stored[cl]][0] = temp_diff; m_karray[cl][m_stored[cl]][1] = i; m_stored[cl]++; // note the worst diff for this class for (j = 0, ww = -1.0; j < m_stored[cl]; j++) { if (m_karray[cl][j][0] > ww) { ww = m_karray[cl][j][0]; m_index[cl] = j; } } m_worst[cl] = ww; } else /* if we already have stored knn for this class then check to see if this instance is better than the worst */ { if (temp_diff < m_karray[cl][m_index[cl]][0]) { m_karray[cl][m_index[cl]][0] = temp_diff; m_karray[cl][m_index[cl]][1] = i; for (j = 0, ww = -1.0; j < m_stored[cl]; j++) { if (m_karray[cl][j][0] > ww) { ww = m_karray[cl][j][0]; m_index[cl] = j; } } m_worst[cl] = ww; } } } } } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } // ============ // Test method. // ============ /** * Main method for testing this class. * * @param args the options */ public static void main (String[] args) { runEvaluator(new ReliefFAttributeEval(), args); } }
40,000
28.674332
241
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/SVMAttributeEval.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * SVMAttributeEval.java * Copyright (C) 2002 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import weka.classifiers.functions.SMO; import weka.core.Capabilities; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.filters.Filter; import weka.filters.unsupervised.attribute.MakeIndicator; import java.util.ArrayList; import java.util.Enumeration; import java.util.Iterator; import java.util.Vector; /** <!-- globalinfo-start --> * SVMAttributeEval :<br/> * <br/> * Evaluates the worth of an attribute by using an SVM classifier. Attributes are ranked by the square of the weight assigned by the SVM. Attribute selection for multiclass problems is handled by ranking attributes for each class seperately using a one-vs-all method and then "dealing" from the top of each pile to give a final ranking.<br/> * <br/> * For more information see:<br/> * <br/> * I. Guyon, J. Weston, S. Barnhill, V. Vapnik (2002). 
Gene selection for cancer classification using support vector machines. Machine Learning. 46:389-422. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;article{Guyon2002, * author = {I. Guyon and J. Weston and S. Barnhill and V. Vapnik}, * journal = {Machine Learning}, * pages = {389-422}, * title = {Gene selection for cancer classification using support vector machines}, * volume = {46}, * year = {2002} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -X &lt;constant rate of elimination&gt; * Specify the constant rate of attribute * elimination per invocation of * the support vector machine. * Default = 1.</pre> * * <pre> -Y &lt;percent rate of elimination&gt; * Specify the percentage rate of attributes to * elimination per invocation of * the support vector machine. * Trumps constant rate (above threshold). * Default = 0.</pre> * * <pre> -Z &lt;threshold for percent elimination&gt; * Specify the threshold below which * percentage attribute elimination * reverts to the constant method.</pre> * * <pre> -P &lt;epsilon&gt; * Specify the value of P (epsilon * parameter) to pass on to the * support vector machine. * Default = 1.0e-25</pre> * * <pre> -T &lt;tolerance&gt; * Specify the value of T (tolerance * parameter) to pass on to the * support vector machine. * Default = 1.0e-10</pre> * * <pre> -C &lt;complexity&gt; * Specify the value of C (complexity * parameter) to pass on to the * support vector machine. * Default = 1.0</pre> * * <pre> -N * Whether the SVM should 0=normalize/1=standardize/2=neither. 
* (default 0=normalize)</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Mark Hall (mhall@cs.waikato.ac.nz) * @author Kieran Holland * @version $Revision: 1.28 $ */ public class SVMAttributeEval extends ASEvaluation implements AttributeEvaluator, OptionHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -6489975709033967447L; /** The attribute scores */ private double[] m_attScores; /** Constant rate of attribute elimination per iteration */ private int m_numToEliminate = 1; /** Percentage rate of attribute elimination, trumps constant rate (above threshold), ignored if = 0 */ private int m_percentToEliminate = 0; /** Threshold below which percent elimination switches to constant elimination */ private int m_percentThreshold = 0; /** Complexity parameter to pass on to SMO */ private double m_smoCParameter = 1.0; /** Tolerance parameter to pass on to SMO */ private double m_smoTParameter = 1.0e-10; /** Epsilon parameter to pass on to SMO */ private double m_smoPParameter = 1.0e-25; /** Filter parameter to pass on to SMO */ private int m_smoFilterType = 0; /** * Returns a string describing this attribute evaluator * @return a description of the evaluator suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "SVMAttributeEval :\n\nEvaluates the worth of an attribute by " + "using an SVM classifier. Attributes are ranked by the square of the " + "weight assigned by the SVM. 
Attribute selection for multiclass " + "problems is handled by ranking attributes for each class seperately " + "using a one-vs-all method and then \"dealing\" from the top of " + "each pile to give a final ranking.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "I. Guyon and J. Weston and S. Barnhill and V. Vapnik"); result.setValue(Field.YEAR, "2002"); result.setValue(Field.TITLE, "Gene selection for cancer classification using support vector machines"); result.setValue(Field.JOURNAL, "Machine Learning"); result.setValue(Field.VOLUME, "46"); result.setValue(Field.PAGES, "389-422"); return result; } /** * Constructor */ public SVMAttributeEval() { resetOptions(); } /** * Returns an enumeration describing all the available options * * @return an enumeration of options */ public Enumeration listOptions() { Vector newVector = new Vector(4); newVector.addElement( new Option( "\tSpecify the constant rate of attribute\n" + "\telimination per invocation of\n" + "\tthe support vector machine.\n" + "\tDefault = 1.", "X", 1, "-X <constant rate of elimination>")); newVector.addElement( new Option( "\tSpecify the percentage rate of attributes to\n" + "\telimination per invocation of\n" + "\tthe support vector machine.\n" + "\tTrumps constant rate (above threshold).\n" + "\tDefault = 0.", "Y", 1, "-Y <percent rate of elimination>")); newVector.addElement( new Option( "\tSpecify the threshold below which \n" + "\tpercentage attribute elimination\n" + "\treverts to the constant method.", "Z", 1, "-Z <threshold for percent 
elimination>")); newVector.addElement( new Option( "\tSpecify the value of P (epsilon\n" + "\tparameter) to pass on to the\n" + "\tsupport vector machine.\n" + "\tDefault = 1.0e-25", "P", 1, "-P <epsilon>")); newVector.addElement( new Option( "\tSpecify the value of T (tolerance\n" + "\tparameter) to pass on to the\n" + "\tsupport vector machine.\n" + "\tDefault = 1.0e-10", "T", 1, "-T <tolerance>")); newVector.addElement( new Option( "\tSpecify the value of C (complexity\n" + "\tparameter) to pass on to the\n" + "\tsupport vector machine.\n" + "\tDefault = 1.0", "C", 1, "-C <complexity>")); newVector.addElement(new Option("\tWhether the SVM should " + "0=normalize/1=standardize/2=neither.\n" + "\t(default 0=normalize)", "N", 1, "-N")); return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -X &lt;constant rate of elimination&gt; * Specify the constant rate of attribute * elimination per invocation of * the support vector machine. * Default = 1.</pre> * * <pre> -Y &lt;percent rate of elimination&gt; * Specify the percentage rate of attributes to * elimination per invocation of * the support vector machine. * Trumps constant rate (above threshold). * Default = 0.</pre> * * <pre> -Z &lt;threshold for percent elimination&gt; * Specify the threshold below which * percentage attribute elimination * reverts to the constant method.</pre> * * <pre> -P &lt;epsilon&gt; * Specify the value of P (epsilon * parameter) to pass on to the * support vector machine. * Default = 1.0e-25</pre> * * <pre> -T &lt;tolerance&gt; * Specify the value of T (tolerance * parameter) to pass on to the * support vector machine. * Default = 1.0e-10</pre> * * <pre> -C &lt;complexity&gt; * Specify the value of C (complexity * parameter) to pass on to the * support vector machine. * Default = 1.0</pre> * * <pre> -N * Whether the SVM should 0=normalize/1=standardize/2=neither. 
* (default 0=normalize)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an error occurs */ public void setOptions(String[] options) throws Exception { String optionString; optionString = Utils.getOption('X', options); if (optionString.length() != 0) { setAttsToEliminatePerIteration(Integer.parseInt(optionString)); } optionString = Utils.getOption('Y', options); if (optionString.length() != 0) { setPercentToEliminatePerIteration(Integer.parseInt(optionString)); } optionString = Utils.getOption('Z', options); if (optionString.length() != 0) { setPercentThreshold(Integer.parseInt(optionString)); } optionString = Utils.getOption('P', options); if (optionString.length() != 0) { setEpsilonParameter((new Double(optionString)).doubleValue()); } optionString = Utils.getOption('T', options); if (optionString.length() != 0) { setToleranceParameter((new Double(optionString)).doubleValue()); } optionString = Utils.getOption('C', options); if (optionString.length() != 0) { setComplexityParameter((new Double(optionString)).doubleValue()); } optionString = Utils.getOption('N', options); if (optionString.length() != 0) { setFilterType(new SelectedTag(Integer.parseInt(optionString), SMO.TAGS_FILTER)); } else { setFilterType(new SelectedTag(SMO.FILTER_NORMALIZE, SMO.TAGS_FILTER)); } Utils.checkForRemainingOptions(options); } /** * Gets the current settings of SVMAttributeEval * * @return an array of strings suitable for passing to setOptions() */ public String[] getOptions() { String[] options = new String[14]; int current = 0; options[current++] = "-X"; options[current++] = "" + getAttsToEliminatePerIteration(); options[current++] = "-Y"; options[current++] = "" + getPercentToEliminatePerIteration(); options[current++] = "-Z"; options[current++] = "" + getPercentThreshold(); options[current++] = "-P"; options[current++] = "" + getEpsilonParameter(); options[current++] = "-T"; options[current++] = "" + 
getToleranceParameter(); options[current++] = "-C"; options[current++] = "" + getComplexityParameter(); options[current++] = "-N"; options[current++] = "" + m_smoFilterType; while (current < options.length) { options[current++] = ""; } return options; } //________________________________________________________________________ /** * Returns a tip text for this property suitable for display in the * GUI * * @return tip text string describing this property */ public String attsToEliminatePerIterationTipText() { return "Constant rate of attribute elimination."; } /** * Returns a tip text for this property suitable for display in the * GUI * * @return tip text string describing this property */ public String percentToEliminatePerIterationTipText() { return "Percent rate of attribute elimination."; } /** * Returns a tip text for this property suitable for display in the * GUI * * @return tip text string describing this property */ public String percentThresholdTipText() { return "Threshold below which percent elimination reverts to constant elimination."; } /** * Returns a tip text for this property suitable for display in the * GUI * * @return tip text string describing this property */ public String epsilonParameterTipText() { return "P epsilon parameter to pass to the SVM"; } /** * Returns a tip text for this property suitable for display in the * GUI * * @return tip text string describing this property */ public String toleranceParameterTipText() { return "T tolerance parameter to pass to the SVM"; } /** * Returns a tip text for this property suitable for display in the * GUI * * @return tip text string describing this property */ public String complexityParameterTipText() { return "C complexity parameter to pass to the SVM"; } /** * Returns a tip text for this property suitable for display in the * GUI * * @return tip text string describing this property */ public String filterTypeTipText() { return "filtering used by the SVM"; } 
//________________________________________________________________________ /** * Set the constant rate of attribute elimination per iteration * * @param cRate the constant rate of attribute elimination per iteration */ public void setAttsToEliminatePerIteration(int cRate) { m_numToEliminate = cRate; } /** * Get the constant rate of attribute elimination per iteration * * @return the constant rate of attribute elimination per iteration */ public int getAttsToEliminatePerIteration() { return m_numToEliminate; } /** * Set the percentage of attributes to eliminate per iteration * * @param pRate percent of attributes to eliminate per iteration */ public void setPercentToEliminatePerIteration(int pRate) { m_percentToEliminate = pRate; } /** * Get the percentage rate of attribute elimination per iteration * * @return the percentage rate of attribute elimination per iteration */ public int getPercentToEliminatePerIteration() { return m_percentToEliminate; } /** * Set the threshold below which percentage elimination reverts to * constant elimination. * * @param pThresh percent of attributes to eliminate per iteration */ public void setPercentThreshold(int pThresh) { m_percentThreshold = pThresh; } /** * Get the threshold below which percentage elimination reverts to * constant elimination. 
* * @return the threshold below which percentage elimination stops */ public int getPercentThreshold() { return m_percentThreshold; } /** * Set the value of P for SMO * * @param svmP the value of P */ public void setEpsilonParameter(double svmP) { m_smoPParameter = svmP; } /** * Get the value of P used with SMO * * @return the value of P */ public double getEpsilonParameter() { return m_smoPParameter; } /** * Set the value of T for SMO * * @param svmT the value of T */ public void setToleranceParameter(double svmT) { m_smoTParameter = svmT; } /** * Get the value of T used with SMO * * @return the value of T */ public double getToleranceParameter() { return m_smoTParameter; } /** * Set the value of C for SMO * * @param svmC the value of C */ public void setComplexityParameter(double svmC) { m_smoCParameter = svmC; } /** * Get the value of C used with SMO * * @return the value of C */ public double getComplexityParameter() { return m_smoCParameter; } /** * The filtering mode to pass to SMO * * @param newType the new filtering mode */ public void setFilterType(SelectedTag newType) { if (newType.getTags() == SMO.TAGS_FILTER) { m_smoFilterType = newType.getSelectedTag().getID(); } } /** * Get the filtering mode passed to SMO * * @return the filtering mode */ public SelectedTag getFilterType() { return new SelectedTag(m_smoFilterType, SMO.TAGS_FILTER); } //________________________________________________________________________ /** * Returns the capabilities of this evaluator. * * @return the capabilities of this evaluator * @see Capabilities */ public Capabilities getCapabilities() { Capabilities result; result = new SMO().getCapabilities(); result.setOwner(this); // only binary attributes are allowed, otherwise the NominalToBinary // filter inside SMO will increase the number of attributes which in turn // will lead to ArrayIndexOutOfBounds-Exceptions. 
result.disable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.BINARY_ATTRIBUTES); result.disableAllAttributeDependencies(); return result; } /** * Initializes the evaluator. * * @param data set of instances serving as training data * @throws Exception if the evaluator has not been * generated successfully */ public void buildEvaluator(Instances data) throws Exception { // can evaluator handle data? getCapabilities().testWithFail(data); //System.out.println("Class attribute: " + data.attribute(data.classIndex()).name()); // Check settings m_numToEliminate = (m_numToEliminate > 1) ? m_numToEliminate : 1; m_percentToEliminate = (m_percentToEliminate < 100) ? m_percentToEliminate : 100; m_percentToEliminate = (m_percentToEliminate > 0) ? m_percentToEliminate : 0; m_percentThreshold = (m_percentThreshold < data.numAttributes()) ? m_percentThreshold : data.numAttributes() - 1; m_percentThreshold = (m_percentThreshold > 0) ? m_percentThreshold : 0; // Get ranked attributes for each class seperately, one-vs-all int[][] attScoresByClass; int numAttr = data.numAttributes() - 1; if(data.numClasses()>2) { attScoresByClass = new int[data.numClasses()][numAttr]; for (int i = 0; i < data.numClasses(); i++) { attScoresByClass[i] = rankBySVM(i, data); } } else { attScoresByClass = new int[1][numAttr]; attScoresByClass[0] = rankBySVM(0, data); } // Cycle through class-specific ranked lists, poping top one off for each class // and adding it to the overall ranked attribute list if it's not there already ArrayList ordered = new ArrayList(numAttr); for (int i = 0; i < numAttr; i++) { for (int j = 0; j < (data.numClasses()>2 ? 
data.numClasses() : 1); j++) { Integer rank = new Integer(attScoresByClass[j][i]); if (!ordered.contains(rank)) ordered.add(rank); } } m_attScores = new double[data.numAttributes()]; Iterator listIt = ordered.iterator(); for (double i = (double) numAttr; listIt.hasNext(); i = i - 1.0) { m_attScores[((Integer) listIt.next()).intValue()] = i; } } /** * Get SVM-ranked attribute indexes (best to worst) selected for * the class attribute indexed by classInd (one-vs-all). */ private int[] rankBySVM(int classInd, Instances data) { // Holds a mapping into the original array of attribute indices int[] origIndices = new int[data.numAttributes()]; for (int i = 0; i < origIndices.length; i++) origIndices[i] = i; // Count down of number of attributes remaining int numAttrLeft = data.numAttributes()-1; // Ranked attribute indices for this class, one vs.all (highest->lowest) int[] attRanks = new int[numAttrLeft]; try { MakeIndicator filter = new MakeIndicator(); filter.setAttributeIndex("" + (data.classIndex() + 1)); filter.setNumeric(false); filter.setValueIndex(classInd); filter.setInputFormat(data); Instances trainCopy = Filter.useFilter(data, filter); double pctToElim = ((double) m_percentToEliminate) / 100.0; while (numAttrLeft > 0) { int numToElim; if (pctToElim > 0) { numToElim = (int) (trainCopy.numAttributes() * pctToElim); numToElim = (numToElim > 1) ? numToElim : 1; if (numAttrLeft - numToElim <= m_percentThreshold) { pctToElim = 0; numToElim = numAttrLeft - m_percentThreshold; } } else { numToElim = (numAttrLeft >= m_numToEliminate) ? 
m_numToEliminate : numAttrLeft; } // Build the linear SVM with default parameters SMO smo = new SMO(); // SMO seems to get stuck if data not normalised when few attributes remain // smo.setNormalizeData(numAttrLeft < 40); smo.setFilterType(new SelectedTag(m_smoFilterType, SMO.TAGS_FILTER)); smo.setEpsilon(m_smoPParameter); smo.setToleranceParameter(m_smoTParameter); smo.setC(m_smoCParameter); smo.buildClassifier(trainCopy); // Find the attribute with maximum weight^2 double[] weightsSparse = smo.sparseWeights()[0][1]; int[] indicesSparse = smo.sparseIndices()[0][1]; double[] weights = new double[trainCopy.numAttributes()]; for (int j = 0; j < weightsSparse.length; j++) { weights[indicesSparse[j]] = weightsSparse[j] * weightsSparse[j]; } weights[trainCopy.classIndex()] = Double.MAX_VALUE; int minWeightIndex; int[] featArray = new int[numToElim]; boolean[] eliminated = new boolean[origIndices.length]; for (int j = 0; j < numToElim; j++) { minWeightIndex = Utils.minIndex(weights); attRanks[--numAttrLeft] = origIndices[minWeightIndex]; featArray[j] = minWeightIndex; eliminated[minWeightIndex] = true; weights[minWeightIndex] = Double.MAX_VALUE; } // Delete the worst attributes. weka.filters.unsupervised.attribute.Remove delTransform = new weka.filters.unsupervised.attribute.Remove(); delTransform.setInvertSelection(false); delTransform.setAttributeIndicesArray(featArray); delTransform.setInputFormat(trainCopy); trainCopy = Filter.useFilter(trainCopy, delTransform); // Update the array of remaining attribute indices int[] temp = new int[origIndices.length - numToElim]; int k = 0; for (int j = 0; j < origIndices.length; j++) { if (!eliminated[j]) { temp[k++] = origIndices[j]; } } origIndices = temp; } // Carefully handle all exceptions } catch (Exception e) { e.printStackTrace(); } return attRanks; } /** * Resets options to defaults. 
*/ protected void resetOptions() { m_attScores = null; } /** * Evaluates an attribute by returning the rank of the square of its coefficient in a * linear support vector machine. * * @param attribute the index of the attribute to be evaluated * @throws Exception if the attribute could not be evaluated */ public double evaluateAttribute(int attribute) throws Exception { return m_attScores[attribute]; } /** * Return a description of the evaluator * @return description as a string */ public String toString() { StringBuffer text = new StringBuffer(); if (m_attScores == null) { text.append("\tSVM feature evaluator has not been built yet"); } else { text.append("\tSVM feature evaluator"); } text.append("\n"); return text.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.28 $"); } /** * Main method for testing this class. * * @param args the options */ public static void main(String[] args) { runEvaluator(new SVMAttributeEval(), args); } }
26,985
31.202864
341
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/ScatterSearchV1.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * ScatterSearchV1.java * Copyright (C) 2008 Adrian Pino * Copyright (C) 2008 University of Waikato, Hamilton, NZ * */ package weka.attributeSelection; import java.io.Serializable; import java.util.ArrayList; import java.util.BitSet; import java.util.Enumeration; import java.util.List; import java.util.Random; import java.util.Vector; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; /** * Class for performing the Sequential Scatter Search. <p> * <!-- globalinfo-start --> * Scatter Search :<br/> * <br/> * Performs an Scatter Search through the space of attribute subsets. Start with a population of many significants and diverses subset stops when the result is higher than a given treshold or there's not more improvement<br/> * For more information see:<br/> * <br/> * Felix Garcia Lopez (2004). Solving feature subset selection problem by a Parallel Scatter Search. Elsevier. 
* <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -Z &lt;num&gt; * Specify the number of subsets to generate * in the initial population..</pre> * * <pre> -T &lt;threshold&gt; * Specify the treshold used for considering when a subset is significant.</pre> * * <pre> -R &lt;0 = greedy combination | 1 = reduced greedy combination &gt; * Specify the kind of combiantion * for using it in the combination method.</pre> * * <pre> -S &lt;seed&gt; * Set the random number seed. * (default = 1)</pre> * * <pre> -D * Verbose output for monitoring the search.</pre> * <!-- options-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;book{Lopez2004, * author = {Felix Garcia Lopez}, * month = {October}, * publisher = {Elsevier}, * title = {Solving feature subset selection problem by a Parallel Scatter Search}, * year = {2004}, * language = {English} * } * </pre> * <p/> <!-- technical-bibtex-end --> * * from the Book: Solving feature subset selection problem by a Parallel Scatter Search, Felix Garcia Lopez. 
* @author Adrian Pino (apinoa@facinf.uho.edu.cu) * @version $Revision: 6277 $ * */ public class ScatterSearchV1 extends ASSearch implements OptionHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -8512041420388121326L; /** number of attributes in the data */ private int m_numAttribs; /** holds the class index */ private int m_classIndex; /** holds the treshhold that delimits the good attributes */ private double m_treshold; /** the initial threshold */ private double m_initialThreshold; /** the kind of comination betwen parents ((0)greedy combination/(1)reduced greedy combination)*/ int m_typeOfCombination; /** random number generation */ private Random m_random; /** seed for random number generation */ private int m_seed; /** verbose output for monitoring the search and debugging */ private boolean m_debug = false; /** holds a report of the search */ private StringBuffer m_InformationReports; /** total number of subsets evaluated during a search */ private int m_totalEvals; /** holds the merit of the best subset found */ protected double m_bestMerit; /** time for procesing the search method */ private long m_processinTime; /** holds the Initial Population of Subsets*/ private List<Subset> m_population; /** holds the population size*/ private int m_popSize; /** holds the user selected initial population size */ private int m_initialPopSize; /** if no initial user pop size, then this holds the initial * pop size calculated from the number of attributes in the data * (for use in the toString() method) */ private int m_calculatedInitialPopSize; /** holds the subsets most significants and diverses * of the population (ReferenceSet). * * (transient because the subList() method returns * a non serializable Object). 
*/ private transient List<Subset> m_ReferenceSet; /** holds the greedy combination(reduced or not) of all the subsets of the ReferenceSet*/ private transient List<Subset> m_parentsCombination; /**holds the attributes ranked*/ private List<Subset> m_attributeRanking; /**Evaluator used to know the significance of a subset (for guiding the search)*/ private SubsetEvaluator ASEvaluator =null; /** kind of combination */ protected static final int COMBINATION_NOT_REDUCED = 0; protected static final int COMBINATION_REDUCED = 1; ; public static final Tag [] TAGS_SELECTION = { new Tag(COMBINATION_NOT_REDUCED, "Greedy Combination"), new Tag(COMBINATION_REDUCED, "Reduced Greedy Combination") }; /** * Returns a string describing this search method * @return a description of the search suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Scatter Search :\n\nPerforms an Scatter Search " +"through " +"the space of attribute subsets. Start with a population of many significants and diverses subset " +" stops when the result is higher than a given treshold or there's not more improvement\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.BOOK); result.setValue(Field.AUTHOR, "Felix Garcia Lopez"); result.setValue(Field.MONTH, "October"); result.setValue(Field.YEAR, "2004"); result.setValue(Field.TITLE, "Solving feature subset selection problem by a Parallel Scatter Search"); result.setValue(Field.PUBLISHER, "Elsevier"); result.setValue(Field.LANGUAGE, "English"); return result; } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.0$"); } public ScatterSearchV1 () { resetOptions(); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String thresholdTipText() { return "Set the treshold that subsets most overcome to be considered as significants"; } /** * Set the treshold * * @param threshold for identifyng significant subsets */ public void setThreshold (double threshold) { m_initialThreshold = threshold; } /** * Get the treshold * * @return the treshold that subsets most overcome to be considered as significants */ public double getThreshold () { return m_initialThreshold; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String populationSizeTipText() { return "Set the number of subset to generate in the initial Population"; } /** * Set the population size * * @param size the number of subset in the initial population */ public void setPopulationSize (int size) { m_initialPopSize = size; } /** * Get the population size * * @return the number of subsets to generate in the initial population */ public int getPopulationSize () { return m_initialPopSize; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String combinationTipText() { return "Set the kind of combination for using it to combine ReferenceSet subsets."; } /** * Set the kind of combination * * @param c the kind of combination of the search */ public void setCombination (SelectedTag c) { if (c.getTags() == TAGS_SELECTION) { m_typeOfCombination = c.getSelectedTag().getID(); } } /** * Get the combination * * @return the kind of combination used in the Combination method */ public SelectedTag getCombination () { return new 
SelectedTag(m_typeOfCombination, TAGS_SELECTION); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String seedTipText() { return "Set the random seed."; } /** * set the seed for random number generation * @param s seed value */ public void setSeed(int s) { m_seed = s; } /** * get the value of the random number generator's seed * @return the seed for random number generation */ public int getSeed() { return m_seed; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String debugTipText() { return "Turn on verbose output for monitoring the search's progress."; } /** * Set whether verbose output should be generated. * @param d true if output is to be verbose. */ public void setDebug(boolean d) { m_debug = d; } /** * Get whether output is to be verbose * @return true if output will be verbose */ public boolean getDebug() { return m_debug; } /** * Returns an enumeration describing the available options. * @return an enumeration of all the available options. **/ public Enumeration listOptions () { Vector newVector = new Vector(6); newVector.addElement(new Option("\tSpecify the number of subsets to generate " + "\n\tin the initial population.." ,"Z",1 , "-Z <num>")); newVector.addElement(new Option("\tSpecify the treshold used for considering when a subset is significant." , "T", 1 , "-T <threshold>")); newVector.addElement(new Option("\tSpecify the kind of combiantion " + "\n\tfor using it in the combination method." , "R", 1, "-R <0 = greedy combination | 1 = reduced greedy combination >")); newVector.addElement(new Option("\tSet the random number seed." +"\n\t(default = 1)" , "S", 1, "-S <seed>")); newVector.addElement(new Option("\tVerbose output for monitoring the search.","D",0,"-D")); return newVector.elements(); } /** * Parses a given list of options. 
* <!-- options-start --> * Valid options are: <p> * * -Z <br> * Specify the number of subsets to generate in the initial population.<p> * * -T <start set> <br> * Specify the treshold used for considering when a subset is significant. <p> * * -R <br> * Specify the kind of combiantion. <p> * * -S <br> * Set the random number seed. * (default = 1) * * -D <br> * Verbose output for monitoring the search * (default = false) * <!-- options-end --> * * @param options the list of options as an array of strings * @exception Exception if an option is not supported * **/ public void setOptions (String[] options) throws Exception { String optionString; resetOptions(); optionString = Utils.getOption('Z', options); if (optionString.length() != 0) { setPopulationSize(Integer.parseInt(optionString)); } optionString = Utils.getOption('T', options); if (optionString.length() != 0) { setThreshold(Double.parseDouble(optionString)); } optionString = Utils.getOption('R', options); if (optionString.length() != 0) { setCombination(new SelectedTag(Integer.parseInt(optionString), TAGS_SELECTION)); } else { setCombination(new SelectedTag(COMBINATION_NOT_REDUCED, TAGS_SELECTION)); } optionString = Utils.getOption('S', options); if (optionString.length() != 0) { setSeed(Integer.parseInt(optionString)); } setDebug(Utils.getFlag('D', options)); } /** * Gets the current settings of ScatterSearchV1. 
* * @return an array of strings suitable for passing to setOptions() */ public String[] getOptions () { String[] options = new String[9]; int current = 0; options[current++] = "-T"; options[current++] = "" + getThreshold (); options[current++] = "-Z"; options[current++] = ""+getPopulationSize (); options[current++] = "-R"; options[current++] = ""+String.valueOf (getCombination ().getSelectedTag ().getID ()); options[current++] = "-S"; options[current++] = "" + getSeed(); if (getDebug()) options[current++] = "-D"; while (current < options.length) options[current++] = ""; return options; } /** * returns a description of the search. * @return a description of the search as a String. */ public String toString() { StringBuffer FString = new StringBuffer(); FString.append("\tScatter Search " + "\n\tInit Population: "+m_calculatedInitialPopSize); FString.append("\n\tKind of Combination: " +getCombination ().getSelectedTag ().getReadable ()); FString.append("\n\tRandom number seed: "+m_seed); FString.append("\n\tDebug: "+m_debug); FString.append("\n\tTreshold: " +Utils.doubleToString(Math.abs(getThreshold ()),8,3)+"\n"); FString.append("\tTotal number of subsets evaluated: " + m_totalEvals + "\n"); FString.append("\tMerit of best subset found: " +Utils.doubleToString(Math.abs(m_bestMerit),8,3)+"\n"); /* FString.append("\tTime procesing the search space: " +(double)m_processinTime/1000+" seconds\n"); */ if(m_debug) return FString.toString()+"\n\n"+m_InformationReports.toString (); return FString.toString(); } /** * Searches the attribute subset space using Scatter Search. * * @param ASEval the attribute evaluator to guide the search * @param data the training instances. 
   * @return an array of selected attribute indexes
   * @exception Exception if the search can't be completed
   */
  public int[] search(ASEvaluation ASEval, Instances data) throws Exception{

    // Reset per-run state; population size and threshold fall back to the
    // user-supplied initial values.
    m_totalEvals = 0;
    m_popSize = m_initialPopSize;
    m_calculatedInitialPopSize = m_initialPopSize;
    m_treshold = m_initialThreshold;
    m_processinTime =System.currentTimeMillis ();
    m_InformationReports = new StringBuffer();

    m_numAttribs =data.numAttributes ();
    m_classIndex =data.classIndex ();

    // Non-positive population size means "auto": half the attribute count.
    if(m_popSize<=0) {
      m_popSize =m_numAttribs/2;
      m_calculatedInitialPopSize = m_popSize;
    }

    ASEvaluator =(SubsetEvaluator)ASEval;

    // Negative threshold means "auto": use the merit of the full attribute set.
    if(!(m_treshold >= 0)){
      m_treshold =calculateTreshhold();
      m_totalEvals++;
    }

    m_random = new Random(m_seed);

    m_attributeRanking =RankEachAttribute();
    CreatePopulation(m_popSize);

    // Reference set = best quarter + most-diverse quarter of the population
    // (halves when the population is small; trivial case: population of 1).
    int bestSolutions =m_popSize/4;
    int divSolutions =m_popSize/4;

    if(m_popSize < 4){
      bestSolutions = m_popSize/2;
      divSolutions = m_popSize/2;
      if(m_popSize == 1)
        return attributeList(((Subset)m_population.get (0)).subset);
    }

    m_ReferenceSet =new ArrayList<Subset>();
    for (int i = 0; i<m_population.size (); i++) {
      m_ReferenceSet.add (m_population.get (i)) ;
    }

    GenerateReferenceSet(m_ReferenceSet, bestSolutions, divSolutions);

    m_InformationReports.append ("Population: "+m_population.size ()+"\n");
    m_InformationReports.append ("merit \tsubset\n");
    for (int i = 0; i < m_population.size (); i++)
      m_InformationReports.append (printSubset (m_population.get (i)));

    m_ReferenceSet =m_ReferenceSet.subList (0,bestSolutions+divSolutions);

    /*TEST*/
    m_InformationReports.append ("\nReferenceSet:");
    m_InformationReports.append ("\n----------------Most Significants Solutions--------------\n");
    for (int i = 0; i<m_ReferenceSet.size (); i++) {
      if(i ==bestSolutions)
        m_InformationReports.append ("----------------Most Diverses Solutions--------------\n");
      m_InformationReports.append(printSubset (m_ReferenceSet.get (i)));
    }

    // Main scatter-search loop: combine reference-set members, locally
    // improve the offspring, rebuild the reference set, and stop once the
    // incumbent best subset (slot 0) no longer changes between iterations.
    Subset bestTemp =new Subset(new BitSet(m_numAttribs),0);

    while (!(bestTemp.isEqual (m_ReferenceSet.get (0))) /*||
        (m_treshold > bestTemp.merit)*/) {
      //while(){
      CombineParents();
      ImproveSolutions();
      // }
      bestTemp =m_ReferenceSet.get (0);
      int numBest =m_ReferenceSet.size ()/2;
      int numDiverses =m_ReferenceSet.size ()/2;
      UpdateReferenceSet(numBest, numDiverses);
      m_ReferenceSet = m_ReferenceSet.subList (0,numBest+numDiverses);
    }

    m_InformationReports.append("\nLast Reference Set Updated:\n");
    m_InformationReports.append ("merit \tsubset\n");
    for (int i = 0; i <m_ReferenceSet.size (); i++)
      m_InformationReports.append (printSubset (m_ReferenceSet.get (i)));

    m_bestMerit =bestTemp.merit;
    m_processinTime =System.currentTimeMillis () -m_processinTime;

    return attributeList (bestTemp.subset);
  }

  /**
   * Generate the a ReferenceSet containing the n best solutions and the m most diverse solutions of
   * the initial Population.
   *
   * @param ReferenceSet the ReferenceSet for storing these solutions
   * @param bestSolutions the number of the most pure solutions.
   * @param divSolutions the number of the most diverses solutions acording to the bestSolutions.
   */
  public void GenerateReferenceSet(List<Subset> ReferenceSet, int bestSolutions, int divSolutions){

    //Sorting the Initial ReferenceSet (in place, descending by merit)
    ReferenceSet =bubbleSubsetSort (ReferenceSet);

    // storing all the attributes that are now in the ReferenceSet (just till bestSolutions)
    BitSet allBits_RefSet =getAllBits (ReferenceSet.subList (0,bestSolutions));

    // for stopping when ReferenceSet.size () ==bestSolutions+divSolutions
    int refSetlength =bestSolutions;
    int total =bestSolutions+divSolutions;

    // Greedily append the candidate whose attribute set differs most
    // (symmetric difference) from everything already in the reference set.
    while (refSetlength <total) {
      List<Integer> aux =new ArrayList<Integer>();

      for (int i =refSetlength; i <ReferenceSet.size (); i ++) {
        aux.add (SimetricDiference (((Subset)ReferenceSet.get (i)).clone (),allBits_RefSet));
      }

      // aux is indexed relative to refSetlength, so the winning element sits
      // at ReferenceSet position refSetlength+mostDiv.
      int mostDiv =getIndexofBiggest(aux);

      // NOTE(review): the chosen element is copied into slot refSetlength but
      // never removed from its old position (the remove below is commented
      // out), so a duplicate can remain further down the list; the
      // filterSubset call at the end appears to rely on removing such
      // duplicates — confirm.
      ReferenceSet.set(refSetlength, ReferenceSet.get (refSetlength+mostDiv));
      //ReferenceSet.remove (refSetlength +mostDiv);
      refSetlength++;

      allBits_RefSet =getAllBits (ReferenceSet.subList (0,refSetlength));
    }

    ReferenceSet =filterSubset (ReferenceSet,refSetlength);
  }

  /**
   * Update the ReferenceSet putting the new obtained Solutions there
   *
   * @param numBestSolutions the number of the most pure solutions.
   * @param numDivsSolutions the number of the most diverses solutions acording to the bestSolutions.
   */
  public void UpdateReferenceSet(int numBestSolutions, int numDivsSolutions){
    // Prepend the freshly combined/improved solutions, then rebuild the
    // best + diverse partitions over the enlarged set.
    for (int i = 0; i <m_parentsCombination.size (); i++)
      m_ReferenceSet.add (i, m_parentsCombination.get (i));
    GenerateReferenceSet (m_ReferenceSet,numBestSolutions,numDivsSolutions);
  }

  /**
   * Improve the solutions previously combined by adding the attributes that improve that solution
   * @exception Exception if there is some trouble evaluating the candidate solutions
   */
  public void ImproveSolutions() throws Exception{
    for (int i = 0; i<m_parentsCombination.size (); i++) {
      // NOTE(review): aux1 and ranking are only referenced by the
      // commented-out block below and are otherwise dead locals.
      BitSet aux1 =(BitSet)((Subset)m_parentsCombination.get (i)).subset.clone ();
      List<Subset> ranking =new ArrayList<Subset>();
      /*
      for(int j=aux1.nextClearBit (0); j<=m_numAttribs; j=aux1.nextClearBit(j+1)){
        if(j ==m_classIndex)continue;
        BitSet aux2 =new BitSet(m_numAttribs);
        aux2.set (j);
        double merit =ASEvaluator.evaluateSubset (aux2);
        m_totalEvals++;
        ranking.add (new Subset((BitSet)aux2.clone (), merit));
      }
      ranking =bubbleSubsetSort (ranking);
      */
      // Greedy forward step over the precomputed single-attribute ranking:
      // keep adding the next ranked attribute while the merit improves.
      for (int k =0; k <m_attributeRanking.size (); k ++) {
        Subset s1 =((Subset)m_attributeRanking.get (k)).clone ();
        BitSet b1 =(BitSet)s1.subset.clone ();
        Subset s2 =((Subset)m_parentsCombination.get (i)).clone ();
        BitSet b2 =(BitSet)s2.subset.clone ();
        if(b2.get (b1.nextSetBit (0))) continue;
        b2.or (b1);
        double newMerit =ASEvaluator.evaluateSubset (b2);
        m_totalEvals++;
        if(newMerit <= s2.merit)break;
        m_parentsCombination.set (i,new Subset(b2,newMerit));
      }
      filterSubset (m_parentsCombination,m_ReferenceSet.size());
    }
  }

  /**
   * Combine all the posible pair solutions existing in the Population
   *
   * @exception Exception if there is some trouble evaluating the new childs
   */
  public void CombineParents() throws Exception{
    m_parentsCombination =new ArrayList<Subset>();

    // this two 'for' are for selecting parents in the refSet
    for (int i= 0; i <m_ReferenceSet.size ()-1; i ++) {
      for (int j= i+1; j <m_ReferenceSet.size (); j ++) {
        // Selecting parents
        Subset parent1 =m_ReferenceSet.get (i);
        Subset parent2 =m_ReferenceSet.get (j);

        // Initializing childs Intersecting parents
        Subset child1 = intersectSubsets (parent1, parent2);
        Subset child2 =child1.clone ();

        // Candidate genes to add: the parents' symmetric difference.
        Subset simDif =simetricDif (parent1, parent2, getCombination ().getSelectedTag ().getID ());
        BitSet aux =(BitSet)simDif.subset.clone ();

        // Grow the two children greedily; merit ties are broken by subset
        // cardinality, then uniformly at random.
        boolean improvement =true;

        while (improvement) {
          Subset best1 =getBestgen (child1,aux);
          Subset best2 =getBestgen (child2,aux);

          if(best1 !=null || best2!=null){
            if(best2 ==null){
              child1 =best1.clone ();
              continue;
            }
            if(best1 ==null){
              child2 =best2.clone ();
              continue;
            }
            if(best1 !=null && best2 !=null){
              double merit1 =best1.merit;
              double merit2 =best2.merit;

              if(merit1 >merit2){
                child1 =best1.clone ();
                continue;
              }
              if(merit1 <merit2){
                child2 =best2.clone ();
                continue;
              }
              if(merit1 ==merit2){
                if(best1.subset.cardinality () > best2.subset.cardinality ()){
                  child2 =best2.clone ();
                  continue;
                }
                if(best1.subset.cardinality () < best2.subset.cardinality ()){
                  child1 =best1.clone ();
                  continue;
                }
                if(best1.subset.cardinality () == best2.subset.cardinality ()){
                  double random = m_random.nextDouble ();
                  if(random < 0.5)child1 =best1.clone ();
                  else child2 =best2.clone ();
                  continue;
                }
              }
            }
          }else{
            // Neither child can be improved any further: keep both.
            m_parentsCombination.add (child1);
            m_parentsCombination.add (child2);
            improvement =false;
          }
        }
      }
    }
    m_parentsCombination = filterSubset (m_parentsCombination,m_ReferenceSet.size());
    GenerateReferenceSet (m_parentsCombination,m_ReferenceSet.size ()/2, m_ReferenceSet.size ()/2);
    m_parentsCombination = m_parentsCombination.subList (0, m_ReferenceSet.size ());
  }

  /**
   * Create the initial Population
   *
   * @param popSize the size of the initial population
   * @exception Exception if there is a trouble evaluating any solution
   */
  public void CreatePopulation(int popSize) throws Exception{
    InitPopulation(popSize);

    /** Delimit the best attributes from the worst*/
    int segmentation =m_numAttribs/2;

    /*TEST*/
    /*
    System.out.println ("AttributeRanking");
    for (int i = 0; i <attributeRanking.size (); i++){
      if(i ==segmentation)System.out.println ("-------------------------SEGMENTATION------------------------");
      printSubset (attributeRanking.get (i));
    }
    */

    // Each chromosome greedily accumulates top-segment attributes while the
    // merit keeps improving; chromosome i (i <= segmentation) is seeded with
    // ranked attribute i so the best attributes are spread across members.
    for (int i = 0; i<m_popSize; i++) {
      List<Subset> attributeRankingCopy = new ArrayList<Subset>();
      for (int j = 0; j<m_attributeRanking.size (); j++)
        attributeRankingCopy.add (m_attributeRanking.get (j));

      double last_evaluation =-999;
      double current_evaluation =0;
      boolean doneAnew =true;

      while (true) {
        // generate a random number in the interval[0..segmentation]
        int random_number = m_random.nextInt (segmentation+1) /*generateRandomNumber (segmentation)*/;
        if(doneAnew && i <=segmentation)random_number =i;
        doneAnew =false;

        Subset s1 =((Subset)attributeRankingCopy.get (random_number)).clone ();
        Subset s2 =((Subset)m_population.get (i)).clone ();

        // trying to add a new gen in the chromosome i of the population
        Subset joiners =joinSubsets (s1, s2 );
        current_evaluation =joiners.merit;

        if(current_evaluation > last_evaluation){
          m_population.set (i,joiners);
          last_evaluation =current_evaluation;
          try {
            // Replace the consumed slot with the next attribute beyond the
            // segment so it cannot be drawn again.
            attributeRankingCopy.set (random_number, attributeRankingCopy.get (segmentation+1));
            attributeRankingCopy.remove (segmentation+1);
          }catch (IndexOutOfBoundsException ex) {
            // No attributes left beyond the segment: blank the slot instead.
            attributeRankingCopy.set (random_number,new Subset(new BitSet(m_numAttribs),0));
            continue;
          }
        }
        else{
          // there's not more improvement
          break;
        }
      }
    }
    //m_population =bubbleSubsetSort (m_population);
  }

  /**
   * Rank all the attributes individually acording to their merits
   *
   * @return an ordered List of Subsets with just one attribute
   * @exception Exception if the evaluation can not be completed
   */
  public List<Subset> RankEachAttribute() throws Exception{
    List<Subset> result =new ArrayList<Subset>();

    for (int i = 0; i<m_numAttribs; i++) {
      if(i==m_classIndex)continue;

      BitSet an_Attribute =new BitSet(m_numAttribs);
      an_Attribute.set (i);

      double merit =ASEvaluator.evaluateSubset (an_Attribute);
      m_totalEvals++;

      result.add (new
Subset(an_Attribute, merit)); } return bubbleSubsetSort(result); } //.......... /** * Evaluate each gen of a BitSet inserted in a Subset and get the most significant for that Subset * * @return a new Subset with the union of subset and the best gen of gens. * in case that there's not improvement with each gen return null * @exception Exception if the evaluation of can not be completed */ public Subset getBestgen(Subset subset, BitSet gens) throws Exception{ Subset result =null; double merit1 =subset.merit; for(int i =gens.nextSetBit(0); i >=0; i =gens.nextSetBit(i+1)){ BitSet aux =(BitSet)subset.subset.clone (); if(aux.get (i))continue; aux.set (i); double merit2 =ASEvaluator.evaluateSubset (aux); m_totalEvals++; if(merit2 >merit1){ merit1 =merit2; result =new Subset(aux,merit1); } } return result; } /** * Sort a List of subsets according to their merits * * @param subsetList the subsetList to be ordered * @return a List with ordered subsets */ public List<Subset> bubbleSubsetSort(List<Subset> subsetList){ List<Subset> result =new ArrayList<Subset>(); for (int i = 0; i<subsetList.size ()-1; i++) { Subset subset1 =subsetList.get (i); double merit1 =subset1.merit; for (int j = i+1; j<subsetList.size (); j++) { Subset subset2 =subsetList.get (j); double merit2 =subset2.merit; if(merit2 > merit1){ Subset temp =subset1; subsetList.set (i,subset2); subsetList.set (j,temp); subset1 =subset2; merit1 =subset1.merit; } } } return subsetList; } /** * get the index in a List where this have the biggest number * * @param simDif the Lists of numbers for getting from them the index of the bigger * @return an index that represents where the bigest number is. 
*/ public int getIndexofBiggest(List<Integer> simDif){ int aux =-99999; int result1 =-1; List<Integer> equalSimDif =new ArrayList<Integer>(); if(simDif.size ()==0) return -1; for (int i = 0; i<simDif.size (); i++) { if(simDif.get (i) >aux){ aux =simDif.get (i); result1 =i; } } for (int i =0; i <simDif.size (); i++) { if(simDif.get (i) ==aux){ equalSimDif.add (i); } } int finalResult =equalSimDif.get (m_random.nextInt (equalSimDif.size ()) /*generateRandomNumber (equalSimDif.size ()-1)*/); return finalResult; } /** * Save in Bitset all the gens that are in many others subsets. * * @param subsets the Lists of subsets for getting from them all their gens * @return a Bitset with all the gens contained in many others subsets. */ public BitSet getAllBits(List<Subset> subsets){ BitSet result =new BitSet(m_numAttribs); for (int i =0; i <subsets.size (); i ++) { BitSet aux =((Subset)subsets.get (i)).clone ().subset; for(int j=aux.nextSetBit(0); j>=0; j=aux.nextSetBit(j+1)) { result.set (j); } } return result; } /** * Creating space for introducing the population * * @param popSize the number of subset in the initial population */ public void InitPopulation(int popSize){ m_population =new ArrayList<Subset>(); for (int i = 0; i<popSize; i++)m_population.add (new Subset(new BitSet(m_numAttribs),0)); } /** * Join two subsets * * @param subset1 one of the subsets * @param subset2 the other subset * @return a new Subset that is te result of the Join * @exception Exception if the evaluation of the subsets can not be completed */ public Subset joinSubsets(Subset subset1, Subset subset2) throws Exception{ BitSet b1 =(BitSet)subset1.subset.clone (); BitSet b2 =(BitSet)subset2.subset.clone (); b1.or (b2); double newMerit =ASEvaluator.evaluateSubset (b1); m_totalEvals++; return new Subset((BitSet)b1.clone (), newMerit); } /** * Intersects two subsets * * @param subset1 one of the subsets * @param subset2 the other subset * @return a new Subset that is te result of the intersection * 
@exception Exception if the evaluation of the subsets can not be completed */
  public Subset intersectSubsets(Subset subset1, Subset subset2) throws Exception{
    BitSet b1 =(BitSet)subset1.subset.clone ();
    BitSet b2 =(BitSet)subset2.subset.clone ();

    b1.and (b2);

    double newMerit =ASEvaluator.evaluateSubset (b1);
    m_totalEvals++;

    return new Subset((BitSet)b1.clone (), newMerit);
  }

  /**
   * Symmetric difference of two parent subsets. In COMBINATION_REDUCED mode
   * the difference is additionally filtered: each attribute is weighted by
   * the mean merit of the reference-set members containing it, and only
   * attributes with at-least-average weight are kept.
   *
   * @param subset1 one parent
   * @param subset2 the other parent
   * @param mode COMBINATION_NOT_REDUCED or COMBINATION_REDUCED
   * @return the (possibly reduced) symmetric difference, evaluated
   * @exception Exception if a subset evaluation fails
   */
  public Subset simetricDif(Subset subset1, Subset subset2, int mode) throws Exception{
    BitSet b1 =(BitSet)subset1.subset.clone ();
    BitSet b2 =(BitSet)subset2.subset.clone ();

    b1.xor (b2);

    double newMerit =ASEvaluator.evaluateSubset (b1);
    m_totalEvals++;

    Subset result =new Subset((BitSet)b1.clone (), newMerit);

    if(mode == COMBINATION_REDUCED){
      double avgAcurracy =0;
      int totalSolutions =0;
      List<Subset> weightVector =new ArrayList<Subset>();
      BitSet res =result.subset;

      // Weight every attribute in the difference by the mean merit of the
      // reference-set members that contain it.
      for(int i=res.nextSetBit(0); i>=0; i=res.nextSetBit(i+1)){
        double merits =0;
        int numSolutions =0;
        Subset solution =null;

        for (int j = 0; j <m_ReferenceSet.size (); j ++) {
          solution =(Subset)m_ReferenceSet.get (j);
          if(solution.subset.get (i)){
            merits +=solution.merit;
            numSolutions ++;
          }
        }

        BitSet b =new BitSet(m_numAttribs);
        b.set (i);
        Subset s =new Subset(b, merits/(double)numSolutions);
        weightVector.add (s);

        avgAcurracy +=merits;
        totalSolutions ++;
      }

      // NOTE(review): if the difference is empty, totalSolutions is 0 and
      // avgAcurracy becomes NaN; the loop below then keeps nothing and the
      // empty subset is evaluated — confirm this fallback is intended.
      avgAcurracy =avgAcurracy/(double)totalSolutions;

      BitSet newResult =new BitSet(m_numAttribs);
      for (int i = 0; i<weightVector.size (); i++) {
        Subset aux =weightVector.get (i);
        if(aux.merit >=avgAcurracy){
          newResult.or (aux.subset);
        }
      }

      double merit =ASEvaluator.evaluateSubset (newResult);
      result =new Subset(newResult, merit);
    }
    return result;
  }

  /**
   * Returns a uniformly distributed random int in the interval [0, limit].
   *
   * @param limit the (inclusive) upper bound
   * @return a random number in [0, limit]
   */
  public int generateRandomNumber(int limit){
    // FIX: was (int)Math.round (Math.random ()*(limit+0.4)). That ignored the
    // seeded m_random (so the -S seed gave no reproducibility through this
    // helper) and the round(x*(limit+0.4)) mapping was non-uniform at the
    // interval endpoints. nextInt(limit+1) is uniform on [0, limit].
    return m_random.nextInt (limit + 1);
  }

  /**
   * Calculate the treshold of a dataSet given an evaluator
   *
   * @return the treshhold of the dataSet
   * @exception Exception if the calculation can not be completed
   */
  public double calculateTreshhold() throws Exception{
    // Merit of the full attribute set (all attributes except the class).
    BitSet fullSet =new BitSet(m_numAttribs);

    for (int i= 0; i< m_numAttribs; i++) {
      if(i ==m_classIndex)continue;
      fullSet.set (i);
    }
    return ASEvaluator.evaluateSubset (fullSet);
  }

  /**
   * Calculate the Simetric Diference of two subsets
   *
   * @return the Simetric Diference
   */
  public int SimetricDiference(Subset subset, BitSet bitset){
    BitSet aux =subset.clone ().subset;
    aux.xor (bitset);
    return aux.cardinality ();
  }

  /**
   * Filter a given Lis of Subsets removing the equals subsets
   * @param subsetList to filter
   * @param preferredSize the preferred size of the new List (if it is -1, then the filter is make it
   *        for all subsets, else then the filter method stops when the given preferred
   *        size is reached or all the subset have been filtered).
   * @return a new List filtered
   */
  public List<Subset> filterSubset(List<Subset> subsetList, int preferredSize){
    if(subsetList.size () <=preferredSize && preferredSize !=-1)return subsetList;

    // Remove duplicates in place; stop early once the list has shrunk to
    // preferredSize (unless preferredSize is -1 = "filter everything").
    for (int i =0; i <subsetList.size ()-1; i ++) {
      for (int j =i+1; j <subsetList.size (); j ++) {
        Subset focus =subsetList.get (i);
        if(focus.isEqual (subsetList.get (j))){
          subsetList.remove (j);
          j--;
          if(subsetList.size () <=preferredSize && preferredSize !=-1)return subsetList;
        }
      }
    }
    return subsetList;
  }

  //..........
  // Test Methods

  /**
   * Renders a subset as "merit [1-based attribute indexes]" for the reports.
   */
  public String printSubset(Subset subset){
    StringBuffer bufferString = new StringBuffer();

    if(subset == null){
      return "";
    }

    BitSet bits =subset.subset;
    double merit =subset.merit;
    List<Integer> indexes =new ArrayList<Integer>();

    for (int i = 0; i<m_numAttribs; i++) {
      if(bits.get (i)){
        indexes.add (i+1);
      }
    }
    bufferString.append (Utils.doubleToString (merit,8,5)+"\t "+indexes.toString ()+"\n");

    return bufferString.toString ();
  }
  //........
protected void resetOptions () { m_popSize = -1; m_initialPopSize = -1; m_calculatedInitialPopSize = -1; m_treshold = -1; m_typeOfCombination = COMBINATION_NOT_REDUCED; m_seed = 1; m_debug = true; m_totalEvals = 0; m_bestMerit = 0; m_processinTime = 0; } /** * converts a BitSet into a list of attribute indexes * @param group the BitSet to convert * @return an array of attribute indexes **/ public int[] attributeList (BitSet group) { int count = 0; // count how many were selected for (int i = 0; i < m_numAttribs; i++) { if (group.get(i)) { count++; } } int[] list = new int[count]; count = 0; for (int i = 0; i < m_numAttribs; i++) { if (group.get(i)) { list[count++] = i; } } return list; } // Auxiliar Class for handling Chromosomes and its respective merit public class Subset implements Serializable { double merit; BitSet subset; public Subset(BitSet subset, double merit){ this.subset =(BitSet)subset.clone (); this.merit =merit; } public boolean isEqual(Subset othersubset){ if(subset.equals (othersubset.subset))return true; return false; } public Subset clone(){ return new Subset((BitSet)subset.clone (), merit); } } //.......... }
37,244
27.366337
227
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/StartSetHandler.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * StartSetHandler.java
 * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.attributeSelection;

/**
 * Interface for search methods capable of doing something sensible
 * given a starting set of attributes.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public interface StartSetHandler {

  /**
   * Sets a starting set of attributes for the search. It is the
   * search method's responsibility to report this start set (if any)
   * in its toString() method.
   *
   * @param startSet a string containing a list of attributes (and or ranges),
   * eg. 1,2,6,10-15.
   * @exception Exception if start set can't be set.
   */
  void setStartSet (String startSet) throws Exception;

  /**
   * Returns a list of attributes (and or attribute ranges) as a String
   *
   * @return a list of attributes (and or attribute ranges)
   */
  String getStartSet ();
}
1,612
31.26
78
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/SubsetEvaluator.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * SubsetEvaluator.java
 * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.attributeSelection;

import java.util.BitSet;

/**
 * Interface for attribute subset evaluators.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public interface SubsetEvaluator {

  /**
   * evaluates a subset of attributes
   *
   * @param subset a bitset representing the attribute subset to be
   * evaluated
   * @return the "merit" of the subset
   * @exception Exception if the subset could not be evaluated
   */
  double evaluateSubset(BitSet subset) throws Exception;
}
1,323
28.422222
74
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/SubsetSizeForwardSelection.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * SubsetSizeForwardSelection.java * Copyright (C) 2007 Martin Guetlein * */ package weka.attributeSelection; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.Utils; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.util.BitSet; import java.util.Enumeration; import java.util.Random; import java.util.Vector; /** <!-- globalinfo-start --> * SubsetSizeForwardSelection:<br/> * <br/> * Extension of LinearForwardSelection. The search performs an interior cross-validation (seed and number of folds can be specified). A LinearForwardSelection is performed on each foldto determine the optimal subset-size (using the given SubsetSizeEvaluator). Finally, a LinearForwardSelection up to the optimal subset-size is performed on the whole data.<br/> * <br/> * For more information see:<br/> * <br/> * Martin Guetlein (2006). Large Scale Attribute Selection Using Wrappers. Freiburg, Germany. 
* <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -I * Perform initial ranking to select the * top-ranked attributes.</pre> * * <pre> -K &lt;num&gt; * Number of top-ranked attributes that are * taken into account by the search.</pre> * * <pre> -T &lt;0 = fixed-set | 1 = fixed-width&gt; * Type of Linear Forward Selection (default = 0).</pre> * * <pre> -S &lt;num&gt; * Size of lookup cache for evaluated subsets. * Expressed as a multiple of the number of * attributes in the data set. (default = 1)</pre> * * <pre> -E &lt;subset evaluator&gt; * Subset-evaluator used for subset-size determination.-- -M</pre> * * <pre> -F &lt;num&gt; * Number of cross validation folds * for subset size determination (default = 5).</pre> * * <pre> -R &lt;num&gt; * Seed for cross validation * subset size determination. (default = 1)</pre> * * <pre> -Z * verbose on/off</pre> * * <pre> * Options specific to evaluator weka.attributeSelection.ClassifierSubsetEval: * </pre> * * <pre> -B &lt;classifier&gt; * class name of the classifier to use for accuracy estimation. * Place any classifier options LAST on the command line * following a "--". eg.: * -B weka.classifiers.bayes.NaiveBayes ... 
-- -K * (default: weka.classifiers.rules.ZeroR)</pre> * * <pre> -T * Use the training data to estimate accuracy.</pre> * * <pre> -H &lt;filename&gt; * Name of the hold out/test set to * estimate accuracy on.</pre> * * <pre> * Options specific to scheme weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @author Martin Guetlein (martin.guetlein@gmail.com) * @version $Revision: 5605 $ */ public class SubsetSizeForwardSelection extends ASSearch implements OptionHandler { /** search directions */ protected static final int TYPE_FIXED_SET = 0; protected static final int TYPE_FIXED_WIDTH = 1; public static final Tag[] TAGS_TYPE = { new Tag(TYPE_FIXED_SET, "Fixed-set"), new Tag(TYPE_FIXED_WIDTH, "Fixed-width"), }; // member variables /** perform initial ranking to select top-ranked attributes */ protected boolean m_performRanking; /** * number of top-ranked attributes that are taken into account for the * search */ protected int m_numUsedAttributes; /** 0 == fixed-set, 1 == fixed-width */ protected int m_linearSelectionType; /** the subset evaluator to use for subset size determination */ private ASEvaluation m_setSizeEval; /** * Number of cross validation folds for subset size determination (default = * 5). */ protected int m_numFolds; /** Seed for cross validation subset size determination. 
(default = 1) */ protected int m_seed; /** number of attributes in the data */ protected int m_numAttribs; /** total number of subsets evaluated during a search */ protected int m_totalEvals; /** for debugging */ protected boolean m_verbose; /** holds the merit of the best subset found */ protected double m_bestMerit; /** holds the maximum size of the lookup cache for evaluated subsets */ protected int m_cacheSize; /** * Constructor */ public SubsetSizeForwardSelection() { resetOptions(); } /** * Returns a string describing this search method * * @return a description of the search method suitable for displaying in the * explorer/experimenter gui */ public String globalInfo() { return "SubsetSizeForwardSelection:\n\n" + "Extension of LinearForwardSelection. The search performs an interior " + "cross-validation (seed and number of folds can be specified). A " + "LinearForwardSelection is performed on each foldto determine the optimal " + "subset-size (using the given SubsetSizeEvaluator). Finally, a " + "LinearForwardSelection up to the optimal subset-size is performed on " + "the whole data.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. 
* * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Martin Guetlein and Eibe Frank and Mark Hall"); result.setValue(Field.YEAR, "2009"); result.setValue(Field.TITLE, "Large Scale Attribute Selection Using Wrappers"); result.setValue(Field.BOOKTITLE, "Proc IEEE Symposium on Computational Intelligence and Data Mining"); result.setValue(Field.PAGES, "332-339"); result.setValue(Field.PUBLISHER, "IEEE"); additional = result.add(Type.MASTERSTHESIS); additional.setValue(Field.AUTHOR, "Martin Guetlein"); additional.setValue(Field.YEAR, "2006"); additional.setValue(Field.TITLE, "Large Scale Attribute Selection Using Wrappers"); additional.setValue(Field.SCHOOL, "Albert-Ludwigs-Universitaet"); additional.setValue(Field.ADDRESS, "Freiburg, Germany"); return result; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. * */ public Enumeration listOptions() { Vector newVector = new Vector(9); newVector.addElement(new Option("\tPerform initial ranking to select the" + "\n\ttop-ranked attributes.", "I", 0, "-I")); newVector.addElement(new Option( "\tNumber of top-ranked attributes that are " + "\n\ttaken into account by the search.", "K", 1, "-K <num>")); newVector.addElement(new Option( "\tType of Linear Forward Selection (default = 0).", "T", 1, "-T <0 = fixed-set | 1 = fixed-width>")); newVector.addElement(new Option( "\tSize of lookup cache for evaluated subsets." + "\n\tExpressed as a multiple of the number of" + "\n\tattributes in the data set. (default = 1)", "S", 1, "-S <num>")); newVector.addElement(new Option( "\tSubset-evaluator used for subset-size determination." 
+ "-- -M", "E", 1, "-E <subset evaluator>")); newVector.addElement(new Option("\tNumber of cross validation folds" + "\n\tfor subset size determination (default = 5).", "F", 1, "-F <num>")); newVector.addElement(new Option("\tSeed for cross validation" + "\n\tsubset size determination. (default = 1)", "R", 1, "-R <num>")); newVector.addElement(new Option("\tverbose on/off", "Z", 0, "-Z")); if ((m_setSizeEval != null) && (m_setSizeEval instanceof OptionHandler)) { newVector.addElement(new Option("", "", 0, "\nOptions specific to " + "evaluator " + m_setSizeEval.getClass().getName() + ":")); Enumeration enu = ((OptionHandler) m_setSizeEval).listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } } return newVector.elements(); } /** * Parses a given list of options. * * Valid options are: * <p> * * -I <br> * Perform initial ranking to select top-ranked attributes. * <p> * * -K <num> <br> * Number of top-ranked attributes that are taken into account. * <p> * * -T <0 = fixed-set | 1 = fixed-width> <br> * Typ of Linear Forward Selection (default = 0). * <p> * * -S <num> <br> * Size of lookup cache for evaluated subsets. Expressed as a multiple of * the number of attributes in the data set. (default = 1). * <p> * * -E <string> <br> * class name of subset evaluator to use for subset size determination * (default = null, same subset evaluator as for ranking and final forward * selection is used). Place any evaluator options LAST on the command line * following a "--". eg. -A weka.attributeSelection.ClassifierSubsetEval ... -- * -M * * </pre> * * -F <num> <br> * Number of cross validation folds for subset size determination (default = * 5). * <p> * * -R <num> <br> * Seed for cross validation subset size determination. (default = 1) * <p> * * -Z <br> * verbose on/off. 
* <p>
   *
   * @param options
   *          the list of options as an array of strings
   * @exception Exception
   *              if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String optionString;
    resetOptions();

    setPerformRanking(Utils.getFlag('I', options));

    optionString = Utils.getOption('K', options);

    if (optionString.length() != 0) {
      setNumUsedAttributes(Integer.parseInt(optionString));
    }

    optionString = Utils.getOption('T', options);

    if (optionString.length() != 0) {
      setType(new SelectedTag(Integer.parseInt(optionString), TAGS_TYPE));
    } else {
      setType(new SelectedTag(TYPE_FIXED_SET, TAGS_TYPE));
    }

    // -S is the lookup cache size (NOT the seed; the seed is -R below)
    optionString = Utils.getOption('S', options);

    if (optionString.length() != 0) {
      setLookupCacheSize(Integer.parseInt(optionString));
    }

    optionString = Utils.getOption('E', options);

    if (optionString.length() == 0) {
      System.out.println(
          "No subset size evaluator given, using evaluator that is used for final search.");
      m_setSizeEval = null;
    } else {
      setSubsetSizeEvaluator(ASEvaluation.forName(optionString,
          Utils.partitionOptions(options)));
    }

    optionString = Utils.getOption('F', options);

    if (optionString.length() != 0) {
      setNumSubsetSizeCVFolds(Integer.parseInt(optionString));
    }

    optionString = Utils.getOption('R', options);

    if (optionString.length() != 0) {
      setSeed(Integer.parseInt(optionString));
    }

    m_verbose = Utils.getFlag('Z', options);
  }

  /**
   * Set the maximum size of the evaluated subset cache (hashtable). This is
   * expressed as a multiplier for the number of attributes in the data set.
   * (default = 1).
   *
   * @param size
   *          the maximum size of the hashtable
   */
  public void setLookupCacheSize(int size) {
    if (size >= 0) {
      m_cacheSize = size;
    }
  }

  /**
   * Return the maximum size of the evaluated subset cache (expressed as a
   * multiplier for the number of attributes in a data set.
   *
   * @return the maximum size of the hashtable.
   */
  public int getLookupCacheSize() {
    return m_cacheSize;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String lookupCacheSizeTipText() {
    return "Set the maximum size of the lookup cache of evaluated subsets. This is "
        + "expressed as a multiplier of the number of attributes in the data set. "
        + "(default = 1).";
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String performRankingTipText() {
    return "Perform initial ranking to select top-ranked attributes.";
  }

  /**
   * Perform initial ranking to select top-ranked attributes.
   *
   * @param b
   *          true if initial ranking should be performed
   */
  public void setPerformRanking(boolean b) {
    m_performRanking = b;
  }

  /**
   * Get boolean if initial ranking should be performed to select the
   * top-ranked attributes
   *
   * @return true if initial ranking should be performed
   */
  public boolean getPerformRanking() {
    return m_performRanking;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String numUsedAttributesTipText() {
    return "Set the amount of top-ranked attributes that are taken into account by the search process.";
  }

  /**
   * Set the number of top-ranked attributes that taken into account by the
   * search process.
   *
   * @param k
   *          the number of attributes
   * @exception Exception
   *              if k is less than 2
   */
  public void setNumUsedAttributes(int k) throws Exception {
    if (k < 2) {
      throw new Exception("Value of -K must be >= 2.");
    }

    m_numUsedAttributes = k;
  }

  /**
   * Get the number of top-ranked attributes that taken into account by the
   * search process.
   *
   * @return the number of top-ranked attributes that taken into account
   */
  public int getNumUsedAttributes() {
    return m_numUsedAttributes;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String typeTipText() {
    return "Set the type of the search.";
  }

  /**
   * Set the type
   *
   * @param t
   *          the Linear Forward Selection type
   */
  public void setType(SelectedTag t) {
    if (t.getTags() == TAGS_TYPE) {
      m_linearSelectionType = t.getSelectedTag().getID();
    }
  }

  /**
   * Get the type
   *
   * @return the Linear Forward Selection type
   */
  public SelectedTag getType() {
    return new SelectedTag(m_linearSelectionType, TAGS_TYPE);
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String subsetSizeEvaluatorTipText() {
    return "Subset evaluator to use for subset size determination.";
  }

  /**
   * Set the subset evaluator to use for subset size determination.
   *
   * @param eval
   *          the subset evaluator to use for subset size determination.
   * @exception Exception
   *              if the evaluator is not a SubsetEvaluator
   */
  public void setSubsetSizeEvaluator(ASEvaluation eval) throws Exception {
    if (!(eval instanceof SubsetEvaluator)) {
      throw new Exception(eval.getClass().getName() + " is no subset evaluator.");
    }

    m_setSizeEval = eval;
  }

  /**
   * Get the subset evaluator used for subset size determination.
   *
   * @return the evaluator used for subset size determination.
   */
  public ASEvaluation getSubsetSizeEvaluator() {
    return m_setSizeEval;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String numSubsetSizeCVFoldsTipText() {
    return "Number of cross validation folds for subset size determination";
  }

  /**
   * Set the number of cross validation folds for subset size determination
   * (default = 5).
   *
   * @param f
   *          number of folds
   */
  public void setNumSubsetSizeCVFolds(int f) {
    m_numFolds = f;
  }

  /**
   * Get the number of cross validation folds for subset size determination
   * (default = 5).
   *
   * @return number of folds
   */
  public int getNumSubsetSizeCVFolds() {
    return m_numFolds;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String seedTipText() {
    return "Seed for cross validation subset size determination. (default = 1)";
  }

  /**
   * Seed for cross validation subset size determination. (default = 1)
   *
   * @param s
   *          seed
   */
  public void setSeed(int s) {
    m_seed = s;
  }

  /**
   * Seed for cross validation subset size determination. (default = 1)
   *
   * @return seed
   */
  public int getSeed() {
    return m_seed;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String verboseTipText() {
    return "Turn on verbose output for monitoring the search's progress.";
  }

  /**
   * Set whether verbose output should be generated.
   *
   * @param b
   *          true if output is to be verbose.
   */
  public void setVerbose(boolean b) {
    m_verbose = b;
  }

  /**
   * Get whether output is to be verbose
   *
   * @return true if output will be verbose
   */
  public boolean getVerbose() {
    return m_verbose;
  }

  /**
   * Gets the current settings of LinearForwardSelection.
   *
   * @return an array of strings suitable for passing to setOptions()
   */
  public String[] getOptions() {
    String[] evaluatorOptions = new String[0];

    if ((m_setSizeEval != null) && (m_setSizeEval instanceof OptionHandler)) {
      evaluatorOptions = ((OptionHandler) m_setSizeEval).getOptions();
    }

    String[] options = new String[15 + evaluatorOptions.length];
    int current = 0;

    if (m_performRanking) {
      options[current++] = "-I";
    }

    options[current++] = "-K";
    options[current++] = "" + m_numUsedAttributes;
    options[current++] = "-T";
    options[current++] = "" + m_linearSelectionType;
    options[current++] = "-S";
    options[current++] = "" + m_cacheSize;
    options[current++] = "-F";
    options[current++] = "" + m_numFolds;
    // BUGFIX: the seed must be emitted as -R; setOptions() parses -R as the
    // seed and -S as the lookup cache size, so emitting the seed under -S
    // broke the getOptions()/setOptions() round-trip.
    options[current++] = "-R";
    options[current++] = "" + m_seed;

    // BUGFIX: -Z is parsed with Utils.getFlag (a value-less flag), so it must
    // be emitted without a trailing "true"/"false" value, and only when set.
    if (m_verbose) {
      options[current++] = "-Z";
    }

    if (m_setSizeEval != null) {
      options[current++] = "-E";
      options[current++] = m_setSizeEval.getClass().getName();
    }

    options[current++] = "--";
    System.arraycopy(evaluatorOptions, 0, options, current,
        evaluatorOptions.length);
    current += evaluatorOptions.length;

    while (current < options.length) {
      options[current++] = "";
    }

    return options;
  }

  /**
   * returns a description of the search as a String
   *
   * @return a description of the search
   */
  public String toString() {
    StringBuffer LFSString = new StringBuffer();
    LFSString.append("\tSubset Size Forward Selection.\n");
    LFSString.append("\tLinear Forward Selection Type: ");

    if (m_linearSelectionType == TYPE_FIXED_SET) {
      LFSString.append("fixed-set\n");
    } else {
      LFSString.append("fixed-width\n");
    }

    LFSString.append("\tNumber of top-ranked attributes that are used: "
        + m_numUsedAttributes + "\n");
    LFSString.append(
        "\tNumber of cross validation folds for subset size determination: "
        + m_numFolds + "\n");
    LFSString.append("\tSeed for cross validation subset size determination: "
        + m_seed + "\n");
    LFSString.append("\tTotal number of subsets evaluated: " + m_totalEvals
        + "\n");
    LFSString.append("\tMerit of best subset found: "
        + Utils.doubleToString(Math.abs(m_bestMerit), 8, 3) + "\n");

    return LFSString.toString();
  }

  /**
   * Searches the attribute subset space by subset size forward selection
   *
   * @param ASEval
   *          the attribute evaluator to guide the search
   * @param data
   *          the training instances.
   * @return an array (not necessarily ordered) of selected attribute indexes
   * @exception Exception
   *              if the search can't be completed
   */
  public int[] search(ASEvaluation ASEval, Instances data) throws Exception {
    m_totalEvals = 0;

    if (!(ASEval instanceof SubsetEvaluator)) {
      throw new Exception(ASEval.getClass().getName() + " is not a "
          + "Subset evaluator!");
    }

    // fall back to the final-search evaluator for subset size determination
    // when no dedicated evaluator was supplied
    if (m_setSizeEval == null) {
      m_setSizeEval = ASEval;
    }

    m_numAttribs = data.numAttributes();

    if (m_numUsedAttributes > m_numAttribs) {
      System.out.println(
          "Decreasing number of top-ranked attributes to total number of attributes: "
          + data.numAttributes());
      m_numUsedAttributes = m_numAttribs;
    }

    Instances[] trainData = new Instances[m_numFolds];
    Instances[] testData = new Instances[m_numFolds];
    LFSMethods[] searchResults = new LFSMethods[m_numFolds];
    Random random = new Random(m_seed);
    Instances dataCopy = new Instances(data);
    dataCopy.randomize(random);

    if (dataCopy.classAttribute().isNominal()) {
      dataCopy.stratify(m_numFolds);
    }

    for (int f = 0; f < m_numFolds; f++) {
      trainData[f] = dataCopy.trainCV(m_numFolds, f, random);
      testData[f] = dataCopy.testCV(m_numFolds, f);
    }

    LFSMethods LSF = new LFSMethods();
    int[] ranking;

    if (m_performRanking) {
      ASEval.buildEvaluator(data);
      ranking = LSF.rankAttributes(data, (SubsetEvaluator) ASEval, m_verbose);
    } else {
      // no ranking requested: use the natural attribute order
      ranking = new int[m_numAttribs];

      for (int i = 0; i < ranking.length; i++) {
        ranking[i] = i;
      }
    }

    int maxSubsetSize = 0;

    // first pass: unrestricted forward search on each internal fold; the
    // largest subset found determines how far every fold must be extended
    for (int f = 0; f < m_numFolds; f++) {
      if (m_verbose) {
        System.out.println("perform search on internal fold: " + (f + 1) + "/"
            + m_numFolds);
      }

      m_setSizeEval.buildEvaluator(trainData[f]);
      searchResults[f] = new LFSMethods();
      searchResults[f].forwardSearch(m_cacheSize, new BitSet(m_numAttribs),
          ranking, m_numUsedAttributes,
          m_linearSelectionType == TYPE_FIXED_WIDTH, 1, -1, trainData[f],
          (SubsetEvaluator) m_setSizeEval, m_verbose);
      maxSubsetSize = Math.max(maxSubsetSize,
          searchResults[f].getBestGroup().cardinality());
    }

    if (m_verbose) {
      System.out.println(
          "continue searches on internal folds to maxSubsetSize ("
          + maxSubsetSize + ")");
    }

    // second pass: continue any fold whose best subset is still smaller than
    // maxSubsetSize so that every subset size up to maxSubsetSize is covered
    for (int f = 0; f < m_numFolds; f++) {
      if (m_verbose) {
        System.out.print("perform search on internal fold: " + (f + 1) + "/"
            + m_numFolds + " with starting set ");
        LFSMethods.printGroup(searchResults[f].getBestGroup(),
            trainData[f].numAttributes());
      }

      if (searchResults[f].getBestGroup().cardinality() < maxSubsetSize) {
        m_setSizeEval.buildEvaluator(trainData[f]);
        searchResults[f].forwardSearch(m_cacheSize,
            searchResults[f].getBestGroup(), ranking, m_numUsedAttributes,
            m_linearSelectionType == TYPE_FIXED_WIDTH, 1, maxSubsetSize,
            trainData[f], (SubsetEvaluator) m_setSizeEval, m_verbose);
      }
    }

    // estimate the merit of each subset size on the held-out fold data
    double[][] testMerit = new double[m_numFolds][maxSubsetSize + 1];

    for (int f = 0; f < m_numFolds; f++) {
      for (int s = 1; s <= maxSubsetSize; s++) {
        if (HoldOutSubsetEvaluator.class.isInstance(m_setSizeEval)) {
          m_setSizeEval.buildEvaluator(trainData[f]);
          testMerit[f][s] = ((HoldOutSubsetEvaluator) m_setSizeEval)
              .evaluateSubset(searchResults[f].getBestGroupOfSize(s),
                  testData[f]);
        } else {
          m_setSizeEval.buildEvaluator(testData[f]);
          testMerit[f][s] = ((SubsetEvaluator) m_setSizeEval)
              .evaluateSubset(searchResults[f].getBestGroupOfSize(s));
        }
      }
    }

    // pick the subset size with the best cross-validated average merit
    // (incremental running mean over the folds)
    double[] avgTestMerit = new double[maxSubsetSize + 1];
    int finalSubsetSize = -1;

    for (int s = 1; s <= maxSubsetSize; s++) {
      for (int f = 0; f < m_numFolds; f++) {
        avgTestMerit[s] = ((avgTestMerit[s] * f) + testMerit[f][s])
            / (double) (f + 1);
      }

      if ((finalSubsetSize == -1)
          || (avgTestMerit[s] > avgTestMerit[finalSubsetSize])) {
        finalSubsetSize = s;
      }

      if (m_verbose) {
        System.out.println("average merit for subset-size " + s + ": "
            + avgTestMerit[s]);
      }
    }

    if (m_verbose) {
      System.out.println("performing final forward selection to subset-size: "
          + finalSubsetSize);
    }

    // final search on the full data, capped at the chosen subset size
    ASEval.buildEvaluator(data);
    LSF.forwardSearch(m_cacheSize, new BitSet(m_numAttribs), ranking,
        m_numUsedAttributes, m_linearSelectionType == TYPE_FIXED_WIDTH, 1,
        finalSubsetSize, data, (SubsetEvaluator) ASEval, m_verbose);
    m_totalEvals = LSF.getNumEvalsTotal();
    m_bestMerit = LSF.getBestMerit();

    return attributeList(LSF.getBestGroup());
  }

  /**
   * Reset options to default values
   */
  protected void resetOptions() {
    m_performRanking = true;
    m_numUsedAttributes = 50;
    m_linearSelectionType = TYPE_FIXED_SET;
    m_setSizeEval = new ClassifierSubsetEval();
    m_numFolds = 5;
    m_seed = 1;
    m_totalEvals = 0;
    m_cacheSize = 1;
    m_verbose = false;
  }

  /**
   * converts a BitSet into a list of attribute indexes
   *
   * @param group
   *          the BitSet to convert
   * @return an array of attribute indexes
   */
  protected int[] attributeList(BitSet group) {
    int count = 0;

    // count how many were selected
    for (int i = 0; i < m_numAttribs; i++) {
      if (group.get(i)) {
        count++;
      }
    }

    int[] list = new int[count];
    count = 0;

    for (int i = 0; i < m_numAttribs; i++) {
      if (group.get(i)) {
        list[count++] = i;
      }
    }

    return list;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 5605 $");
  }
}
28,240
29.563853
360
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/SymmetricalUncertAttributeEval.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    SymmetricalUncertAttributeEval.java
 *    Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.attributeSelection;

import java.util.Enumeration;
import java.util.Vector;

import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.ContingencyTables;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.filters.Filter;
import weka.filters.supervised.attribute.Discretize;

/**
 <!-- globalinfo-start -->
 * SymmetricalUncertAttributeEval :<br/>
 * <br/>
 * Evaluates the worth of an attribute by measuring the symmetrical uncertainty with respect to the class.
 <br/>
 * <br/>
 * SymmU(Class, Attribute) = 2 * (H(Class) - H(Class | Attribute)) / (H(Class) + H(Attribute)).<br/>
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -M
 *  treat missing values as a seperate value.</pre>
 *
 <!-- options-end -->
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 * @see Discretize
 */
public class SymmetricalUncertAttributeEval
  extends ASEvaluation
  implements AttributeEvaluator, OptionHandler {

  /** for serialization */
  static final long serialVersionUID = -8096505776132296416L;

  /** The training instances */
  private Instances m_trainInstances;

  /** The class index */
  private int m_classIndex;

  /** The number of attributes */
  private int m_numAttribs;

  /** The number of instances */
  private int m_numInstances;

  /** The number of classes */
  private int m_numClasses;

  /** Treat missing values as a seperate value */
  private boolean m_missing_merge;

  /**
   * Returns a string describing this attribute evaluator
   * @return a description of the evaluator suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    // BUGFIX: parenthesize the denominator — symmetrical uncertainty divides
    // by the SUM of the two entropies, the old text read as if only H(Class)
    // were the denominator.
    return "SymmetricalUncertAttributeEval :\n\nEvaluates the worth of an attribute "
      + "by measuring the symmetrical uncertainty with respect to the class. "
      + "\n\n SymmU(Class, Attribute) = 2 * (H(Class) - H(Class | Attribute)) "
      + "/ (H(Class) + H(Attribute)).\n";
  }

  /**
   * Constructor
   */
  public SymmetricalUncertAttributeEval () {
    resetOptions();
  }

  /**
   * Returns an enumeration describing the available options.
   * @return an enumeration of all the available options.
   **/
  public Enumeration listOptions () {
    Vector newVector = new Vector(1);
    newVector.addElement(new Option("\ttreat missing values as a seperate "
                                    + "value.", "M", 0, "-M"));
    return newVector.elements();
  }

  /**
   * Parses a given list of options.
   <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -M
   *  treat missing values as a seperate value.</pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   **/
  public void setOptions (String[] options) throws Exception {
    resetOptions();
    setMissingMerge(!(Utils.getFlag('M', options)));
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String missingMergeTipText() {
    return "Distribute counts for missing values. Counts are distributed "
      + "across other values in proportion to their frequency. Otherwise, "
      + "missing is treated as a separate value.";
  }

  /**
   * distribute the counts for missing values across observed values
   *
   * @param b true=distribute missing values.
   */
  public void setMissingMerge (boolean b) {
    m_missing_merge = b;
  }

  /**
   * get whether missing values are being distributed or not
   *
   * @return true if missing values are being distributed.
   */
  public boolean getMissingMerge () {
    return m_missing_merge;
  }

  /**
   * Gets the current settings of WrapperSubsetEval.
   * @return an array of strings suitable for passing to setOptions()
   */
  public String[] getOptions () {
    String[] options = new String[1];
    int current = 0;

    if (!getMissingMerge()) {
      options[current++] = "-M";
    }

    while (current < options.length) {
      options[current++] = "";
    }

    return options;
  }

  /**
   * Returns the capabilities of this evaluator.
   *
   * @return the capabilities of this evaluator
   * @see Capabilities
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes: numeric/date are allowed because buildEvaluator
    // discretizes them before evaluation
    result.enable(Capability.NOMINAL_ATTRIBUTES);
    result.enable(Capability.NUMERIC_ATTRIBUTES);
    result.enable(Capability.DATE_ATTRIBUTES);
    result.enable(Capability.MISSING_VALUES);

    // class
    result.enable(Capability.NOMINAL_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    return result;
  }

  /**
   * Initializes a symmetrical uncertainty attribute evaluator.
   * Discretizes all attributes that are numeric.
   *
   * @param data set of instances serving as training data
   * @throws Exception if the evaluator has not been
   * generated successfully
   */
  public void buildEvaluator (Instances data) throws Exception {
    // can evaluator handle data?
    getCapabilities().testWithFail(data);

    m_trainInstances = data;
    m_classIndex = m_trainInstances.classIndex();
    m_numAttribs = m_trainInstances.numAttributes();
    m_numInstances = m_trainInstances.numInstances();
    Discretize disTransform = new Discretize();
    disTransform.setUseBetterEncoding(true);
    disTransform.setInputFormat(m_trainInstances);
    m_trainInstances = Filter.useFilter(m_trainInstances, disTransform);
    m_numClasses = m_trainInstances.attribute(m_classIndex).numValues();
  }

  /**
   * set options to default values
   */
  protected void resetOptions () {
    m_trainInstances = null;
    m_missing_merge = true;
  }

  /**
   * evaluates an individual attribute by measuring the symmetrical
   * uncertainty between it and the class.
   *
   * @param attribute the index of the attribute to be evaluated
   * @return the uncertainty
   * @throws Exception if the attribute could not be evaluated
   */
  public double evaluateAttribute (int attribute) throws Exception {
    int i, j, ii, jj;
    int ni, nj;
    double sum = 0.0;
    // one extra row/column to collect missing-value counts
    ni = m_trainInstances.attribute(attribute).numValues() + 1;
    nj = m_numClasses + 1;
    double[] sumi, sumj;
    Instance inst;
    double temp = 0.0;
    // BUGFIX: sumi/sumj were allocated twice; the first allocations were
    // immediately discarded dead stores.
    sumi = new double[ni];
    sumj = new double[nj];

    double[][] counts = new double[ni][nj];

    // explicit zeroing (Java already zero-initializes arrays; kept for
    // symmetry with the re-zeroing passes below)
    for (i = 0; i < ni; i++) {
      sumi[i] = 0.0;

      for (j = 0; j < nj; j++) {
        sumj[j] = 0.0;
        counts[i][j] = 0.0;
      }
    }

    // Fill the contingency table; missing values go in the last row/column
    for (i = 0; i < m_numInstances; i++) {
      inst = m_trainInstances.instance(i);

      if (inst.isMissing(attribute)) {
        ii = ni - 1;
      }
      else {
        ii = (int)inst.value(attribute);
      }

      if (inst.isMissing(m_classIndex)) {
        jj = nj - 1;
      }
      else {
        jj = (int)inst.value(m_classIndex);
      }

      counts[ii][jj]++;
    }

    // get the row totals
    for (i = 0; i < ni; i++) {
      sumi[i] = 0.0;

      for (j = 0; j < nj; j++) {
        sumi[i] += counts[i][j];
        sum += counts[i][j];
      }
    }

    // get the column totals
    for (j = 0; j < nj; j++) {
      sumj[j] = 0.0;

      for (i = 0; i < ni; i++) {
        sumj[j] += counts[i][j];
      }
    }

    // distribute missing counts proportionally to the observed values
    if (m_missing_merge &&
        (sumi[ni-1] < m_numInstances) && (sumj[nj-1] < m_numInstances)) {
      double[] i_copy = new double[sumi.length];
      double[] j_copy = new double[sumj.length];
      double[][] counts_copy = new double[sumi.length][sumj.length];

      for (i = 0; i < ni; i++) {
        System.arraycopy(counts[i], 0, counts_copy[i], 0, sumj.length);
      }

      System.arraycopy(sumi, 0, i_copy, 0, sumi.length);
      System.arraycopy(sumj, 0, j_copy, 0, sumj.length);
      double total_missing = (sumi[ni - 1] + sumj[nj - 1]
                              - counts[ni - 1][nj - 1]);

      // do the missing i's
      if (sumi[ni - 1] > 0.0) {
        for (j = 0; j < nj - 1; j++) {
          if (counts[ni - 1][j] > 0.0) {
            for (i = 0; i < ni - 1; i++) {
              temp = ((i_copy[i]/(sum - i_copy[ni - 1]))*counts[ni - 1][j]);
              counts[i][j] += temp;
              sumi[i] += temp;
            }

            counts[ni - 1][j] = 0.0;
          }
        }
      }

      sumi[ni - 1] = 0.0;

      // do the missing j's
      if (sumj[nj - 1] > 0.0) {
        for (i = 0; i < ni - 1; i++) {
          if (counts[i][nj - 1] > 0.0) {
            for (j = 0; j < nj - 1; j++) {
              temp = ((j_copy[j]/(sum - j_copy[nj - 1]))*counts[i][nj - 1]);
              counts[i][j] += temp;
              sumj[j] += temp;
            }

            counts[i][nj - 1] = 0.0;
          }
        }
      }

      sumj[nj - 1] = 0.0;

      // do the both missing
      if (counts[ni - 1][nj - 1] > 0.0 && total_missing != sum) {
        for (i = 0; i < ni - 1; i++) {
          for (j = 0; j < nj - 1; j++) {
            temp = (counts_copy[i][j]/(sum - total_missing))
              * counts_copy[ni - 1][nj - 1];
            counts[i][j] += temp;
            sumi[i] += temp;
            sumj[j] += temp;
          }
        }

        counts[ni - 1][nj - 1] = 0.0;
      }
    }

    return ContingencyTables.symmetricalUncertainty(counts);
  }

  /**
   * Return a description of the evaluator
   * @return description as a string
   */
  public String toString () {
    StringBuffer text = new StringBuffer();

    if (m_trainInstances == null) {
      text.append("\tSymmetrical Uncertainty evaluator has not been built");
    }
    else {
      text.append("\tSymmetrical Uncertainty Ranking Filter");
      if (!m_missing_merge) {
        text.append("\n\tMissing values treated as seperate");
      }
    }

    text.append("\n");
    return text.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  // ============
  // Test method.
  // ============
  /**
   * Main method for testing this class.
   *
   * @param argv should contain the following arguments:
   * -t training file
   */
  public static void main (String[] argv) {
    runEvaluator(new SymmetricalUncertAttributeEval(), argv);
  }
}
11,200
24.808756
112
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/UnsupervisedAttributeEvaluator.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    UnsupervisedAttributeEvaluator.java
 *    Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.attributeSelection;

/**
 * Abstract unsupervised attribute evaluator.
 *
 * Marker base class: it adds no members beyond the serialization id, so a
 * concrete subclass only needs to implement the AttributeEvaluator contract
 * (evaluateAttribute) on top of ASEvaluation. Extending this class signals
 * that the evaluator does not use class labels.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public abstract class UnsupervisedAttributeEvaluator
  extends ASEvaluation
  implements AttributeEvaluator {

  /** for serialization */
  private static final long serialVersionUID = -4100897318675336178L;
}
1,192
29.589744
74
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/UnsupervisedSubsetEvaluator.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    UnsupervisedSubsetEvaluator.java
 *    Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.attributeSelection;

import weka.clusterers.Clusterer;

/**
 * Abstract unsupervised attribute subset evaluator.
 *
 * Base class for subset evaluators that judge attribute subsets without
 * class labels, by delegating to a clusterer. Subclasses must expose the
 * clusterer they wrap and the number of clusters it produces via the
 * abstract accessors below.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public abstract class UnsupervisedSubsetEvaluator
  extends ASEvaluation
  implements SubsetEvaluator {

  /** for serialization */
  static final long serialVersionUID = 627934376267488763L;

  /**
   * Return the number of clusters used by the subset evaluator
   *
   * @return the number of clusters used
   * @exception Exception if an error occurs
   */
  public abstract int getNumClusters() throws Exception;

  /**
   * Get the clusterer
   *
   * @return the clusterer
   */
  public abstract Clusterer getClusterer();

  /**
   * Set the clusterer to use
   *
   * @param d the clusterer to use
   */
  public abstract void setClusterer(Clusterer d);
}
1,676
26.491803
74
java
tsml-java
tsml-java-master/src/main/java/weka/attributeSelection/WrapperSubsetEval.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * WrapperSubsetEval.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import java.util.BitSet; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.Evaluation; import weka.classifiers.rules.ZeroR; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Remove; /** <!-- globalinfo-start --> * WrapperSubsetEval:<br/> * <br/> * Evaluates attribute sets by using a learning scheme. Cross validation is used * to estimate the accuracy of the learning scheme for a set of attributes.<br/> * <br/> * For more information see:<br/> * <br/> * Ron Kohavi, George H. John (1997). Wrappers for feature subset selection. Artificial Intelligence. 97(1-2):273-324. 
* <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;article{Kohavi1997, * author = {Ron Kohavi and George H. John}, * journal = {Artificial Intelligence}, * note = {Special issue on relevance}, * number = {1-2}, * pages = {273-324}, * title = {Wrappers for feature subset selection}, * volume = {97}, * year = {1997}, * ISSN = {0004-3702} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -B &lt;base learner&gt; * class name of base learner to use for accuracy estimation. * Place any classifier options LAST on the command line * following a "--". eg.: * -B weka.classifiers.bayes.NaiveBayes ... -- -K * (default: weka.classifiers.rules.ZeroR)</pre> * * <pre> -F &lt;num&gt; * number of cross validation folds to use for estimating accuracy. * (default=5)</pre> * * <pre> -R &lt;seed&gt; * Seed for cross validation accuracy testimation. * (default = 1)</pre> * * <pre> -T &lt;num&gt; * threshold by which to execute another cross validation * (standard deviation---expressed as a percentage of the mean). * (default: 0.01 (1%))</pre> * * <pre> -E &lt;acc | rmse | mae | f-meas | auc | auprc&gt; * Performance evaluation measure to use for selecting attributes. * (Default = accuracy for discrete class and rmse for numeric class)</pre> * * <pre> -IRclass &lt;label | index&gt; * Optional class value (label or 1-based index) to use in conjunction with * IR statistics (f-meas, auc or auprc). 
Omitting this option will use * the class-weighted average.</pre> * * <pre> * Options specific to scheme weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 9771 $ */ public class WrapperSubsetEval extends ASEvaluation implements SubsetEvaluator, OptionHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -4573057658746728675L; /** training instances */ private Instances m_trainInstances; /** class index */ private int m_classIndex; /** number of attributes in the training data */ private int m_numAttribs; /** number of instances in the training data */ private int m_numInstances; /** holds an evaluation object */ private Evaluation m_Evaluation; /** holds the base classifier object */ private Classifier m_BaseClassifier; /** number of folds to use for cross validation */ private int m_folds; /** random number seed */ private int m_seed; /** * the threshold by which to do further cross validations when * estimating the accuracy of a subset */ private double m_threshold; public static final int EVAL_DEFAULT = 1; public static final int EVAL_ACCURACY = 2; public static final int EVAL_RMSE = 3; public static final int EVAL_MAE = 4; public static final int EVAL_FMEASURE = 5; public static final int EVAL_AUC = 6; public static final int EVAL_AUPRC = 7; public static final Tag[] TAGS_EVALUATION = { new Tag(EVAL_DEFAULT, "Default: accuracy (discrete class); RMSE (numeric class)"), new Tag(EVAL_ACCURACY, "Accuracy (discrete class only)"), new Tag(EVAL_RMSE, "RMSE (of the class probabilities for discrete class)"), new Tag(EVAL_MAE, "MAE (of the class probabilities for discrete class)"), new Tag(EVAL_FMEASURE, "F-measure (discrete class only)"), new Tag(EVAL_AUC, "AUC (area under the ROC curve - discrete class only)"), new Tag(EVAL_AUPRC, 
"AUPRC (area under the precision-recall curve - discrete class only)") }; /** The evaluation measure to use */ protected int m_evaluationMeasure = EVAL_DEFAULT; /** * If >= 0, and an IR metric is being used, then evaluate with * respect to this class value (0-based index) */ protected int m_IRClassVal = -1; /** User supplied option for IR class value (either name or 1-based index) */ protected String m_IRClassValS = ""; /** * Returns a string describing this attribute evaluator * @return a description of the evaluator suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "WrapperSubsetEval:\n\n" +"Evaluates attribute sets by using a learning scheme. Cross " +"validation is used to estimate the accuracy of the learning " +"scheme for a set of attributes.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "Ron Kohavi and George H. John"); result.setValue(Field.YEAR, "1997"); result.setValue(Field.TITLE, "Wrappers for feature subset selection"); result.setValue(Field.JOURNAL, "Artificial Intelligence"); result.setValue(Field.VOLUME, "97"); result.setValue(Field.NUMBER, "1-2"); result.setValue(Field.PAGES, "273-324"); result.setValue(Field.NOTE, "Special issue on relevance"); result.setValue(Field.ISSN, "0004-3702"); return result; } /** * Constructor. Calls restOptions to set default options **/ public WrapperSubsetEval () { resetOptions(); } /** * Returns an enumeration describing the available options. * @return an enumeration of all the available options. 
**/ public Enumeration listOptions () { Vector newVector = new Vector(4); newVector.addElement(new Option( "\tclass name of base learner to use for \taccuracy estimation.\n" + "\tPlace any classifier options LAST on the command line\n" + "\tfollowing a \"--\". eg.:\n" + "\t\t-B weka.classifiers.bayes.NaiveBayes ... -- -K\n" + "\t(default: weka.classifiers.rules.ZeroR)", "B", 1, "-B <base learner>")); newVector.addElement(new Option( "\tnumber of cross validation folds to use for estimating accuracy.\n" + "\t(default=5)", "F", 1, "-F <num>")); newVector.addElement(new Option( "\tSeed for cross validation accuracy testimation.\n" + "\t(default = 1)", "R", 1,"-R <seed>")); newVector.addElement(new Option( "\tthreshold by which to execute another cross validation\n" + "\t(standard deviation---expressed as a percentage of the mean).\n" + "\t(default: 0.01 (1%))", "T", 1, "-T <num>")); newVector.addElement(new Option( "\tPerformance evaluation measure to use for selecting attributes.\n" + "\t(Default = accuracy for discrete class and rmse for numeric class)", "E", 1, "-E <acc | rmse | mae | f-meas | auc | auprc>")); newVector.addElement(new Option( "\tOptional class value (label or 1-based index) to use in conjunction with\n" + "\tIR statistics (f-meas, auc or auprc). Omitting this option will use\n" + "\tthe class-weighted average.", "IRclass", 1, "-IRclass <label | index>")); if ((m_BaseClassifier != null) && (m_BaseClassifier instanceof OptionHandler)) { newVector.addElement(new Option("", "", 0, "\nOptions specific to scheme " + m_BaseClassifier.getClass().getName() + ":")); Enumeration enu = ((OptionHandler)m_BaseClassifier).listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } } return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -B &lt;base learner&gt; * class name of base learner to use for accuracy estimation. 
* Place any classifier options LAST on the command line * following a "--". eg.: * -B weka.classifiers.bayes.NaiveBayes ... -- -K * (default: weka.classifiers.rules.ZeroR)</pre> * * <pre> -F &lt;num&gt; * number of cross validation folds to use for estimating accuracy. * (default=5)</pre> * * <pre> -R &lt;seed&gt; * Seed for cross validation accuracy testimation. * (default = 1)</pre> * * <pre> -T &lt;num&gt; * threshold by which to execute another cross validation * (standard deviation---expressed as a percentage of the mean). * (default: 0.01 (1%))</pre> * * <pre> -E &lt;acc | rmse | mae | f-meas | auc | auprc&gt; * Performance evaluation measure to use for selecting attributes. * (Default = accuracy for discrete class and rmse for numeric class)</pre> * * <pre> -IRclass &lt;label | index&gt; * Optional class value (label or 1-based index) to use in conjunction with * IR statistics (f-meas, auc or auprc). Omitting this option will use * the class-weighted average.</pre> * * <pre> * Options specific to scheme weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions (String[] options) throws Exception { String optionString; resetOptions(); optionString = Utils.getOption('B', options); if (optionString.length() == 0) optionString = ZeroR.class.getName(); setClassifier(AbstractClassifier.forName(optionString, Utils.partitionOptions(options))); optionString = Utils.getOption('F', options); if (optionString.length() != 0) { setFolds(Integer.parseInt(optionString)); } optionString = Utils.getOption('R', options); if (optionString.length() != 0) { setSeed(Integer.parseInt(optionString)); } // optionString = Utils.getOption('S',options); // if (optionString.length() != 0) // { // seed = Integer.parseInt(optionString); // } 
optionString = Utils.getOption('T', options); if (optionString.length() != 0) { Double temp; temp = Double.valueOf(optionString); setThreshold(temp.doubleValue()); } optionString = Utils.getOption('E', options); if (optionString.length() != 0) { if (optionString.equals("acc")) { setEvaluationMeasure(new SelectedTag(EVAL_ACCURACY, TAGS_EVALUATION)); } else if (optionString.equals("rmse")) { setEvaluationMeasure(new SelectedTag(EVAL_RMSE, TAGS_EVALUATION)); } else if (optionString.equals("mae")) { setEvaluationMeasure(new SelectedTag(EVAL_MAE, TAGS_EVALUATION)); } else if (optionString.equals("f-meas")) { setEvaluationMeasure(new SelectedTag(EVAL_FMEASURE, TAGS_EVALUATION)); } else if (optionString.equals("auc")) { setEvaluationMeasure(new SelectedTag(EVAL_AUC, TAGS_EVALUATION)); } else if (optionString.equals("auprc")) { setEvaluationMeasure(new SelectedTag(EVAL_AUPRC, TAGS_EVALUATION)); } else { throw new IllegalArgumentException("Invalid evaluation measure"); } } optionString = Utils.getOption("IRClass", options); if (optionString.length() > 0) { setIRClassValue(optionString); } } /** * Set the class value (label or index) to use with IR metric * evaluation of subsets. Leaving this unset will result in * the class weighted average for the IR metric being used. * * @param val the class label or 1-based index of the class label * to use when evaluating subsets with an IR metric */ public void setIRClassValue(String val) { m_IRClassValS = val; } /** * Get the class value (label or index) to use with IR metric * evaluation of subsets. Leaving this unset will result in * the class weighted average for the IR metric being used. 
* * @return the class label or 1-based index of the class label * to use when evaluating subsets with an IR metric */ public String getIRClassValue() { return m_IRClassValS; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String IRClassValueTipText() { return "The class label, or 1-based index of the class label, to use " + "when evaluating subsets with an IR metric (such as f-measure " + "or AUC. Leaving this unset will result in the class frequency " + "weighted average of the metric being used."; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String evaluationMeasureTipText() { return "The measure used to evaluate the performance of attribute combinations."; } /** * Gets the currently set performance evaluation measure used for selecting * attributes for the decision table * * @return the performance evaluation measure */ public SelectedTag getEvaluationMeasure() { return new SelectedTag(m_evaluationMeasure, TAGS_EVALUATION); } /** * Sets the performance evaluation measure to use for selecting attributes * for the decision table * * @param newMethod the new performance evaluation metric to use */ public void setEvaluationMeasure(SelectedTag newMethod) { if (newMethod.getTags() == TAGS_EVALUATION) { m_evaluationMeasure = newMethod.getSelectedTag().getID(); } } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String thresholdTipText() { return "Repeat xval if stdev of mean exceeds this value."; } /** * Set the value of the threshold for repeating cross validation * * @param t the value of the threshold */ public void setThreshold (double t) { m_threshold = t; } /** * Get the value of the threshold * * @return the threshold as a double */ public double 
getThreshold () { return m_threshold; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String foldsTipText() { return "Number of xval folds to use when estimating subset accuracy."; } /** * Set the number of folds to use for accuracy estimation * * @param f the number of folds */ public void setFolds (int f) { m_folds = f; } /** * Get the number of folds used for accuracy estimation * * @return the number of folds */ public int getFolds () { return m_folds; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String seedTipText() { return "Seed to use for randomly generating xval splits."; } /** * Set the seed to use for cross validation * * @param s the seed */ public void setSeed (int s) { m_seed = s; } /** * Get the random number seed used for cross validation * * @return the seed */ public int getSeed () { return m_seed; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String classifierTipText() { return "Classifier to use for estimating the accuracy of subsets"; } /** * Set the classifier to use for accuracy estimation * * @param newClassifier the Classifier to use. */ public void setClassifier (Classifier newClassifier) { m_BaseClassifier = newClassifier; } /** * Get the classifier used as the base learner. * * @return the classifier used as the classifier */ public Classifier getClassifier () { return m_BaseClassifier; } /** * Gets the current settings of WrapperSubsetEval. 
* * @return an array of strings suitable for passing to setOptions() */ public String[] getOptions () { String[] classifierOptions = new String[0]; if ((m_BaseClassifier != null) && (m_BaseClassifier instanceof OptionHandler)) { classifierOptions = ((OptionHandler)m_BaseClassifier).getOptions(); } String[] options = new String[13 + classifierOptions.length]; int current = 0; if (getClassifier() != null) { options[current++] = "-B"; options[current++] = getClassifier().getClass().getName(); } options[current++] = "-F"; options[current++] = "" + getFolds(); options[current++] = "-T"; options[current++] = "" + getThreshold(); options[current++] = "-R"; options[current++] = "" + getSeed(); options[current++] = "-E"; switch (m_evaluationMeasure) { case EVAL_DEFAULT: case EVAL_ACCURACY: options[current++] = "acc"; break; case EVAL_RMSE: options[current++] = "rmse"; break; case EVAL_MAE: options[current++] = "mae"; break; case EVAL_FMEASURE: options[current++] = "f-meas"; break; case EVAL_AUC: options[current++] = "auc"; break; case EVAL_AUPRC: options[current++] = "auprc"; break; } if (m_IRClassValS != null && m_IRClassValS.length() > 0) { options[current++] = "-IRClass"; options[current++] = m_IRClassValS; } options[current++] = "--"; System.arraycopy(classifierOptions, 0, options, current, classifierOptions.length); current += classifierOptions.length; while (current < options.length) { options[current++] = ""; } return options; } protected void resetOptions () { m_trainInstances = null; m_Evaluation = null; m_BaseClassifier = new ZeroR(); m_folds = 5; m_seed = 1; m_threshold = 0.01; } /** * Returns the capabilities of this evaluator. 
* * @return the capabilities of this evaluator * @see Capabilities */ public Capabilities getCapabilities() { Capabilities result; if (getClassifier() == null) { result = super.getCapabilities(); result.disableAll(); } else { result = getClassifier().getCapabilities(); } // set dependencies for (Capability cap: Capability.values()) result.enableDependency(cap); // adjustment for class based on selected evaluation metric result.disable(Capability.NUMERIC_CLASS); result.disable(Capability.DATE_CLASS); if (m_evaluationMeasure != EVAL_ACCURACY && m_evaluationMeasure != EVAL_FMEASURE && m_evaluationMeasure != EVAL_AUC && m_evaluationMeasure != EVAL_AUPRC) { result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); } result.setMinimumNumberInstances(getFolds()); return result; } /** * Generates a attribute evaluator. Has to initialize all fields of the * evaluator that are not being set via options. * * @param data set of instances serving as training data * @throws Exception if the evaluator has not been * generated successfully */ public void buildEvaluator (Instances data) throws Exception { // can evaluator handle data? 
getCapabilities().testWithFail(data); m_trainInstances = data; m_classIndex = m_trainInstances.classIndex(); m_numAttribs = m_trainInstances.numAttributes(); m_numInstances = m_trainInstances.numInstances(); if (m_IRClassValS != null && m_IRClassValS.length() > 0) { // try to parse as a number first try { m_IRClassVal = Integer.parseInt(m_IRClassValS); // make zero-based m_IRClassVal--; } catch (NumberFormatException e) { // now try as a named class label m_IRClassVal = m_trainInstances.classAttribute().indexOfValue(m_IRClassValS); } } } /** * Evaluates a subset of attributes * * @param subset a bitset representing the attribute subset to be * evaluated * @return the error rate * @throws Exception if the subset could not be evaluated */ public double evaluateSubset (BitSet subset) throws Exception { double evalMetric = 0; double[] repError = new double[5]; int numAttributes = 0; int i, j; Random Rnd = new Random(m_seed); Remove delTransform = new Remove(); delTransform.setInvertSelection(true); // copy the instances Instances trainCopy = new Instances(m_trainInstances); // count attributes set in the BitSet for (i = 0; i < m_numAttribs; i++) { if (subset.get(i)) { numAttributes++; } } // set up an array of attribute indexes for the filter (+1 for the class) int[] featArray = new int[numAttributes + 1]; for (i = 0, j = 0; i < m_numAttribs; i++) { if (subset.get(i)) { featArray[j++] = i; } } featArray[j] = m_classIndex; delTransform.setAttributeIndicesArray(featArray); delTransform.setInputFormat(trainCopy); trainCopy = Filter.useFilter(trainCopy, delTransform); // max of 5 repetitions of cross validation for (i = 0; i < 5; i++) { m_Evaluation = new Evaluation(trainCopy); m_Evaluation.crossValidateModel(m_BaseClassifier, trainCopy, m_folds, Rnd); switch (m_evaluationMeasure) { case EVAL_DEFAULT: repError[i] = m_Evaluation.errorRate(); break; case EVAL_ACCURACY: repError[i] = m_Evaluation.errorRate(); break; case EVAL_RMSE: repError[i] = 
m_Evaluation.rootMeanSquaredError(); break; case EVAL_MAE: repError[i] = m_Evaluation.meanAbsoluteError(); break; case EVAL_FMEASURE: if (m_IRClassVal < 0) { repError[i] = m_Evaluation.weightedFMeasure(); } else { repError[i] = m_Evaluation.fMeasure(m_IRClassVal); } break; case EVAL_AUC: if (m_IRClassVal < 0) { repError[i] = m_Evaluation.weightedAreaUnderROC(); } else { repError[i] = m_Evaluation.areaUnderROC(m_IRClassVal); } break; case EVAL_AUPRC: if (m_IRClassVal < 0) { repError[i] = m_Evaluation.weightedAreaUnderPRC(); } else { repError[i] = m_Evaluation.areaUnderPRC(m_IRClassVal); } break; } // check on the standard deviation if (!repeat(repError, i + 1)) { i++; break; } } for (j = 0; j < i; j++) { evalMetric += repError[j]; } evalMetric /= (double)i; m_Evaluation = null; switch (m_evaluationMeasure) { case EVAL_DEFAULT: case EVAL_ACCURACY: case EVAL_RMSE: case EVAL_MAE: evalMetric = -evalMetric; // maximize break; } return evalMetric; } /** * Returns a string describing the wrapper * * @return the description as a string */ public String toString () { StringBuffer text = new StringBuffer(); if (m_trainInstances == null) { text.append("\tWrapper subset evaluator has not been built yet\n"); } else { text.append("\tWrapper Subset Evaluator\n"); text.append("\tLearning scheme: " + getClassifier().getClass().getName() + "\n"); text.append("\tScheme options: "); String[] classifierOptions = new String[0]; if (m_BaseClassifier instanceof OptionHandler) { classifierOptions = ((OptionHandler)m_BaseClassifier).getOptions(); for (int i = 0; i < classifierOptions.length; i++) { text.append(classifierOptions[i] + " "); } } text.append("\n"); String IRClassL = ""; if (m_IRClassVal >= 0) { IRClassL = "(class value: " + m_trainInstances.classAttribute().value(m_IRClassVal) + ")"; } switch (m_evaluationMeasure) { case EVAL_DEFAULT: case EVAL_ACCURACY: if (m_trainInstances.attribute(m_classIndex).isNumeric()) { text.append("\tSubset evaluation: RMSE\n"); } else { 
text.append("\tSubset evaluation: classification error\n"); } break; case EVAL_RMSE: if (m_trainInstances.attribute(m_classIndex).isNumeric()) { text.append("\tSubset evaluation: RMSE\n"); } else { text.append("\tSubset evaluation: RMSE (probability estimates)\n"); } break; case EVAL_MAE: if (m_trainInstances.attribute(m_classIndex).isNumeric()) { text.append("\tSubset evaluation: MAE\n"); } else { text.append("\tSubset evaluation: MAE (probability estimates)\n"); } break; case EVAL_FMEASURE: text.append("\tSubset evaluation: F-measure " + (m_IRClassVal >=0 ? IRClassL : "") + "\n"); break; case EVAL_AUC: text.append("\tSubset evaluation: area under the ROC curve " + (m_IRClassVal >=0 ? IRClassL : "") + "\n"); break; case EVAL_AUPRC: text.append("\tSubset evalation: area under the precision-recal curve " + (m_IRClassVal >=0 ? IRClassL : "") + "\n"); break; } text.append("\tNumber of folds for accuracy estimation: " + m_folds + "\n"); } return text.toString(); } /** * decides whether to do another repeat of cross validation. If the * standard deviation of the cross validations * is greater than threshold% of the mean (default 1%) then another * repeat is done. * * @param repError an array of cross validation results * @param entries the number of cross validations done so far * @return true if another cv is to be done */ private boolean repeat (double[] repError, int entries) { int i; double mean = 0; double variance = 0; // setting a threshold less than zero allows for "manual" exploration // and prevents multiple xval for each subset if (m_threshold < 0) { return false; } if (entries == 1) { return true; } for (i = 0; i < entries; i++) { mean += repError[i]; } mean /= (double)entries; for (i = 0; i < entries; i++) { variance += ((repError[i] - mean)*(repError[i] - mean)); } variance /= (double)entries; if (variance > 0) { variance = Math.sqrt(variance); } if ((variance/mean) > m_threshold) { return true; } return false; } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 9771 $"); } /** * Main method for testing this class. * * @param args the options */ public static void main (String[] args) { runEvaluator(new WrapperSubsetEval(), args); } }
29,392
29.177618
118
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/AbstractClassifier.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * AbstractClassifier.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers; import java.io.Serializable; import java.util.Enumeration; import java.util.Vector; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.CapabilitiesHandler; import weka.core.Instance; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.SerializedObject; import weka.core.Utils; /** * Abstract classifier. All schemes for numeric or nominal prediction in * Weka extend this class. Note that a classifier MUST either implement * distributionForInstance() or classifyInstance(). * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Len Trigg (trigg@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public abstract class AbstractClassifier implements Classifier, Cloneable, Serializable, OptionHandler, CapabilitiesHandler, RevisionHandler { /** for serialization */ private static final long serialVersionUID = 6502780192411755341L; /** Whether the classifier is run in debug mode. */ protected boolean m_Debug = false; /** * Classifies the given test instance. The instance has to belong to a * dataset when it's being classified. Note that a classifier MUST * implement either this or distributionForInstance(). 
* * @param instance the instance to be classified * @return the predicted most likely class for the instance or * Utils.missingValue() if no prediction is made * @exception Exception if an error occurred during the prediction */ public double classifyInstance(Instance instance) throws Exception { double [] dist = distributionForInstance(instance); if (dist == null) { throw new Exception("Null distribution predicted"); } switch (instance.classAttribute().type()) { case Attribute.NOMINAL: double max = 0; int maxIndex = 0; for (int i = 0; i < dist.length; i++) { if (dist[i] > max) { maxIndex = i; max = dist[i]; } } if (max > 0) { return maxIndex; } else { return Utils.missingValue(); } case Attribute.NUMERIC: return dist[0]; default: return Utils.missingValue(); } } /** * Predicts the class memberships for a given instance. If * an instance is unclassified, the returned array elements * must be all zero. If the class is numeric, the array * must consist of only one element, which contains the * predicted value. Note that a classifier MUST implement * either this or classifyInstance(). * * @param instance the instance to be classified * @return an array containing the estimated membership * probabilities of the test instance in each class * or the numeric prediction * @exception Exception if distribution could not be * computed successfully */ public double[] distributionForInstance(Instance instance) throws Exception { double[] dist = new double[instance.numClasses()]; switch (instance.classAttribute().type()) { case Attribute.NOMINAL: double classification = classifyInstance(instance); if (Utils.isMissingValue(classification)) { return dist; } else { dist[(int)classification] = 1.0; } return dist; case Attribute.NUMERIC: dist[0] = classifyInstance(instance); return dist; default: return dist; } } /** * Creates a new instance of a classifier given it's class name and * (optional) arguments to pass to it's setOptions method. 
If the * classifier implements OptionHandler and the options parameter is * non-null, the classifier will have it's options set. * * @param classifierName the fully qualified class name of the classifier * @param options an array of options suitable for passing to setOptions. May * be null. * @return the newly created classifier, ready for use. * @exception Exception if the classifier name is invalid, or the options * supplied are not acceptable to the classifier */ public static Classifier forName(String classifierName, String [] options) throws Exception { return ((AbstractClassifier)Utils.forName(Classifier.class, classifierName, options)); } /** * Creates a deep copy of the given classifier using serialization. * * @param model the classifier to copy * @return a deep copy of the classifier * @exception Exception if an error occurs */ public static Classifier makeCopy(Classifier model) throws Exception { return (Classifier)new SerializedObject(model).getObject(); } /** * Creates a given number of deep copies of the given classifier using serialization. * * @param model the classifier to copy * @param num the number of classifier copies to create. * @return an array of classifiers. * @exception Exception if an error occurs */ public static Classifier [] makeCopies(Classifier model, int num) throws Exception { if (model == null) { throw new Exception("No model classifier set"); } Classifier [] classifiers = new Classifier [num]; SerializedObject so = new SerializedObject(model); for(int i = 0; i < classifiers.length; i++) { classifiers[i] = (Classifier) so.getObject(); } return classifiers; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ public Enumeration listOptions() { Vector newVector = new Vector(1); newVector.addElement(new Option( "\tIf set, classifier is run in debug mode and\n" + "\tmay output additional info to the console", "D", 0, "-D")); return newVector.elements(); } /** * Parses a given list of options. Valid options are:<p> * * -D <br> * If set, classifier is run in debug mode and * may output additional info to the console.<p> * * @param options the list of options as an array of strings * @exception Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { setDebug(Utils.getFlag('D', options)); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] options; if (getDebug()) { options = new String[1]; options[0] = "-D"; } else { options = new String[0]; } return options; } /** * Set debugging mode. * * @param debug true if debug output should be printed */ public void setDebug(boolean debug) { m_Debug = debug; } /** * Get whether debugging is turned on. * * @return true if debugging output is on */ public boolean getDebug() { return m_Debug; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String debugTipText() { return "If set to true, classifier may output additional info to " + "the console."; } /** * Returns the Capabilities of this classifier. Maximally permissive * capabilities are allowed by default. Derived classifiers should * override this method and first disable all capabilities and then * enable just those capabilities that make sense for the scheme. * * @return the capabilities of this object * @see Capabilities */ public Capabilities getCapabilities() { Capabilities result = new Capabilities(this); result.enableAll(); return result; } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * runs the classifier instance with the given options. * * @param classifier the classifier to run * @param options the commandline options */ public static void runClassifier(Classifier classifier, String[] options) { try { System.out.println(Evaluation.evaluateModel(classifier, options)); } catch (Exception e) { if ( ((e.getMessage() != null) && (e.getMessage().indexOf("General options") == -1)) || (e.getMessage() == null) ) e.printStackTrace(); else System.err.println(e.getMessage()); } } }
9,364
29.307443
93
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/BVDecompose.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * BVDecompose.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers; import java.io.BufferedReader; import java.io.FileReader; import java.io.Reader; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.core.Attribute; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** <!-- globalinfo-start --> * Class for performing a Bias-Variance decomposition on any classifier using the method specified in:<br/> * <br/> * Ron Kohavi, David H. Wolpert: Bias Plus Variance Decomposition for Zero-One Loss Functions. In: Machine Learning: Proceedings of the Thirteenth International Conference, 275-283, 1996. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Kohavi1996, * author = {Ron Kohavi and David H. 
Wolpert}, * booktitle = {Machine Learning: Proceedings of the Thirteenth International Conference}, * editor = {Lorenza Saitta}, * pages = {275-283}, * publisher = {Morgan Kaufmann}, * title = {Bias Plus Variance Decomposition for Zero-One Loss Functions}, * year = {1996}, * PS = {http://robotics.stanford.edu/\~ronnyk/biasVar.ps} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -c &lt;class index&gt; * The index of the class attribute. * (default last)</pre> * * <pre> -t &lt;name of arff file&gt; * The name of the arff file used for the decomposition.</pre> * * <pre> -T &lt;training pool size&gt; * The number of instances placed in the training pool. * The remainder will be used for testing. (default 100)</pre> * * <pre> -s &lt;seed&gt; * The random number seed used.</pre> * * <pre> -x &lt;num&gt; * The number of training repetitions used. * (default 50)</pre> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -W &lt;classifier class name&gt; * Full class name of the learner used in the decomposition. * eg: weka.classifiers.bayes.NaiveBayes</pre> * * <pre> * Options specific to learner weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * Options after -- are passed to the designated sub-learner. <p> * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class BVDecompose implements OptionHandler, TechnicalInformationHandler, RevisionHandler { /** Debugging mode, gives extra output if true */ protected boolean m_Debug; /** An instantiated base classifier used for getting and testing options. */ protected Classifier m_Classifier = new weka.classifiers.rules.ZeroR(); /** The options to be passed to the base classifier. 
*/ protected String [] m_ClassifierOptions; /** The number of train iterations */ protected int m_TrainIterations = 50; /** The name of the data file used for the decomposition */ protected String m_DataFileName; /** The index of the class attribute */ protected int m_ClassIndex = -1; /** The random number seed */ protected int m_Seed = 1; /** The calculated bias (squared) */ protected double m_Bias; /** The calculated variance */ protected double m_Variance; /** The calculated sigma (squared) */ protected double m_Sigma; /** The error rate */ protected double m_Error; /** The number of instances used in the training pool */ protected int m_TrainPoolSize = 100; /** * Returns a string describing this object * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for performing a Bias-Variance decomposition on any classifier " + "using the method specified in:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Ron Kohavi and David H. 
Wolpert"); result.setValue(Field.YEAR, "1996"); result.setValue(Field.TITLE, "Bias Plus Variance Decomposition for Zero-One Loss Functions"); result.setValue(Field.BOOKTITLE, "Machine Learning: Proceedings of the Thirteenth International Conference"); result.setValue(Field.PUBLISHER, "Morgan Kaufmann"); result.setValue(Field.EDITOR, "Lorenza Saitta"); result.setValue(Field.PAGES, "275-283"); result.setValue(Field.PS, "http://robotics.stanford.edu/~ronnyk/biasVar.ps"); return result; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector newVector = new Vector(7); newVector.addElement(new Option( "\tThe index of the class attribute.\n"+ "\t(default last)", "c", 1, "-c <class index>")); newVector.addElement(new Option( "\tThe name of the arff file used for the decomposition.", "t", 1, "-t <name of arff file>")); newVector.addElement(new Option( "\tThe number of instances placed in the training pool.\n" + "\tThe remainder will be used for testing. (default 100)", "T", 1, "-T <training pool size>")); newVector.addElement(new Option( "\tThe random number seed used.", "s", 1, "-s <seed>")); newVector.addElement(new Option( "\tThe number of training repetitions used.\n" +"\t(default 50)", "x", 1, "-x <num>")); newVector.addElement(new Option( "\tTurn on debugging output.", "D", 0, "-D")); newVector.addElement(new Option( "\tFull class name of the learner used in the decomposition.\n" +"\teg: weka.classifiers.bayes.NaiveBayes", "W", 1, "-W <classifier class name>")); if ((m_Classifier != null) && (m_Classifier instanceof OptionHandler)) { newVector.addElement(new Option( "", "", 0, "\nOptions specific to learner " + m_Classifier.getClass().getName() + ":")); Enumeration enu = ((OptionHandler)m_Classifier).listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } } return newVector.elements(); } /** * Parses a given list of options. 
<p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -c &lt;class index&gt; * The index of the class attribute. * (default last)</pre> * * <pre> -t &lt;name of arff file&gt; * The name of the arff file used for the decomposition.</pre> * * <pre> -T &lt;training pool size&gt; * The number of instances placed in the training pool. * The remainder will be used for testing. (default 100)</pre> * * <pre> -s &lt;seed&gt; * The random number seed used.</pre> * * <pre> -x &lt;num&gt; * The number of training repetitions used. * (default 50)</pre> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -W &lt;classifier class name&gt; * Full class name of the learner used in the decomposition. * eg: weka.classifiers.bayes.NaiveBayes</pre> * * <pre> * Options specific to learner weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * Options after -- are passed to the designated sub-learner. 
<p> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { setDebug(Utils.getFlag('D', options)); String classIndex = Utils.getOption('c', options); if (classIndex.length() != 0) { if (classIndex.toLowerCase().equals("last")) { setClassIndex(0); } else if (classIndex.toLowerCase().equals("first")) { setClassIndex(1); } else { setClassIndex(Integer.parseInt(classIndex)); } } else { setClassIndex(0); } String trainIterations = Utils.getOption('x', options); if (trainIterations.length() != 0) { setTrainIterations(Integer.parseInt(trainIterations)); } else { setTrainIterations(50); } String trainPoolSize = Utils.getOption('T', options); if (trainPoolSize.length() != 0) { setTrainPoolSize(Integer.parseInt(trainPoolSize)); } else { setTrainPoolSize(100); } String seedString = Utils.getOption('s', options); if (seedString.length() != 0) { setSeed(Integer.parseInt(seedString)); } else { setSeed(1); } String dataFile = Utils.getOption('t', options); if (dataFile.length() == 0) { throw new Exception("An arff file must be specified" + " with the -t option."); } setDataFileName(dataFile); String classifierName = Utils.getOption('W', options); if (classifierName.length() == 0) { throw new Exception("A learner must be specified with the -W option."); } setClassifier(AbstractClassifier.forName(classifierName, Utils.partitionOptions(options))); } /** * Gets the current settings of the CheckClassifier. 
* * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] classifierOptions = new String [0]; if ((m_Classifier != null) && (m_Classifier instanceof OptionHandler)) { classifierOptions = ((OptionHandler)m_Classifier).getOptions(); } String [] options = new String [classifierOptions.length + 14]; int current = 0; if (getDebug()) { options[current++] = "-D"; } options[current++] = "-c"; options[current++] = "" + getClassIndex(); options[current++] = "-x"; options[current++] = "" + getTrainIterations(); options[current++] = "-T"; options[current++] = "" + getTrainPoolSize(); options[current++] = "-s"; options[current++] = "" + getSeed(); if (getDataFileName() != null) { options[current++] = "-t"; options[current++] = "" + getDataFileName(); } if (getClassifier() != null) { options[current++] = "-W"; options[current++] = getClassifier().getClass().getName(); } options[current++] = "--"; System.arraycopy(classifierOptions, 0, options, current, classifierOptions.length); current += classifierOptions.length; while (current < options.length) { options[current++] = ""; } return options; } /** * Get the number of instances in the training pool. * * @return number of instances in the training pool. */ public int getTrainPoolSize() { return m_TrainPoolSize; } /** * Set the number of instances in the training pool. * * @param numTrain number of instances in the training pool. */ public void setTrainPoolSize(int numTrain) { m_TrainPoolSize = numTrain; } /** * Set the classifiers being analysed * * @param newClassifier the Classifier to use. */ public void setClassifier(Classifier newClassifier) { m_Classifier = newClassifier; } /** * Gets the name of the classifier being analysed * * @return the classifier being analysed. 
*/ public Classifier getClassifier() { return m_Classifier; } /** * Sets debugging mode * * @param debug true if debug output should be printed */ public void setDebug(boolean debug) { m_Debug = debug; } /** * Gets whether debugging is turned on * * @return true if debugging output is on */ public boolean getDebug() { return m_Debug; } /** * Sets the random number seed * * @param seed the random number seed */ public void setSeed(int seed) { m_Seed = seed; } /** * Gets the random number seed * * @return the random number seed */ public int getSeed() { return m_Seed; } /** * Sets the maximum number of boost iterations * * @param trainIterations the number of boost iterations */ public void setTrainIterations(int trainIterations) { m_TrainIterations = trainIterations; } /** * Gets the maximum number of boost iterations * * @return the maximum number of boost iterations */ public int getTrainIterations() { return m_TrainIterations; } /** * Sets the name of the data file used for the decomposition * * @param dataFileName the data file to use */ public void setDataFileName(String dataFileName) { m_DataFileName = dataFileName; } /** * Get the name of the data file used for the decomposition * * @return the name of the data file */ public String getDataFileName() { return m_DataFileName; } /** * Get the index (starting from 1) of the attribute used as the class. 
* * @return the index of the class attribute */ public int getClassIndex() { return m_ClassIndex + 1; } /** * Sets index of attribute to discretize on * * @param classIndex the index (starting from 1) of the class attribute */ public void setClassIndex(int classIndex) { m_ClassIndex = classIndex - 1; } /** * Get the calculated bias squared * * @return the bias squared */ public double getBias() { return m_Bias; } /** * Get the calculated variance * * @return the variance */ public double getVariance() { return m_Variance; } /** * Get the calculated sigma squared * * @return the sigma squared */ public double getSigma() { return m_Sigma; } /** * Get the calculated error rate * * @return the error rate */ public double getError() { return m_Error; } /** * Carry out the bias-variance decomposition * * @throws Exception if the decomposition couldn't be carried out */ public void decompose() throws Exception { Reader dataReader = new BufferedReader(new FileReader(m_DataFileName)); Instances data = new Instances(dataReader); if (m_ClassIndex < 0) { data.setClassIndex(data.numAttributes() - 1); } else { data.setClassIndex(m_ClassIndex); } if (data.classAttribute().type() != Attribute.NOMINAL) { throw new Exception("Class attribute must be nominal"); } int numClasses = data.numClasses(); data.deleteWithMissingClass(); if (data.checkForStringAttributes()) { throw new Exception("Can't handle string attributes!"); } if (data.numInstances() < 2 * m_TrainPoolSize) { throw new Exception("The dataset must contain at least " + (2 * m_TrainPoolSize) + " instances"); } Random random = new Random(m_Seed); data.randomize(random); Instances trainPool = new Instances(data, 0, m_TrainPoolSize); Instances test = new Instances(data, m_TrainPoolSize, data.numInstances() - m_TrainPoolSize); int numTest = test.numInstances(); double [][] instanceProbs = new double [numTest][numClasses]; m_Error = 0; for (int i = 0; i < m_TrainIterations; i++) { if (m_Debug) { System.err.println("Iteration " + 
(i + 1)); } trainPool.randomize(random); Instances train = new Instances(trainPool, 0, m_TrainPoolSize / 2); Classifier current = AbstractClassifier.makeCopy(m_Classifier); current.buildClassifier(train); //// Evaluate the classifier on test, updating BVD stats for (int j = 0; j < numTest; j++) { int pred = (int)current.classifyInstance(test.instance(j)); if (pred != test.instance(j).classValue()) { m_Error++; } instanceProbs[j][pred]++; } } m_Error /= (m_TrainIterations * numTest); // Average the BV over each instance in test. m_Bias = 0; m_Variance = 0; m_Sigma = 0; for (int i = 0; i < numTest; i++) { Instance current = test.instance(i); double [] predProbs = instanceProbs[i]; double pActual, pPred; double bsum = 0, vsum = 0, ssum = 0; for (int j = 0; j < numClasses; j++) { pActual = (current.classValue() == j) ? 1 : 0; // Or via 1NN from test data? pPred = predProbs[j] / m_TrainIterations; bsum += (pActual - pPred) * (pActual - pPred) - pPred * (1 - pPred) / (m_TrainIterations - 1); vsum += pPred * pPred; ssum += pActual * pActual; } m_Bias += bsum; m_Variance += (1 - vsum); m_Sigma += (1 - ssum); } m_Bias /= (2 * numTest); m_Variance /= (2 * numTest); m_Sigma /= (2 * numTest); if (m_Debug) { System.err.println("Decomposition finished"); } } /** * Returns description of the bias-variance decomposition results. 
* * @return the bias-variance decomposition results as a string */ public String toString() { String result = "\nBias-Variance Decomposition\n"; if (getClassifier() == null) { return "Invalid setup"; } result += "\nClassifier : " + getClassifier().getClass().getName(); if (getClassifier() instanceof OptionHandler) { result += Utils.joinOptions(((OptionHandler)m_Classifier).getOptions()); } result += "\nData File : " + getDataFileName(); result += "\nClass Index : "; if (getClassIndex() == 0) { result += "last"; } else { result += getClassIndex(); } result += "\nTraining Pool: " + getTrainPoolSize(); result += "\nIterations : " + getTrainIterations(); result += "\nSeed : " + getSeed(); result += "\nError : " + Utils.doubleToString(getError(), 6, 4); result += "\nSigma^2 : " + Utils.doubleToString(getSigma(), 6, 4); result += "\nBias^2 : " + Utils.doubleToString(getBias(), 6, 4); result += "\nVariance : " + Utils.doubleToString(getVariance(), 6, 4); return result + "\n"; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Test method for this class * * @param args the command line arguments */ public static void main(String [] args) { try { BVDecompose bvd = new BVDecompose(); try { bvd.setOptions(args); Utils.checkForRemainingOptions(args); } catch (Exception ex) { String result = ex.getMessage() + "\nBVDecompose Options:\n\n"; Enumeration enu = bvd.listOptions(); while (enu.hasMoreElements()) { Option option = (Option) enu.nextElement(); result += option.synopsis() + "\n" + option.description() + "\n"; } throw new Exception(result); } bvd.decompose(); System.out.println(bvd.toString()); } catch (Exception ex) { System.err.println(ex.getMessage()); } } }
20,386
27.39415
187
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/BVDecomposeSegCVSub.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * BVDecomposeSegCVSub.java * Copyright (C) 2003 Paul Conilione * * Based on the class: BVDecompose.java by Len Trigg (1999) */ /* * DEDICATION * * Paul Conilione would like to express his deep gratitude and appreciation * to his Chinese Buddhist Taoist Master Sifu Chow Yuk Nen for the abilities * and insight that he has been taught, which have allowed him to program in * a clear and efficient manner. * * Master Sifu Chow Yuk Nen's Teachings are unique and precious. They are * applicable to any field of human endeavour. Through his unique and powerful * ability to skilfully apply Chinese Buddhist Teachings, people have achieved * success in; Computing, chemical engineering, business, accounting, philosophy * and more. 
* */ package weka.classifiers; import weka.core.Attribute; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.io.BufferedReader; import java.io.FileReader; import java.io.Reader; import java.util.Enumeration; import java.util.Random; import java.util.Vector; /** <!-- globalinfo-start --> * This class performs Bias-Variance decomposion on any classifier using the sub-sampled cross-validation procedure as specified in (1).<br/> * The Kohavi and Wolpert definition of bias and variance is specified in (2).<br/> * The Webb definition of bias and variance is specified in (3).<br/> * <br/> * Geoffrey I. Webb, Paul Conilione (2002). Estimating bias and variance from data. School of Computer Science and Software Engineering, Victoria, Australia.<br/> * <br/> * Ron Kohavi, David H. Wolpert: Bias Plus Variance Decomposition for Zero-One Loss Functions. In: Machine Learning: Proceedings of the Thirteenth International Conference, 275-283, 1996.<br/> * <br/> * Geoffrey I. Webb (2000). MultiBoosting: A Technique for Combining Boosting and Wagging. Machine Learning. 40(2):159-196. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;misc{Webb2002, * address = {School of Computer Science and Software Engineering, Victoria, Australia}, * author = {Geoffrey I. Webb and Paul Conilione}, * institution = {Monash University}, * title = {Estimating bias and variance from data}, * year = {2002}, * PDF = {http://www.csse.monash.edu.au/\~webb/Files/WebbConilione04.pdf} * } * * &#64;inproceedings{Kohavi1996, * author = {Ron Kohavi and David H. 
Wolpert}, * booktitle = {Machine Learning: Proceedings of the Thirteenth International Conference}, * editor = {Lorenza Saitta}, * pages = {275-283}, * publisher = {Morgan Kaufmann}, * title = {Bias Plus Variance Decomposition for Zero-One Loss Functions}, * year = {1996}, * PS = {http://robotics.stanford.edu/\~ronnyk/biasVar.ps} * } * * &#64;article{Webb2000, * author = {Geoffrey I. Webb}, * journal = {Machine Learning}, * number = {2}, * pages = {159-196}, * title = {MultiBoosting: A Technique for Combining Boosting and Wagging}, * volume = {40}, * year = {2000} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -c &lt;class index&gt; * The index of the class attribute. * (default last)</pre> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -l &lt;num&gt; * The number of times each instance is classified. * (default 10)</pre> * * <pre> -p &lt;proportion of objects in common&gt; * The average proportion of instances common between any two training sets</pre> * * <pre> -s &lt;seed&gt; * The random number seed used.</pre> * * <pre> -t &lt;name of arff file&gt; * The name of the arff file used for the decomposition.</pre> * * <pre> -T &lt;number of instances in training set&gt; * The number of instances in the training set.</pre> * * <pre> -W &lt;classifier class name&gt; * Full class name of the learner used in the decomposition. * eg: weka.classifiers.bayes.NaiveBayes</pre> * * <pre> * Options specific to learner weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * Options after -- are passed to the designated sub-learner. <p> * * @author Paul Conilione (paulc4321@yahoo.com.au) * @version $Revision: 8034 $ */ public class BVDecomposeSegCVSub implements OptionHandler, TechnicalInformationHandler, RevisionHandler { /** Debugging mode, gives extra output if true. 
*/ protected boolean m_Debug; /** An instantiated base classifier used for getting and testing options. */ protected Classifier m_Classifier = new weka.classifiers.rules.ZeroR(); /** The options to be passed to the base classifier. */ protected String [] m_ClassifierOptions; /** The number of times an instance is classified*/ protected int m_ClassifyIterations; /** The name of the data file used for the decomposition */ protected String m_DataFileName; /** The index of the class attribute */ protected int m_ClassIndex = -1; /** The random number seed */ protected int m_Seed = 1; /** The calculated Kohavi & Wolpert bias (squared) */ protected double m_KWBias; /** The calculated Kohavi & Wolpert variance */ protected double m_KWVariance; /** The calculated Kohavi & Wolpert sigma */ protected double m_KWSigma; /** The calculated Webb bias */ protected double m_WBias; /** The calculated Webb variance */ protected double m_WVariance; /** The error rate */ protected double m_Error; /** The training set size */ protected int m_TrainSize; /** Proportion of instances common between any two training sets. */ protected double m_P; /** * Returns a string describing this object * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "This class performs Bias-Variance decomposion on any classifier using the " + "sub-sampled cross-validation procedure as specified in (1).\n" + "The Kohavi and Wolpert definition of bias and variance is specified in (2).\n" + "The Webb definition of bias and variance is specified in (3).\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. 
* * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.MISC); result.setValue(Field.AUTHOR, "Geoffrey I. Webb and Paul Conilione"); result.setValue(Field.YEAR, "2002"); result.setValue(Field.TITLE, "Estimating bias and variance from data"); result.setValue(Field.INSTITUTION, "Monash University"); result.setValue(Field.ADDRESS, "School of Computer Science and Software Engineering, Victoria, Australia"); result.setValue(Field.PDF, "http://www.csse.monash.edu.au/~webb/Files/WebbConilione04.pdf"); additional = result.add(Type.INPROCEEDINGS); additional.setValue(Field.AUTHOR, "Ron Kohavi and David H. Wolpert"); additional.setValue(Field.YEAR, "1996"); additional.setValue(Field.TITLE, "Bias Plus Variance Decomposition for Zero-One Loss Functions"); additional.setValue(Field.BOOKTITLE, "Machine Learning: Proceedings of the Thirteenth International Conference"); additional.setValue(Field.PUBLISHER, "Morgan Kaufmann"); additional.setValue(Field.EDITOR, "Lorenza Saitta"); additional.setValue(Field.PAGES, "275-283"); additional.setValue(Field.PS, "http://robotics.stanford.edu/~ronnyk/biasVar.ps"); additional = result.add(Type.ARTICLE); additional.setValue(Field.AUTHOR, "Geoffrey I. Webb"); additional.setValue(Field.YEAR, "2000"); additional.setValue(Field.TITLE, "MultiBoosting: A Technique for Combining Boosting and Wagging"); additional.setValue(Field.JOURNAL, "Machine Learning"); additional.setValue(Field.VOLUME, "40"); additional.setValue(Field.NUMBER, "2"); additional.setValue(Field.PAGES, "159-196"); return result; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ public Enumeration listOptions() { Vector newVector = new Vector(8); newVector.addElement(new Option( "\tThe index of the class attribute.\n"+ "\t(default last)", "c", 1, "-c <class index>")); newVector.addElement(new Option( "\tTurn on debugging output.", "D", 0, "-D")); newVector.addElement(new Option( "\tThe number of times each instance is classified.\n" +"\t(default 10)", "l", 1, "-l <num>")); newVector.addElement(new Option( "\tThe average proportion of instances common between any two training sets", "p", 1, "-p <proportion of objects in common>")); newVector.addElement(new Option( "\tThe random number seed used.", "s", 1, "-s <seed>")); newVector.addElement(new Option( "\tThe name of the arff file used for the decomposition.", "t", 1, "-t <name of arff file>")); newVector.addElement(new Option( "\tThe number of instances in the training set.", "T", 1, "-T <number of instances in training set>")); newVector.addElement(new Option( "\tFull class name of the learner used in the decomposition.\n" +"\teg: weka.classifiers.bayes.NaiveBayes", "W", 1, "-W <classifier class name>")); if ((m_Classifier != null) && (m_Classifier instanceof OptionHandler)) { newVector.addElement(new Option( "", "", 0, "\nOptions specific to learner " + m_Classifier.getClass().getName() + ":")); Enumeration enu = ((OptionHandler)m_Classifier).listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } } return newVector.elements(); } /** * Sets the OptionHandler's options using the given list. All options * will be set (or reset) during this call (i.e. incremental setting * of options is not possible). <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -c &lt;class index&gt; * The index of the class attribute. * (default last)</pre> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -l &lt;num&gt; * The number of times each instance is classified. 
* (default 10)</pre> * * <pre> -p &lt;proportion of objects in common&gt; * The average proportion of instances common between any two training sets</pre> * * <pre> -s &lt;seed&gt; * The random number seed used.</pre> * * <pre> -t &lt;name of arff file&gt; * The name of the arff file used for the decomposition.</pre> * * <pre> -T &lt;number of instances in training set&gt; * The number of instances in the training set.</pre> * * <pre> -W &lt;classifier class name&gt; * Full class name of the learner used in the decomposition. * eg: weka.classifiers.bayes.NaiveBayes</pre> * * <pre> * Options specific to learner weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { setDebug(Utils.getFlag('D', options)); String classIndex = Utils.getOption('c', options); if (classIndex.length() != 0) { if (classIndex.toLowerCase().equals("last")) { setClassIndex(0); } else if (classIndex.toLowerCase().equals("first")) { setClassIndex(1); } else { setClassIndex(Integer.parseInt(classIndex)); } } else { setClassIndex(0); } String classifyIterations = Utils.getOption('l', options); if (classifyIterations.length() != 0) { setClassifyIterations(Integer.parseInt(classifyIterations)); } else { setClassifyIterations(10); } String prob = Utils.getOption('p', options); if (prob.length() != 0) { setP( Double.parseDouble(prob)); } else { setP(-1); } //throw new Exception("A proportion must be specified" + " with a -p option."); String seedString = Utils.getOption('s', options); if (seedString.length() != 0) { setSeed(Integer.parseInt(seedString)); } else { setSeed(1); } String dataFile = Utils.getOption('t', options); if (dataFile.length() != 0) { setDataFileName(dataFile); } else { throw new Exception("An 
arff file must be specified" + " with the -t option."); } String trainSize = Utils.getOption('T', options); if (trainSize.length() != 0) { setTrainSize(Integer.parseInt(trainSize)); } else { setTrainSize(-1); } //throw new Exception("A training set size must be specified" + " with a -T option."); String classifierName = Utils.getOption('W', options); if (classifierName.length() != 0) { setClassifier(AbstractClassifier.forName(classifierName, Utils.partitionOptions(options))); } else { throw new Exception("A learner must be specified with the -W option."); } } /** * Gets the current settings of the CheckClassifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] classifierOptions = new String [0]; if ((m_Classifier != null) && (m_Classifier instanceof OptionHandler)) { classifierOptions = ((OptionHandler)m_Classifier).getOptions(); } String [] options = new String [classifierOptions.length + 14]; int current = 0; if (getDebug()) { options[current++] = "-D"; } options[current++] = "-c"; options[current++] = "" + getClassIndex(); options[current++] = "-l"; options[current++] = "" + getClassifyIterations(); options[current++] = "-p"; options[current++] = "" + getP(); options[current++] = "-s"; options[current++] = "" + getSeed(); if (getDataFileName() != null) { options[current++] = "-t"; options[current++] = "" + getDataFileName(); } options[current++] = "-T"; options[current++] = "" + getTrainSize(); if (getClassifier() != null) { options[current++] = "-W"; options[current++] = getClassifier().getClass().getName(); } options[current++] = "--"; System.arraycopy(classifierOptions, 0, options, current, classifierOptions.length); current += classifierOptions.length; while (current < options.length) { options[current++] = ""; } return options; } /** * Set the classifiers being analysed * * @param newClassifier the Classifier to use. 
*/ public void setClassifier(Classifier newClassifier) { m_Classifier = newClassifier; } /** * Gets the name of the classifier being analysed * * @return the classifier being analysed. */ public Classifier getClassifier() { return m_Classifier; } /** * Sets debugging mode * * @param debug true if debug output should be printed */ public void setDebug(boolean debug) { m_Debug = debug; } /** * Gets whether debugging is turned on * * @return true if debugging output is on */ public boolean getDebug() { return m_Debug; } /** * Sets the random number seed * * @param seed the random number seed */ public void setSeed(int seed) { m_Seed = seed; } /** * Gets the random number seed * * @return the random number seed */ public int getSeed() { return m_Seed; } /** * Sets the number of times an instance is classified * * @param classifyIterations number of times an instance is classified */ public void setClassifyIterations(int classifyIterations) { m_ClassifyIterations = classifyIterations; } /** * Gets the number of times an instance is classified * * @return the maximum number of times an instance is classified */ public int getClassifyIterations() { return m_ClassifyIterations; } /** * Sets the name of the dataset file. * * @param dataFileName name of dataset file. */ public void setDataFileName(String dataFileName) { m_DataFileName = dataFileName; } /** * Get the name of the data file used for the decomposition * * @return the name of the data file */ public String getDataFileName() { return m_DataFileName; } /** * Get the index (starting from 1) of the attribute used as the class. 
* * @return the index of the class attribute */ public int getClassIndex() { return m_ClassIndex + 1; } /** * Sets index of attribute to discretize on * * @param classIndex the index (starting from 1) of the class attribute */ public void setClassIndex(int classIndex) { m_ClassIndex = classIndex - 1; } /** * Get the calculated bias squared according to the Kohavi and Wolpert definition * * @return the bias squared */ public double getKWBias() { return m_KWBias; } /** * Get the calculated bias according to the Webb definition * * @return the bias * */ public double getWBias() { return m_WBias; } /** * Get the calculated variance according to the Kohavi and Wolpert definition * * @return the variance */ public double getKWVariance() { return m_KWVariance; } /** * Get the calculated variance according to the Webb definition * * @return the variance according to Webb * */ public double getWVariance() { return m_WVariance; } /** * Get the calculated sigma according to the Kohavi and Wolpert definition * * @return the sigma * */ public double getKWSigma() { return m_KWSigma; } /** * Set the training size. * * @param size the size of the training set * */ public void setTrainSize(int size) { m_TrainSize = size; } /** * Get the training size * * @return the size of the training set * */ public int getTrainSize() { return m_TrainSize; } /** * Set the proportion of instances that are common between two training sets * used to train a classifier. * * @param proportion the proportion of instances that are common between training * sets. * */ public void setP(double proportion) { m_P = proportion; } /** * Get the proportion of instances that are common between two training sets. * * @return the proportion * */ public double getP() { return m_P; } /** * Get the calculated error rate * * @return the error rate */ public double getError() { return m_Error; } /** * Carry out the bias-variance decomposition using the sub-sampled cross-validation method. 
* * @throws Exception if the decomposition couldn't be carried out */ public void decompose() throws Exception { Reader dataReader; Instances data; int tps; // training pool size, size of segment E. int k; // number of folds in segment E. int q; // number of segments of size tps. dataReader = new BufferedReader(new FileReader(m_DataFileName)); //open file data = new Instances(dataReader); // encapsulate in wrapper class called weka.Instances() if (m_ClassIndex < 0) { data.setClassIndex(data.numAttributes() - 1); } else { data.setClassIndex(m_ClassIndex); } if (data.classAttribute().type() != Attribute.NOMINAL) { throw new Exception("Class attribute must be nominal"); } int numClasses = data.numClasses(); data.deleteWithMissingClass(); if ( data.checkForStringAttributes() ) { throw new Exception("Can't handle string attributes!"); } // Dataset size must be greater than 2 if ( data.numInstances() <= 2 ){ throw new Exception("Dataset size must be greater than 2."); } if ( m_TrainSize == -1 ){ // default value m_TrainSize = (int) Math.floor( (double) data.numInstances() / 2.0 ); }else if ( m_TrainSize < 0 || m_TrainSize >= data.numInstances() - 1 ) { // Check if 0 < training Size < D - 1 throw new Exception("Training set size of "+m_TrainSize+" is invalid."); } if ( m_P == -1 ){ // default value m_P = (double) m_TrainSize / ( (double)data.numInstances() - 1 ); }else if ( m_P < ( m_TrainSize / ( (double)data.numInstances() - 1 ) ) || m_P >= 1.0 ) { //Check if p is in range: m/(|D|-1) <= p < 1.0 throw new Exception("Proportion is not in range: "+ (m_TrainSize / ((double) data.numInstances() - 1 )) +" <= p < 1.0 "); } //roundup tps from double to integer tps = (int) Math.ceil( ((double)m_TrainSize / (double)m_P) + 1 ); k = (int) Math.ceil( tps / (tps - (double) m_TrainSize)); // number of folds cannot be more than the number of instances in the training pool if ( k > tps ) { throw new Exception("The required number of folds is too many." 
+ "Change p or the size of the training set."); } // calculate the number of segments, round down. q = (int) Math.floor( (double) data.numInstances() / (double)tps ); //create confusion matrix, columns = number of instances in data set, as all will be used, by rows = number of classes. double [][] instanceProbs = new double [data.numInstances()][numClasses]; int [][] foldIndex = new int [ k ][ 2 ]; Vector segmentList = new Vector(q + 1); //Set random seed Random random = new Random(m_Seed); data.randomize(random); //create index arrays for different segments int currentDataIndex = 0; for( int count = 1; count <= (q + 1); count++ ){ if( count > q){ int [] segmentIndex = new int [ (data.numInstances() - (q * tps)) ]; for(int index = 0; index < segmentIndex.length; index++, currentDataIndex++){ segmentIndex[index] = currentDataIndex; } segmentList.add(segmentIndex); } else { int [] segmentIndex = new int [ tps ]; for(int index = 0; index < segmentIndex.length; index++, currentDataIndex++){ segmentIndex[index] = currentDataIndex; } segmentList.add(segmentIndex); } } int remainder = tps % k; // remainder is used to determine when to shrink the fold size by 1. 
//foldSize = ROUNDUP( tps / k ) (round up, eg 3 -> 3, 3.3->4) int foldSize = (int) Math.ceil( (double)tps /(double) k); //roundup fold size double to integer int index = 0; int currentIndex; for( int count = 0; count < k; count ++){ if( remainder != 0 && count == remainder ){ foldSize -= 1; } foldIndex[count][0] = index; foldIndex[count][1] = foldSize; index += foldSize; } for( int l = 0; l < m_ClassifyIterations; l++) { for(int i = 1; i <= q; i++){ int [] currentSegment = (int[]) segmentList.get(i - 1); randomize(currentSegment, random); //CROSS FOLD VALIDATION for current Segment for( int j = 1; j <= k; j++){ Instances TP = null; for(int foldNum = 1; foldNum <= k; foldNum++){ if( foldNum != j){ int startFoldIndex = foldIndex[ foldNum - 1 ][ 0 ]; //start index foldSize = foldIndex[ foldNum - 1 ][ 1 ]; int endFoldIndex = startFoldIndex + foldSize - 1; for(int currentFoldIndex = startFoldIndex; currentFoldIndex <= endFoldIndex; currentFoldIndex++){ if( TP == null ){ TP = new Instances(data, currentSegment[ currentFoldIndex ], 1); }else{ TP.add( data.instance( currentSegment[ currentFoldIndex ] ) ); } } } } TP.randomize(random); if( getTrainSize() > TP.numInstances() ){ throw new Exception("The training set size of " + getTrainSize() + ", is greater than the training pool " + TP.numInstances() ); } Instances train = new Instances(TP, 0, m_TrainSize); Classifier current = AbstractClassifier.makeCopy(m_Classifier); current.buildClassifier(train); // create a clssifier using the instances in train. int currentTestIndex = foldIndex[ j - 1 ][ 0 ]; //start index int testFoldSize = foldIndex[ j - 1 ][ 1 ]; //size int endTestIndex = currentTestIndex + testFoldSize - 1; while( currentTestIndex <= endTestIndex ){ Instance testInst = data.instance( currentSegment[currentTestIndex] ); int pred = (int)current.classifyInstance( testInst ); if(pred != testInst.classValue()) { m_Error++; // add 1 to mis-classifications. 
} instanceProbs[ currentSegment[ currentTestIndex ] ][ pred ]++; currentTestIndex++; } if( i == 1 && j == 1){ int[] segmentElast = (int[])segmentList.lastElement(); for( currentIndex = 0; currentIndex < segmentElast.length; currentIndex++){ Instance testInst = data.instance( segmentElast[currentIndex] ); int pred = (int)current.classifyInstance( testInst ); if(pred != testInst.classValue()) { m_Error++; // add 1 to mis-classifications. } instanceProbs[ segmentElast[ currentIndex ] ][ pred ]++; } } } } } m_Error /= (double)( m_ClassifyIterations * data.numInstances() ); m_KWBias = 0.0; m_KWVariance = 0.0; m_KWSigma = 0.0; m_WBias = 0.0; m_WVariance = 0.0; for (int i = 0; i < data.numInstances(); i++) { Instance current = data.instance( i ); double [] predProbs = instanceProbs[ i ]; double pActual, pPred; double bsum = 0, vsum = 0, ssum = 0; double wBSum = 0, wVSum = 0; Vector centralTendencies = findCentralTendencies( predProbs ); if( centralTendencies == null ){ throw new Exception("Central tendency was null."); } for (int j = 0; j < numClasses; j++) { pActual = (current.classValue() == j) ? 1 : 0; pPred = predProbs[j] / m_ClassifyIterations; bsum += (pActual - pPred) * (pActual - pPred) - pPred * (1 - pPred) / (m_ClassifyIterations - 1); vsum += pPred * pPred; ssum += pActual * pActual; } m_KWBias += bsum; m_KWVariance += (1 - vsum); m_KWSigma += (1 - ssum); for( int count = 0; count < centralTendencies.size(); count++ ) { int wB = 0, wV = 0; int centralTendency = ((Integer)centralTendencies.get(count)).intValue(); // For a single instance xi, find the bias and variance. for (int j = 0; j < numClasses; j++) { //Webb definition if( j != (int)current.classValue() && j == centralTendency ) { wB += predProbs[j]; } if( j != (int)current.classValue() && j != centralTendency ) { wV += predProbs[j]; } } wBSum += (double) wB; wVSum += (double) wV; } // calculate bais by dividing bSum by the number of central tendencies and // total number of instances. 
(effectively finding the average and dividing // by the number of instances to get the nominalised probability). m_WBias += ( wBSum / ((double) ( centralTendencies.size() * m_ClassifyIterations ))); // calculate variance by dividing vSum by the total number of interations m_WVariance += ( wVSum / ((double) ( centralTendencies.size() * m_ClassifyIterations ))); } m_KWBias /= (2.0 * (double) data.numInstances()); m_KWVariance /= (2.0 * (double) data.numInstances()); m_KWSigma /= (2.0 * (double) data.numInstances()); // bias = bias / number of data instances m_WBias /= (double) data.numInstances(); // variance = variance / number of data instances. m_WVariance /= (double) data.numInstances(); if (m_Debug) { System.err.println("Decomposition finished"); } } /** Finds the central tendency, given the classifications for an instance. * * Where the central tendency is defined as the class that was most commonly * selected for a given instance.<p> * * For example, instance 'x' may be classified out of 3 classes y = {1, 2, 3}, * so if x is classified 10 times, and is classified as follows, '1' = 2 times, '2' = 5 times * and '3' = 3 times. Then the central tendency is '2'. <p> * * However, it is important to note that this method returns a list of all classes * that have the highest number of classifications. * * In cases where there are several classes with the largest number of classifications, then * all of these classes are returned. For example if 'x' is classified '1' = 4 times, * '2' = 4 times and '3' = 2 times. Then '1' and '2' are returned.<p> * * @param predProbs the array of classifications for a single instance. * * @return a Vector containing Integer objects which store the class(s) which * are the central tendency. */ public Vector findCentralTendencies(double[] predProbs) { int centralTValue = 0; int currentValue = 0; //array to store the list of classes the have the greatest number of classifictions. 
Vector centralTClasses; centralTClasses = new Vector(); //create an array with size of the number of classes. // Go through array, finding the central tendency. for( int i = 0; i < predProbs.length; i++) { currentValue = (int) predProbs[i]; // if current value is greater than the central tendency value then // clear vector and add new class to vector array. if( currentValue > centralTValue) { centralTClasses.clear(); centralTClasses.addElement( new Integer(i) ); centralTValue = currentValue; } else if( currentValue != 0 && currentValue == centralTValue) { centralTClasses.addElement( new Integer(i) ); } } //return all classes that have the greatest number of classifications. if( centralTValue != 0){ return centralTClasses; } else { return null; } } /** * Returns description of the bias-variance decomposition results. * * @return the bias-variance decomposition results as a string */ public String toString() { String result = "\nBias-Variance Decomposition Segmentation, Cross Validation\n" + "with subsampling.\n"; if (getClassifier() == null) { return "Invalid setup"; } result += "\nClassifier : " + getClassifier().getClass().getName(); if (getClassifier() instanceof OptionHandler) { result += Utils.joinOptions(((OptionHandler)m_Classifier).getOptions()); } result += "\nData File : " + getDataFileName(); result += "\nClass Index : "; if (getClassIndex() == 0) { result += "last"; } else { result += getClassIndex(); } result += "\nIterations : " + getClassifyIterations(); result += "\np : " + getP(); result += "\nTraining Size : " + getTrainSize(); result += "\nSeed : " + getSeed(); result += "\n\nDefinition : " +"Kohavi and Wolpert"; result += "\nError :" + Utils.doubleToString(getError(), 4); result += "\nBias^2 :" + Utils.doubleToString(getKWBias(), 4); result += "\nVariance :" + Utils.doubleToString(getKWVariance(), 4); result += "\nSigma^2 :" + Utils.doubleToString(getKWSigma(), 4); result += "\n\nDefinition : " +"Webb"; result += "\nError :" + 
Utils.doubleToString(getError(), 4); result += "\nBias :" + Utils.doubleToString(getWBias(), 4); result += "\nVariance :" + Utils.doubleToString(getWVariance(), 4); return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Test method for this class * * @param args the command line arguments */ public static void main(String [] args) { try { BVDecomposeSegCVSub bvd = new BVDecomposeSegCVSub(); try { bvd.setOptions(args); Utils.checkForRemainingOptions(args); } catch (Exception ex) { String result = ex.getMessage() + "\nBVDecompose Options:\n\n"; Enumeration enu = bvd.listOptions(); while (enu.hasMoreElements()) { Option option = (Option) enu.nextElement(); result += option.synopsis() + "\n" + option.description() + "\n"; } throw new Exception(result); } bvd.decompose(); System.out.println(bvd.toString()); } catch (Exception ex) { System.err.println(ex.getMessage()); } } /** * Accepts an array of ints and randomises the values in the array, using the * random seed. * *@param index is the array of integers *@param random is the Random seed. */ public final void randomize(int[] index, Random random) { for( int j = index.length - 1; j > 0; j-- ){ int k = random.nextInt( j + 1 ); int temp = index[j]; index[j] = index[k]; index[k] = temp; } } }
37,978
33.031362
192
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/CheckClassifier.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * CheckClassifier.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.core.Attribute; import weka.core.CheckScheme; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.MultiInstanceCapabilitiesHandler; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SerializationHelper; import weka.core.TestInstances; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** * Class for examining the capabilities and finding problems with * classifiers. If you implement a classifier using the WEKA.libraries, * you should run the checks on it to ensure robustness and correct * operation. Passing all the tests of this object does not mean * bugs in the classifier don't exist, but this will help find some * common ones. 
<p/> * * Typical usage: <p/> * <code>java weka.classifiers.CheckClassifier -W classifier_name * classifier_options </code><p/> * * CheckClassifier reports on the following: * <ul> * <li> Classifier abilities * <ul> * <li> Possible command line options to the classifier </li> * <li> Whether the classifier can predict nominal, numeric, string, * date or relational class attributes. Warnings will be displayed if * performance is worse than ZeroR </li> * <li> Whether the classifier can be trained incrementally </li> * <li> Whether the classifier can handle numeric predictor attributes </li> * <li> Whether the classifier can handle nominal predictor attributes </li> * <li> Whether the classifier can handle string predictor attributes </li> * <li> Whether the classifier can handle date predictor attributes </li> * <li> Whether the classifier can handle relational predictor attributes </li> * <li> Whether the classifier can handle multi-instance data </li> * <li> Whether the classifier can handle missing predictor values </li> * <li> Whether the classifier can handle missing class values </li> * <li> Whether a nominal classifier only handles 2 class problems </li> * <li> Whether the classifier can handle instance weights </li> * </ul> * </li> * <li> Correct functioning * <ul> * <li> Correct initialisation during buildClassifier (i.e. no result * changes when buildClassifier called repeatedly) </li> * <li> Whether incremental training produces the same results * as during non-incremental training (which may or may not * be OK) </li> * <li> Whether the classifier alters the data pased to it * (number of instances, instance order, instance weights, etc) </li> * <li> Whether the toString() method works correctly before the * classifier has been built. 
</li> * </ul> * </li> * <li> Degenerate cases * <ul> * <li> building classifier with zero training instances </li> * <li> all but one predictor attribute values missing </li> * <li> all predictor attribute values missing </li> * <li> all but one class values missing </li> * <li> all class values missing </li> * </ul> * </li> * </ul> * Running CheckClassifier with the debug option set will output the * training and test datasets for any failed tests.<p/> * * The <code>weka.classifiers.AbstractClassifierTest</code> uses this * class to test all the classifiers. Any changes here, have to be * checked in that abstract test class, too. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -S * Silent mode - prints nothing to stdout.</pre> * * <pre> -N &lt;num&gt; * The number of instances in the datasets (default 20).</pre> * * <pre> -nominal &lt;num&gt; * The number of nominal attributes (default 2).</pre> * * <pre> -nominal-values &lt;num&gt; * The number of values for nominal attributes (default 1).</pre> * * <pre> -numeric &lt;num&gt; * The number of numeric attributes (default 1).</pre> * * <pre> -string &lt;num&gt; * The number of string attributes (default 1).</pre> * * <pre> -date &lt;num&gt; * The number of date attributes (default 1).</pre> * * <pre> -relational &lt;num&gt; * The number of relational attributes (default 1).</pre> * * <pre> -num-instances-relational &lt;num&gt; * The number of instances in relational/bag attributes (default 10).</pre> * * <pre> -words &lt;comma-separated-list&gt; * The words to use in string attributes.</pre> * * <pre> -word-separators &lt;chars&gt; * The word separators to use in string attributes.</pre> * * <pre> -W * Full name of the classifier analysed. 
 * eg: weka.classifiers.bayes.NaiveBayes
 * (default weka.classifiers.rules.ZeroR)</pre>
 *
 * <pre>
 * Options specific to classifier weka.classifiers.rules.ZeroR:
 * </pre>
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 <!-- options-end -->
 *
 * Options after -- are passed to the designated classifier.<p/>
 *
 * @author Len Trigg (trigg@cs.waikato.ac.nz)
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 8034 $
 * @see TestInstances
 */
public class CheckClassifier extends CheckScheme {

  /*
   * Note about test methods:
   * - methods return array of booleans
   * - first index: success or not
   * - second index: acceptable or not (e.g., Exception is OK)
   * - in case the performance is worse than that of ZeroR both indices are true
   *
   * FracPete (fracpete at waikato dot ac dot nz)
   */

  /** The classifier to be examined. */
  protected Classifier m_Classifier = new weka.classifiers.rules.ZeroR();

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector result = new Vector();

    // options of the superclass (CheckScheme) come first
    Enumeration en = super.listOptions();
    while (en.hasMoreElements())
      result.addElement(en.nextElement());

    result.addElement(new Option(
        "\tFull name of the classifier analysed.\n"
        +"\teg: weka.classifiers.bayes.NaiveBayes\n"
        + "\t(default weka.classifiers.rules.ZeroR)",
        "W", 1, "-W"));

    // append the options of the currently configured classifier, if any
    if ((m_Classifier != null)
        && (m_Classifier instanceof OptionHandler)) {
      result.addElement(new Option("", "", 0,
          "\nOptions specific to classifier "
          + m_Classifier.getClass().getName() + ":"));
      Enumeration enu = ((OptionHandler)m_Classifier).listOptions();
      while (enu.hasMoreElements())
        result.addElement(enu.nextElement());
    }

    return result.elements();
  }

  /**
   * Parses a given list of options.
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -D
   *  Turn on debugging output.</pre>
   *
   * <pre> -S
   *  Silent mode - prints nothing to stdout.</pre>
   *
   * <pre> -N &lt;num&gt;
   *  The number of instances in the datasets (default 20).</pre>
   *
   * <pre> -nominal &lt;num&gt;
   *  The number of nominal attributes (default 2).</pre>
   *
   * <pre> -nominal-values &lt;num&gt;
   *  The number of values for nominal attributes (default 1).</pre>
   *
   * <pre> -numeric &lt;num&gt;
   *  The number of numeric attributes (default 1).</pre>
   *
   * <pre> -string &lt;num&gt;
   *  The number of string attributes (default 1).</pre>
   *
   * <pre> -date &lt;num&gt;
   *  The number of date attributes (default 1).</pre>
   *
   * <pre> -relational &lt;num&gt;
   *  The number of relational attributes (default 1).</pre>
   *
   * <pre> -num-instances-relational &lt;num&gt;
   *  The number of instances in relational/bag attributes (default 10).</pre>
   *
   * <pre> -words &lt;comma-separated-list&gt;
   *  The words to use in string attributes.</pre>
   *
   * <pre> -word-separators &lt;chars&gt;
   *  The word separators to use in string attributes.</pre>
   *
   * <pre> -W
   *  Full name of the classifier analysed.
   *  eg: weka.classifiers.bayes.NaiveBayes
   *  (default weka.classifiers.rules.ZeroR)</pre>
   *
   * <pre>
   * Options specific to classifier weka.classifiers.rules.ZeroR:
   * </pre>
   *
   * <pre> -D
   *  If set, classifier is run in debug mode and
   *  may output additional info to the console</pre>
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String tmpStr;

    super.setOptions(options);

    // -W selects the classifier to analyse; everything after -- goes to it
    tmpStr = Utils.getOption('W', options);
    if (tmpStr.length() == 0)
      tmpStr = weka.classifiers.rules.ZeroR.class.getName();
    setClassifier(
        (Classifier) forName(
            "weka.classifiers",
            Classifier.class,
            tmpStr,
            Utils.partitionOptions(options)));
  }

  /**
   * Gets the current settings of the CheckClassifier.
* * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { Vector result; String[] options; int i; result = new Vector(); options = super.getOptions(); for (i = 0; i < options.length; i++) result.add(options[i]); if (getClassifier() != null) { result.add("-W"); result.add(getClassifier().getClass().getName()); } if ((m_Classifier != null) && (m_Classifier instanceof OptionHandler)) options = ((OptionHandler) m_Classifier).getOptions(); else options = new String[0]; if (options.length > 0) { result.add("--"); for (i = 0; i < options.length; i++) result.add(options[i]); } return (String[]) result.toArray(new String[result.size()]); } /** * Begin the tests, reporting results to System.out */ public void doTests() { if (getClassifier() == null) { println("\n=== No classifier set ==="); return; } println("\n=== Check on Classifier: " + getClassifier().getClass().getName() + " ===\n"); // Start tests m_ClasspathProblems = false; println("--> Checking for interfaces"); canTakeOptions(); boolean updateableClassifier = updateableClassifier()[0]; boolean weightedInstancesHandler = weightedInstancesHandler()[0]; boolean multiInstanceHandler = multiInstanceHandler()[0]; println("--> Classifier tests"); declaresSerialVersionUID(); testToString(); testsPerClassType(Attribute.NOMINAL, updateableClassifier, weightedInstancesHandler, multiInstanceHandler); testsPerClassType(Attribute.NUMERIC, updateableClassifier, weightedInstancesHandler, multiInstanceHandler); testsPerClassType(Attribute.DATE, updateableClassifier, weightedInstancesHandler, multiInstanceHandler); testsPerClassType(Attribute.STRING, updateableClassifier, weightedInstancesHandler, multiInstanceHandler); testsPerClassType(Attribute.RELATIONAL, updateableClassifier, weightedInstancesHandler, multiInstanceHandler); } /** * Set the classifier for boosting. * * @param newClassifier the Classifier to use. 
*/ public void setClassifier(Classifier newClassifier) { m_Classifier = newClassifier; } /** * Get the classifier used as the classifier * * @return the classifier used as the classifier */ public Classifier getClassifier() { return m_Classifier; } /** * Run a battery of tests for a given class attribute type * * @param classType true if the class attribute should be numeric * @param updateable true if the classifier is updateable * @param weighted true if the classifier says it handles weights * @param multiInstance true if the classifier is a multi-instance classifier */ protected void testsPerClassType(int classType, boolean updateable, boolean weighted, boolean multiInstance) { boolean PNom = canPredict(true, false, false, false, false, multiInstance, classType)[0]; boolean PNum = canPredict(false, true, false, false, false, multiInstance, classType)[0]; boolean PStr = canPredict(false, false, true, false, false, multiInstance, classType)[0]; boolean PDat = canPredict(false, false, false, true, false, multiInstance, classType)[0]; boolean PRel; if (!multiInstance) PRel = canPredict(false, false, false, false, true, multiInstance, classType)[0]; else PRel = false; if (PNom || PNum || PStr || PDat || PRel) { if (weighted) instanceWeights(PNom, PNum, PStr, PDat, PRel, multiInstance, classType); canHandleOnlyClass(PNom, PNum, PStr, PDat, PRel, classType); if (classType == Attribute.NOMINAL) canHandleNClasses(PNom, PNum, PStr, PDat, PRel, multiInstance, 4); if (!multiInstance) { canHandleClassAsNthAttribute(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, 0); canHandleClassAsNthAttribute(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, 1); } canHandleZeroTraining(PNom, PNum, PStr, PDat, PRel, multiInstance, classType); boolean handleMissingPredictors = canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, true, false, 20)[0]; if (handleMissingPredictors) canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, true, 
false, 100); boolean handleMissingClass = canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, false, true, 20)[0]; if (handleMissingClass) canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, false, true, 100); correctBuildInitialisation(PNom, PNum, PStr, PDat, PRel, multiInstance, classType); datasetIntegrity(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, handleMissingPredictors, handleMissingClass); doesntUseTestClassVal(PNom, PNum, PStr, PDat, PRel, multiInstance, classType); if (updateable) updatingEquality(PNom, PNum, PStr, PDat, PRel, multiInstance, classType); } } /** * Checks whether the scheme's toString() method works even though the * classifies hasn't been built yet. * * @return index 0 is true if the toString() method works fine */ protected boolean[] testToString() { boolean[] result = new boolean[2]; print("toString..."); try { Classifier copy = (Classifier) m_Classifier.getClass().newInstance(); copy.toString(); result[0] = true; println("yes"); } catch (Exception e) { result[0] = false; println("no"); if (m_Debug) { println("\n=== Full report ==="); e.printStackTrace(); println("\n"); } } return result; } /** * tests for a serialVersionUID. Fails in case the scheme doesn't declare * a UID. * * @return index 0 is true if the scheme declares a UID */ protected boolean[] declaresSerialVersionUID() { boolean[] result = new boolean[2]; print("serialVersionUID..."); result[0] = !SerializationHelper.needsUID(m_Classifier.getClass()); if (result[0]) println("yes"); else println("no"); return result; } /** * Checks whether the scheme can take command line options. 
* * @return index 0 is true if the classifier can take options */ protected boolean[] canTakeOptions() { boolean[] result = new boolean[2]; print("options..."); if (m_Classifier instanceof OptionHandler) { println("yes"); if (m_Debug) { println("\n=== Full report ==="); Enumeration enu = ((OptionHandler)m_Classifier).listOptions(); while (enu.hasMoreElements()) { Option option = (Option) enu.nextElement(); print(option.synopsis() + "\n" + option.description() + "\n"); } println("\n"); } result[0] = true; } else { println("no"); result[0] = false; } return result; } /** * Checks whether the scheme can build models incrementally. * * @return index 0 is true if the classifier can train incrementally */ protected boolean[] updateableClassifier() { boolean[] result = new boolean[2]; print("updateable classifier..."); if (m_Classifier instanceof UpdateableClassifier) { println("yes"); result[0] = true; } else { println("no"); result[0] = false; } return result; } /** * Checks whether the scheme says it can handle instance weights. * * @return true if the classifier handles instance weights */ protected boolean[] weightedInstancesHandler() { boolean[] result = new boolean[2]; print("weighted instances classifier..."); if (m_Classifier instanceof WeightedInstancesHandler) { println("yes"); result[0] = true; } else { println("no"); result[0] = false; } return result; } /** * Checks whether the scheme handles multi-instance data. * * @return true if the classifier handles multi-instance data */ protected boolean[] multiInstanceHandler() { boolean[] result = new boolean[2]; print("multi-instance classifier..."); if (m_Classifier instanceof MultiInstanceCapabilitiesHandler) { println("yes"); result[0] = true; } else { println("no"); result[0] = false; } return result; } /** * Checks basic prediction of the scheme, for simple non-troublesome * datasets. 
* * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NOMINAL, NUMERIC, etc.) * @return index 0 is true if the test was passed, index 1 is true if test * was acceptable */ protected boolean[] canPredict( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType) { print("basic predict"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); FastVector accepts = new FastVector(); accepts.addElement("unary"); accepts.addElement("binary"); accepts.addElement("nominal"); accepts.addElement("numeric"); accepts.addElement("string"); accepts.addElement("date"); accepts.addElement("relational"); accepts.addElement("multi-instance"); accepts.addElement("not in classpath"); int numTrain = getNumInstances(), numTest = getNumInstances(), numClasses = 2, missingLevel = 0; boolean predictorMissing = false, classMissing = false; return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType, missingLevel, predictorMissing, classMissing, numTrain, numTest, numClasses, accepts); } /** * Checks whether the scheme can handle data that contains only the class * attribute. If a scheme cannot build a proper model with that data, it * should default back to a ZeroR model. 
* * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param classType the class type (NOMINAL, NUMERIC, etc.) * @return index 0 is true if the test was passed, index 1 is true if test * was acceptable */ protected boolean[] canHandleOnlyClass( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, int classType) { print("only class in data"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, false, classType); print("..."); FastVector accepts = new FastVector(); accepts.addElement("class"); accepts.addElement("zeror"); int numTrain = getNumInstances(), numTest = getNumInstances(), missingLevel = 0; boolean predictorMissing = false, classMissing = false; return runBasicTest(false, false, false, false, false, false, classType, missingLevel, predictorMissing, classMissing, numTrain, numTest, 2, accepts); } /** * Checks whether nominal schemes can handle more than two classes. * If a scheme is only designed for two-class problems it should * throw an appropriate exception for multi-class problems. 
* * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param numClasses the number of classes to test * @return index 0 is true if the test was passed, index 1 is true if test * was acceptable */ protected boolean[] canHandleNClasses( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int numClasses) { print("more than two class problems"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, Attribute.NOMINAL); print("..."); FastVector accepts = new FastVector(); accepts.addElement("number"); accepts.addElement("class"); int numTrain = getNumInstances(), numTest = getNumInstances(), missingLevel = 0; boolean predictorMissing = false, classMissing = false; return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, Attribute.NOMINAL, missingLevel, predictorMissing, classMissing, numTrain, numTest, numClasses, accepts); } /** * Checks whether the scheme can handle class attributes as Nth attribute. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) 
* @param classIndex the index of the class attribute (0-based, -1 means last attribute) * @return index 0 is true if the test was passed, index 1 is true if test * was acceptable * @see TestInstances#CLASS_IS_LAST */ protected boolean[] canHandleClassAsNthAttribute( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType, int classIndex) { if (classIndex == TestInstances.CLASS_IS_LAST) print("class attribute as last attribute"); else print("class attribute as " + (classIndex + 1) + ". attribute"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); FastVector accepts = new FastVector(); int numTrain = getNumInstances(), numTest = getNumInstances(), numClasses = 2, missingLevel = 0; boolean predictorMissing = false, classMissing = false; return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType, classIndex, missingLevel, predictorMissing, classMissing, numTrain, numTest, numClasses, accepts); } /** * Checks whether the scheme can handle zero training instances. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) 
   * @return index 0 is true if the test was passed, index 1 is true if test
   *         was acceptable
   */
  protected boolean[] canHandleZeroTraining(
      boolean nominalPredictor,
      boolean numericPredictor,
      boolean stringPredictor,
      boolean datePredictor,
      boolean relationalPredictor,
      boolean multiInstance,
      int classType) {

    print("handle zero training instances");
    printAttributeSummary(
        nominalPredictor, numericPredictor, stringPredictor, datePredictor,
        relationalPredictor, multiInstance, classType);
    print("...");
    // Exceptions mentioning "train" or "value" are treated as an acceptable
    // refusal of an empty training set.
    FastVector accepts = new FastVector();
    accepts.addElement("train");
    accepts.addElement("value");
    // numTrain == 0 is the point of this test.
    int numTrain = 0, numTest = getNumInstances(),
        numClasses = 2, missingLevel = 0;
    boolean predictorMissing = false, classMissing = false;

    return runBasicTest(
        nominalPredictor, numericPredictor, stringPredictor,
        datePredictor, relationalPredictor, multiInstance,
        classType,
        missingLevel, predictorMissing, classMissing,
        numTrain, numTest, numClasses,
        accepts);
  }

  /**
   * Checks whether the scheme correctly initialises models when
   * buildClassifier is called. This test calls buildClassifier with
   * one training dataset and records performance on a test set.
   * buildClassifier is then called on a training set with different
   * structure, and then again with the original training set. The
   * performance on the test set is compared with the original results
   * and any performance difference noted as incorrect build initialisation.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
* @return index 0 is true if the test was passed, index 1 is true if the * scheme performs worse than ZeroR, but without error (index 0 is * false) */ protected boolean[] correctBuildInitialisation( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType) { boolean[] result = new boolean[2]; print("correct initialisation during buildClassifier"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); int numTrain = getNumInstances(), numTest = getNumInstances(), numClasses = 2, missingLevel = 0; boolean predictorMissing = false, classMissing = false; Instances train1 = null; Instances test1 = null; Instances train2 = null; Instances test2 = null; Classifier classifier = null; Evaluation evaluation1A = null; Evaluation evaluation1B = null; Evaluation evaluation2 = null; boolean built = false; int stage = 0; try { // Make two sets of train/test splits with different // numbers of attributes train1 = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal() : 0, numericPredictor ? getNumNumeric() : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? getNumRelational() : 0, numClasses, classType, multiInstance); train2 = makeTestDataset(84, numTrain, nominalPredictor ? getNumNominal() + 1 : 0, numericPredictor ? getNumNumeric() + 1 : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? getNumRelational() : 0, numClasses, classType, multiInstance); test1 = makeTestDataset(24, numTest, nominalPredictor ? getNumNominal() : 0, numericPredictor ? getNumNumeric() : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? getNumRelational() : 0, numClasses, classType, multiInstance); test2 = makeTestDataset(48, numTest, nominalPredictor ? 
getNumNominal() + 1 : 0, numericPredictor ? getNumNumeric() + 1 : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? getNumRelational() : 0, numClasses, classType, multiInstance); if (missingLevel > 0) { addMissing(train1, missingLevel, predictorMissing, classMissing); addMissing(test1, Math.min(missingLevel,50), predictorMissing, classMissing); addMissing(train2, missingLevel, predictorMissing, classMissing); addMissing(test2, Math.min(missingLevel,50), predictorMissing, classMissing); } classifier = AbstractClassifier.makeCopies(getClassifier(), 1)[0]; evaluation1A = new Evaluation(train1); evaluation1B = new Evaluation(train1); evaluation2 = new Evaluation(train2); } catch (Exception ex) { throw new Error("Error setting up for tests: " + ex.getMessage()); } try { stage = 0; classifier.buildClassifier(train1); built = true; if (!testWRTZeroR(classifier, evaluation1A, train1, test1)[0]) { throw new Exception("Scheme performs worse than ZeroR"); } stage = 1; built = false; classifier.buildClassifier(train2); built = true; if (!testWRTZeroR(classifier, evaluation2, train2, test2)[0]) { throw new Exception("Scheme performs worse than ZeroR"); } stage = 2; built = false; classifier.buildClassifier(train1); built = true; if (!testWRTZeroR(classifier, evaluation1B, train1, test1)[0]) { throw new Exception("Scheme performs worse than ZeroR"); } stage = 3; if (!evaluation1A.equals(evaluation1B)) { if (m_Debug) { println("\n=== Full report ===\n" + evaluation1A.toSummaryString("\nFirst buildClassifier()", true) + "\n\n"); println( evaluation1B.toSummaryString("\nSecond buildClassifier()", true) + "\n\n"); } throw new Exception("Results differ between buildClassifier calls"); } println("yes"); result[0] = true; if (false && m_Debug) { println("\n=== Full report ===\n" + evaluation1A.toSummaryString("\nFirst buildClassifier()", true) + "\n\n"); println( evaluation1B.toSummaryString("\nSecond buildClassifier()", true) + "\n\n"); } 
} catch (Exception ex) { String msg = ex.getMessage().toLowerCase(); if (msg.indexOf("worse than zeror") >= 0) { println("warning: performs worse than ZeroR"); result[0] = (stage < 1); result[1] = (stage < 1); } else { println("no"); result[0] = false; } if (m_Debug) { println("\n=== Full Report ==="); print("Problem during"); if (built) { print(" testing"); } else { print(" training"); } switch (stage) { case 0: print(" of dataset 1"); break; case 1: print(" of dataset 2"); break; case 2: print(" of dataset 1 (2nd build)"); break; case 3: print(", comparing results from builds of dataset 1"); break; } println(": " + ex.getMessage() + "\n"); println("here are the datasets:\n"); println("=== Train1 Dataset ===\n" + train1.toString() + "\n"); println("=== Test1 Dataset ===\n" + test1.toString() + "\n\n"); println("=== Train2 Dataset ===\n" + train2.toString() + "\n"); println("=== Test2 Dataset ===\n" + test2.toString() + "\n\n"); } } return result; } /** * Checks basic missing value handling of the scheme. If the missing * values cause an exception to be thrown by the scheme, this will be * recorded. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) 
   * @param predictorMissing true if the missing values may be in
   *        the predictors
   * @param classMissing true if the missing values may be in the class
   * @param missingLevel the percentage of missing values
   * @return index 0 is true if the test was passed, index 1 is true if test
   *         was acceptable
   */
  protected boolean[] canHandleMissing(
      boolean nominalPredictor,
      boolean numericPredictor,
      boolean stringPredictor,
      boolean datePredictor,
      boolean relationalPredictor,
      boolean multiInstance,
      int classType,
      boolean predictorMissing,
      boolean classMissing,
      int missingLevel) {

    // Build a descriptive progress message such as
    // "100% missing predictor and class values".
    if (missingLevel == 100)
      print("100% ");
    print("missing");
    if (predictorMissing) {
      print(" predictor");
      if (classMissing)
        print(" and");
    }
    if (classMissing)
      print(" class");
    print(" values");
    printAttributeSummary(
        nominalPredictor, numericPredictor, stringPredictor, datePredictor,
        relationalPredictor, multiInstance, classType);
    print("...");
    // Exceptions mentioning these words count as an acceptable refusal.
    FastVector accepts = new FastVector();
    accepts.addElement("missing");
    accepts.addElement("value");
    accepts.addElement("train");
    int numTrain = getNumInstances(), numTest = getNumInstances(),
        numClasses = 2;

    return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
        datePredictor, relationalPredictor, multiInstance,
        classType,
        missingLevel, predictorMissing, classMissing,
        numTrain, numTest, numClasses,
        accepts);
  }

  /**
   * Checks whether an updateable scheme produces the same model when
   * trained incrementally as when batch trained. The model itself
   * cannot be compared, so we compare the evaluation on test data
   * for both models. It is possible to get a false positive on this
   * test (likelihood depends on the classifier).
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @return index 0 is true if the test was passed
   */
  protected boolean[] updatingEquality(
      boolean nominalPredictor,
      boolean numericPredictor,
      boolean stringPredictor,
      boolean datePredictor,
      boolean relationalPredictor,
      boolean multiInstance,
      int classType) {

    print("incremental training produces the same results"
        + " as batch training");
    printAttributeSummary(
        nominalPredictor, numericPredictor, stringPredictor, datePredictor,
        relationalPredictor, multiInstance, classType);
    print("...");
    int numTrain = getNumInstances(), numTest = getNumInstances(),
        numClasses = 2, missingLevel = 0;
    boolean predictorMissing = false, classMissing = false;

    boolean[] result = new boolean[2];
    Instances train = null;
    Instances test = null;
    // classifiers[0] is batch trained, classifiers[1] incrementally trained.
    Classifier [] classifiers = null;
    Evaluation evaluationB = null;
    Evaluation evaluationI = null;
    boolean built = false;
    try {
      train = makeTestDataset(42, numTrain,
                              nominalPredictor    ? getNumNominal()    : 0,
                              numericPredictor    ? getNumNumeric()    : 0,
                              stringPredictor     ? getNumString()     : 0,
                              datePredictor       ? getNumDate()       : 0,
                              relationalPredictor ? getNumRelational() : 0,
                              numClasses,
                              classType,
                              multiInstance);
      test = makeTestDataset(24, numTest,
                             nominalPredictor    ? getNumNominal()    : 0,
                             numericPredictor    ? getNumNumeric()    : 0,
                             stringPredictor     ? getNumString()     : 0,
                             datePredictor       ? getNumDate()       : 0,
                             relationalPredictor ? getNumRelational() : 0,
                             numClasses,
                             classType,
                             multiInstance);
      if (missingLevel > 0) {
        addMissing(train, missingLevel, predictorMissing, classMissing);
        addMissing(test, Math.min(missingLevel, 50), predictorMissing,
            classMissing);
      }
      classifiers = AbstractClassifier.makeCopies(getClassifier(), 2);
      evaluationB = new Evaluation(train);
      evaluationI = new Evaluation(train);
      classifiers[0].buildClassifier(train);
      testWRTZeroR(classifiers[0], evaluationB, train, test);
    } catch (Exception ex) {
      throw new Error("Error setting up for tests: " + ex.getMessage());
    }
    try {
      // Incremental path: build on an empty copy of the header, then feed
      // the training instances one at a time.
      classifiers[1].buildClassifier(new Instances(train, 0));
      for (int i = 0; i < train.numInstances(); i++) {
        ((UpdateableClassifier)classifiers[1]).updateClassifier(
            train.instance(i));
      }
      built = true;
      testWRTZeroR(classifiers[1], evaluationI, train, test);
      // The two evaluations must agree for the test to pass.
      if (!evaluationB.equals(evaluationI)) {
        println("no");
        result[0] = false;

        if (m_Debug) {
          println("\n=== Full Report ===");
          println("Results differ between batch and "
              + "incrementally built models.\n"
              + "Depending on the classifier, this may be OK");
          println("Here are the results:\n");
          println(evaluationB.toSummaryString(
              "\nbatch built results\n", true));
          println(evaluationI.toSummaryString(
              "\nincrementally built results\n", true));
          println("Here are the datasets:\n");
          println("=== Train Dataset ===\n"
              + train.toString() + "\n");
          println("=== Test Dataset ===\n"
              + test.toString() + "\n\n");
        }
      } else {
        println("yes");
        result[0] = true;
      }
    } catch (Exception ex) {
      result[0] = false;

      print("Problem during");
      if (built)
        print(" testing");
      else
        print(" training");
      println(": " + ex.getMessage() + "\n");
    }

    return result;
  }

  /**
   * Checks whether the classifier erroneously uses the class
   * value of test instances (if provided). Runs the classifier with
   * test instance class values set to missing and compares with results
   * when test instance class values are left intact.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @return index 0 is true if the test was passed
   */
  protected boolean[] doesntUseTestClassVal(
      boolean nominalPredictor,
      boolean numericPredictor,
      boolean stringPredictor,
      boolean datePredictor,
      boolean relationalPredictor,
      boolean multiInstance,
      int classType) {

    print("classifier ignores test instance class vals");
    printAttributeSummary(
        nominalPredictor, numericPredictor, stringPredictor, datePredictor,
        relationalPredictor, multiInstance, classType);
    print("...");
    int numTrain = 2*getNumInstances(), numTest = getNumInstances(),
        numClasses = 2, missingLevel = 0;
    boolean predictorMissing = false, classMissing = false;

    boolean[] result = new boolean[2];
    Instances train = null;
    Instances test = null;
    // Two identically-trained copies: one is queried with the class intact,
    // the other with the class set to missing; predictions must match.
    Classifier [] classifiers = null;
    // NOTE(review): evalFail is never set to true anywhere in this method,
    // so the debug branch below that reads it is effectively dead.
    boolean evalFail = false;
    try {
      train = makeTestDataset(42, numTrain,
                              nominalPredictor    ? getNumNominal() + 1 : 0,
                              numericPredictor    ? getNumNumeric() + 1 : 0,
                              stringPredictor     ? getNumString()      : 0,
                              datePredictor       ? getNumDate()        : 0,
                              relationalPredictor ? getNumRelational()  : 0,
                              numClasses,
                              classType,
                              multiInstance);
      test = makeTestDataset(24, numTest,
                             nominalPredictor    ? getNumNominal() + 1 : 0,
                             numericPredictor    ? getNumNumeric() + 1 : 0,
                             stringPredictor     ? getNumString()      : 0,
                             datePredictor       ? getNumDate()        : 0,
                             relationalPredictor ? getNumRelational()  : 0,
                             numClasses,
                             classType,
                             multiInstance);
      if (missingLevel > 0) {
        addMissing(train, missingLevel, predictorMissing, classMissing);
        addMissing(test, Math.min(missingLevel, 50), predictorMissing,
            classMissing);
      }
      classifiers = AbstractClassifier.makeCopies(getClassifier(), 2);
      classifiers[0].buildClassifier(train);
      classifiers[1].buildClassifier(train);
    } catch (Exception ex) {
      throw new Error("Error setting up for tests: " + ex.getMessage());
    }
    try {

      // Now set test values to missing when predicting
      for (int i = 0; i < test.numInstances(); i++) {
        Instance testInst = test.instance(i);
        Instance classMissingInst = (Instance)testInst.copy();
        classMissingInst.setDataset(test);
        classMissingInst.setClassMissing();
        double [] dist0 = classifiers[0].distributionForInstance(testInst);
        double [] dist1 = classifiers[1].distributionForInstance(classMissingInst);
        for (int j = 0; j < dist0.length; j++) {
          // ignore, if both are NaNs
          if (Double.isNaN(dist0[j]) && Double.isNaN(dist1[j])) {
            if (getDebug())
              System.out.println("Both predictions are NaN!");
            continue;
          }
          // distribution different?
          if (dist0[j] != dist1[j]) {
            throw new Exception("Prediction different for instance " + (i + 1));
          }
        }
      }

      println("yes");
      result[0] = true;
    } catch (Exception ex) {
      println("no");
      result[0] = false;

      if (m_Debug) {
        println("\n=== Full Report ===");

        if (evalFail) {
          println("Results differ between non-missing and "
              + "missing test class values.");
        } else {
          print("Problem during testing");
          println(": " + ex.getMessage() + "\n");
        }
        println("Here are the datasets:\n");
        println("=== Train Dataset ===\n"
            + train.toString() + "\n");
        println("=== Train Weights ===\n");
        for (int i = 0; i < train.numInstances(); i++) {
          println(" " + (i + 1) + " " + train.instance(i).weight());
        }
        println("=== Test Dataset ===\n"
            + test.toString() + "\n\n");
        println("(test weights all 1.0\n");
      }
    }

    return result;
  }

  /**
   * Checks whether the classifier can handle instance weights.
* This test compares the classifier performance on two datasets * that are identical except for the training weights. If the * results change, then the classifier must be using the weights. It * may be possible to get a false positive from this test if the * weight changes aren't significant enough to induce a change * in classifier performance (but the weights are chosen to minimize * the likelihood of this). * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) * @return index 0 true if the test was passed */ protected boolean[] instanceWeights( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType) { print("classifier uses instance weights"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); int numTrain = 2*getNumInstances(), numTest = getNumInstances(), numClasses = 2, missingLevel = 0; boolean predictorMissing = false, classMissing = false; boolean[] result = new boolean[2]; Instances train = null; Instances test = null; Classifier [] classifiers = null; Evaluation evaluationB = null; Evaluation evaluationI = null; boolean built = false; boolean evalFail = false; try { train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal() + 1 : 0, numericPredictor ? getNumNumeric() + 1 : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? 
getNumRelational() : 0, numClasses, classType, multiInstance); test = makeTestDataset(24, numTest, nominalPredictor ? getNumNominal() + 1 : 0, numericPredictor ? getNumNumeric() + 1 : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? getNumRelational() : 0, numClasses, classType, multiInstance); if (missingLevel > 0) { addMissing(train, missingLevel, predictorMissing, classMissing); addMissing(test, Math.min(missingLevel, 50), predictorMissing, classMissing); } classifiers = AbstractClassifier.makeCopies(getClassifier(), 2); evaluationB = new Evaluation(train); evaluationI = new Evaluation(train); classifiers[0].buildClassifier(train); testWRTZeroR(classifiers[0], evaluationB, train, test); } catch (Exception ex) { throw new Error("Error setting up for tests: " + ex.getMessage()); } try { // Now modify instance weights and re-built/test for (int i = 0; i < train.numInstances(); i++) { train.instance(i).setWeight(0); } Random random = new Random(1); for (int i = 0; i < train.numInstances() / 2; i++) { int inst = Math.abs(random.nextInt()) % train.numInstances(); int weight = Math.abs(random.nextInt()) % 10 + 1; train.instance(inst).setWeight(weight); } classifiers[1].buildClassifier(train); built = true; testWRTZeroR(classifiers[1], evaluationI, train, test); if (evaluationB.equals(evaluationI)) { // println("no"); evalFail = true; throw new Exception("evalFail"); } println("yes"); result[0] = true; } catch (Exception ex) { println("no"); result[0] = false; if (m_Debug) { println("\n=== Full Report ==="); if (evalFail) { println("Results don't differ between non-weighted and " + "weighted instance models."); println("Here are the results:\n"); println(evaluationB.toSummaryString("\nboth methods\n", true)); } else { print("Problem during"); if (built) { print(" testing"); } else { print(" training"); } println(": " + ex.getMessage() + "\n"); } println("Here are the datasets:\n"); println("=== Train Dataset ===\n" + 
train.toString() + "\n"); println("=== Train Weights ===\n"); for (int i = 0; i < train.numInstances(); i++) { println(" " + (i + 1) + " " + train.instance(i).weight()); } println("=== Test Dataset ===\n" + test.toString() + "\n\n"); println("(test weights all 1.0\n"); } } return result; } /** * Checks whether the scheme alters the training dataset during * training. If the scheme needs to modify the training * data it should take a copy of the training data. Currently checks * for changes to header structure, number of instances, order of * instances, instance weights. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) 
   * @param predictorMissing true if we know the classifier can handle
   *        (at least) moderate missing predictor values
   * @param classMissing true if we know the classifier can handle
   *        (at least) moderate missing class values
   * @return index 0 is true if the test was passed
   */
  protected boolean[] datasetIntegrity(
      boolean nominalPredictor,
      boolean numericPredictor,
      boolean stringPredictor,
      boolean datePredictor,
      boolean relationalPredictor,
      boolean multiInstance,
      int classType,
      boolean predictorMissing,
      boolean classMissing) {

    print("classifier doesn't alter original datasets");
    printAttributeSummary(
        nominalPredictor, numericPredictor, stringPredictor, datePredictor,
        relationalPredictor, multiInstance, classType);
    print("...");
    // A moderate missing-value level is deliberately injected so that any
    // in-place imputation by the scheme would be detectable.
    int numTrain = getNumInstances(), numTest = getNumInstances(),
        numClasses = 2, missingLevel = 20;

    boolean[] result = new boolean[2];
    Instances train = null;
    Instances test = null;
    Classifier classifier = null;
    Evaluation evaluation = null;
    boolean built = false;
    try {
      train = makeTestDataset(42, numTrain,
                              nominalPredictor    ? getNumNominal()    : 0,
                              numericPredictor    ? getNumNumeric()    : 0,
                              stringPredictor     ? getNumString()     : 0,
                              datePredictor       ? getNumDate()       : 0,
                              relationalPredictor ? getNumRelational() : 0,
                              numClasses,
                              classType,
                              multiInstance);
      test = makeTestDataset(24, numTest,
                             nominalPredictor    ? getNumNominal()    : 0,
                             numericPredictor    ? getNumNumeric()    : 0,
                             stringPredictor     ? getNumString()     : 0,
                             datePredictor       ? getNumDate()       : 0,
                             relationalPredictor ? getNumRelational() : 0,
                             numClasses,
                             classType,
                             multiInstance);
      if (missingLevel > 0) {
        addMissing(train, missingLevel, predictorMissing, classMissing);
        addMissing(test, Math.min(missingLevel, 50), predictorMissing,
            classMissing);
      }
      classifier = AbstractClassifier.makeCopies(getClassifier(), 1)[0];
      evaluation = new Evaluation(train);
    } catch (Exception ex) {
      throw new Error("Error setting up for tests: " + ex.getMessage());
    }
    try {
      // Train/test on copies; compareDatasets throws if the originals and
      // copies have diverged (i.e. the scheme mutated its input).
      Instances trainCopy = new Instances(train);
      Instances testCopy = new Instances(test);
      classifier.buildClassifier(trainCopy);
      compareDatasets(train, trainCopy);
      built = true;
      testWRTZeroR(classifier, evaluation, trainCopy, testCopy);
      compareDatasets(test, testCopy);

      println("yes");
      result[0] = true;
    } catch (Exception ex) {
      println("no");
      result[0] = false;

      if (m_Debug) {
        println("\n=== Full Report ===");
        print("Problem during");
        if (built) {
          print(" testing");
        } else {
          print(" training");
        }
        println(": " + ex.getMessage() + "\n");
        println("Here are the datasets:\n");
        println("=== Train Dataset ===\n"
            + train.toString() + "\n");
        println("=== Test Dataset ===\n"
            + test.toString() + "\n\n");
      }
    }

    return result;
  }

  /**
   * Runs a test on the datasets with the given characteristics.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @param missingLevel the percentage of missing values
   * @param predictorMissing true if the missing values may be in
   *        the predictors
   * @param classMissing true if the missing values may be in the class
   * @param numTrain the number of instances in the training set
   * @param numTest the number of instances in the test set
   * @param numClasses the number of classes
   * @param accepts the acceptable string in an exception
   * @return index 0 is true if the test was passed, index 1 is true if test
   *         was acceptable
   */
  protected boolean[] runBasicTest(boolean nominalPredictor,
      boolean numericPredictor,
      boolean stringPredictor,
      boolean datePredictor,
      boolean relationalPredictor,
      boolean multiInstance,
      int classType,
      int missingLevel,
      boolean predictorMissing,
      boolean classMissing,
      int numTrain,
      int numTest,
      int numClasses,
      FastVector accepts) {

    // Convenience overload: delegates with the class attribute in its
    // default (last) position.
    return runBasicTest(
        nominalPredictor,
        numericPredictor,
        stringPredictor,
        datePredictor,
        relationalPredictor,
        multiInstance,
        classType,
        TestInstances.CLASS_IS_LAST,
        missingLevel,
        predictorMissing,
        classMissing,
        numTrain,
        numTest,
        numClasses,
        accepts);
  }

  /**
   * Runs a test on the datasets with the given characteristics.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @param classIndex the attribute index of the class
   * @param missingLevel the percentage of missing values
   * @param predictorMissing true if the missing values may be in
   *        the predictors
   * @param classMissing true if the missing values may be in the class
   * @param numTrain the number of instances in the training set
   * @param numTest the number of instances in the test set
   * @param numClasses the number of classes
   * @param accepts the acceptable string in an exception
   * @return index 0 is true if the test was passed, index 1 is true if test
   *         was acceptable
   */
  protected boolean[] runBasicTest(boolean nominalPredictor,
      boolean numericPredictor,
      boolean stringPredictor,
      boolean datePredictor,
      boolean relationalPredictor,
      boolean multiInstance,
      int classType,
      int classIndex,
      int missingLevel,
      boolean predictorMissing,
      boolean classMissing,
      int numTrain,
      int numTest,
      int numClasses,
      FastVector accepts) {

    boolean[] result = new boolean[2];
    Instances train = null;
    Instances test = null;
    Classifier classifier = null;
    Evaluation evaluation = null;
    boolean built = false;
    try {
      train = makeTestDataset(42, numTrain,
                              nominalPredictor    ? getNumNominal()    : 0,
                              numericPredictor    ? getNumNumeric()    : 0,
                              stringPredictor     ? getNumString()     : 0,
                              datePredictor       ? getNumDate()       : 0,
                              relationalPredictor ? getNumRelational() : 0,
                              numClasses,
                              classType,
                              classIndex,
                              multiInstance);
      test = makeTestDataset(24, numTest,
                             nominalPredictor    ? getNumNominal()    : 0,
                             numericPredictor    ? getNumNumeric()    : 0,
                             stringPredictor     ? getNumString()     : 0,
                             datePredictor       ? getNumDate()       : 0,
                             relationalPredictor ? getNumRelational() : 0,
                             numClasses,
                             classType,
                             classIndex,
                             multiInstance);
      if (missingLevel > 0) {
        addMissing(train, missingLevel, predictorMissing, classMissing);
        addMissing(test, Math.min(missingLevel, 50), predictorMissing,
            classMissing);
      }
      classifier = AbstractClassifier.makeCopies(getClassifier(), 1)[0];
      evaluation = new Evaluation(train);
    } catch (Exception ex) {
      ex.printStackTrace();
      throw new Error("Error setting up for tests: " + ex.getMessage());
    }
    try {
      classifier.buildClassifier(train);
      built = true;
      if (!testWRTZeroR(classifier, evaluation, train, test)[0]) {
        // Underperforming ZeroR is a warning, not a failure: both flags are
        // set before the exception routes us to the reporting code below.
        result[0] = true;
        result[1] = true;
        throw new Exception("Scheme performs worse than ZeroR");
      }

      println("yes");
      result[0] = true;
    } catch (Exception ex) {
      boolean acceptable = false;
      String msg;
      // Exceptions from arbitrary classifiers may carry a null message.
      if (ex.getMessage() == null)
        msg = "";
      else
        msg = ex.getMessage().toLowerCase();
      if (msg.indexOf("not in classpath") > -1)
        m_ClasspathProblems = true;
      if (msg.indexOf("worse than zeror") >= 0) {
        println("warning: performs worse than ZeroR");
        result[0] = true;
        result[1] = true;
      } else {
        // The failure is "acceptable" when the error message mentions one of
        // the caller-provided keywords (a graceful refusal).
        for (int i = 0; i < accepts.size(); i++) {
          if (msg.indexOf((String)accepts.elementAt(i)) >= 0) {
            acceptable = true;
          }
        }

        println("no" + (acceptable ? " (OK error message)" : ""));
        result[1] = acceptable;
      }

      if (m_Debug) {
        println("\n=== Full Report ===");
        print("Problem during");
        if (built) {
          print(" testing");
        } else {
          print(" training");
        }
        println(": " + ex.getMessage() + "\n");
        if (!acceptable) {
          if (accepts.size() > 0) {
            print("Error message doesn't mention ");
            for (int i = 0; i < accepts.size(); i++) {
              if (i != 0) {
                print(" or ");
              }
              print('"' + (String)accepts.elementAt(i) + '"');
            }
          }
          println("here are the datasets:\n");
          println("=== Train Dataset ===\n"
              + train.toString() + "\n");
          println("=== Test Dataset ===\n"
              + test.toString() + "\n\n");
        }
      }
    }

    return result;
  }

  /**
   * Determine whether the scheme performs worse than ZeroR during testing
   *
   * @param classifier the pre-trained classifier
   * @param evaluation the classifier evaluation object
   * @param train the training data
   * @param test the test data
   * @return index 0 is true if the scheme performs better than ZeroR
   * @throws Exception if there was a problem during the scheme's testing
   */
  protected boolean[] testWRTZeroR(Classifier classifier,
      Evaluation evaluation,
      Instances train, Instances test)
    throws Exception {

    boolean[] result = new boolean[2];

    evaluation.evaluateModel(classifier, test);
    try {

      // Tested OK, compare with ZeroR
      Classifier zeroR = new weka.classifiers.rules.ZeroR();
      zeroR.buildClassifier(train);
      Evaluation zeroREval = new Evaluation(train);
      zeroREval.evaluateModel(zeroR, test);
      // Passes when the scheme's error rate is no worse than ZeroR's.
      result[0] = Utils.grOrEq(zeroREval.errorRate(),
          evaluation.errorRate());
    } catch (Exception ex) {
      throw new Error("Problem determining ZeroR performance: "
          + ex.getMessage());
    }

    return result;
  }

  /**
   * Make a simple set of instances, which can later be modified
   * for use in specific tests.
   *
   * @param seed the random number seed
   * @param numInstances the number of instances to generate
   * @param numNominal the number of nominal attributes
   * @param numNumeric the number of numeric attributes
   * @param numString the number of string attributes
   * @param numDate the number of date attributes
   * @param numRelational the number of relational attributes
   * @param numClasses the number of classes (if nominal class)
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @param multiInstance whether the dataset should a multi-instance dataset
   * @return the test dataset
   * @throws Exception if the dataset couldn't be generated
   * @see #process(Instances)
   */
  protected Instances makeTestDataset(int seed, int numInstances,
      int numNominal, int numNumeric,
      int numString, int numDate,
      int numRelational,
      int numClasses, int classType,
      boolean multiInstance)
    throws Exception {

    // Convenience overload: delegates with the class attribute in its
    // default (last) position.
    return makeTestDataset(
        seed,
        numInstances,
        numNominal,
        numNumeric,
        numString,
        numDate,
        numRelational,
        numClasses,
        classType,
        TestInstances.CLASS_IS_LAST,
        multiInstance);
  }

  /**
   * Make a simple set of instances with variable position of the class
   * attribute, which can later be modified for use in specific tests.
   *
   * @param seed the random number seed
   * @param numInstances the number of instances to generate
   * @param numNominal the number of nominal attributes
   * @param numNumeric the number of numeric attributes
   * @param numString the number of string attributes
   * @param numDate the number of date attributes
   * @param numRelational the number of relational attributes
   * @param numClasses the number of classes (if nominal class)
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @param classIndex the index of the class (0-based, -1 as last)
   * @param multiInstance whether the dataset should be a multi-instance dataset
   * @return the test dataset
   * @throws Exception if the dataset couldn't be generated
   * @see TestInstances#CLASS_IS_LAST
   * @see #process(Instances)
   */
  protected Instances makeTestDataset(int seed, int numInstances,
                                      int numNominal, int numNumeric,
                                      int numString, int numDate,
                                      int numRelational, int numClasses,
                                      int classType, int classIndex,
                                      boolean multiInstance)
    throws Exception {

    // Configure a TestInstances generator with the requested characteristics.
    TestInstances dataset = new TestInstances();
    dataset.setSeed(seed);
    dataset.setNumInstances(numInstances);
    dataset.setNumNominal(numNominal);
    dataset.setNumNumeric(numNumeric);
    dataset.setNumString(numString);
    dataset.setNumDate(numDate);
    dataset.setNumRelational(numRelational);
    dataset.setNumClasses(numClasses);
    dataset.setClassType(classType);
    dataset.setClassIndex(classIndex);
    // NOTE(review): setNumClasses is invoked a second time here, repeating the
    // call above. Looks redundant — confirm against TestInstances whether
    // setClassType/setClassIndex reset the class count internally.
    dataset.setNumClasses(numClasses);
    dataset.setMultiInstance(multiInstance);
    dataset.setWords(getWords());
    dataset.setWordSeparators(getWordSeparators());

    // Generate the data and pass it through the process() hook before return.
    return process(dataset.generate());
  }

  /**
   * Print out a short summary string for the dataset characteristics
   *
   * @param nominalPredictor true if nominal predictor attributes are present
   * @param numericPredictor true if numeric predictor attributes are present
   * @param stringPredictor true if string predictor attributes are present
   * @param datePredictor true if date predictor attributes are present
   * @param relationalPredictor true if relational predictor attributes are present
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   */
  protected void printAttributeSummary(boolean nominalPredictor,
      boolean numericPredictor, boolean stringPredictor,
      boolean datePredictor, boolean relationalPredictor,
      boolean multiInstance, int classType) {

    // Build the predictor portion right-to-left, e.g. " numeric & nominal";
    // "&" is only inserted once a previous predictor type has been appended.
    String str = "";

    if (numericPredictor)
      str += " numeric";

    if (nominalPredictor) {
      if (str.length() > 0)
        str += " &";
      str += " nominal";
    }

    if (stringPredictor) {
      if (str.length() > 0)
        str += " &";
      str += " string";
    }

    if (datePredictor) {
      if (str.length() > 0)
        str += " &";
      str += " date";
    }

    if (relationalPredictor) {
      if (str.length() > 0)
        str += " &";
      str += " relational";
    }

    str += " predictors)";

    // Prepend the class-type description, producing e.g.
    // " (nominal class, numeric & nominal predictors)".
    switch (classType) {
      case Attribute.NUMERIC:
        str = " (numeric class," + str;
        break;
      case Attribute.NOMINAL:
        str = " (nominal class," + str;
        break;
      case Attribute.STRING:
        str = " (string class," + str;
        break;
      case Attribute.DATE:
        str = " (date class," + str;
        break;
      case Attribute.RELATIONAL:
        str = " (relational class," + str;
        break;
    }
    // NOTE(review): the multiInstance flag is accepted but never used in this
    // method — confirm whether the summary should mention it.

    print(str);
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  /**
   * Test method for this class
   *
   * @param args the commandline parameters
   */
  public static void main(String [] args) {
    runCheck(new CheckClassifier(), args);
  }
}
72,518
35.792998
131
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/CheckSource.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * CheckSource.java
 * Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.classifiers;

import java.io.File;
import java.util.Enumeration;
import java.util.Vector;

import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.converters.ConverterUtils.DataSource;

/**
 * A simple class for checking the source generated from Classifiers
 * implementing the <code>weka.classifiers.Sourcable</code> interface.
 * It takes a classifier, the classname of the generated source
 * and the dataset the source was generated with as parameters and tests
 * the output of the built classifier against the output of the generated
 * source. Use option '-h' to display all available commandline options.
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -W &lt;classname and options&gt;
 *  The classifier (incl. options) that was used to generate
 *  the source code.</pre>
 *
 * <pre> -S &lt;classname&gt;
 *  The classname of the generated source code.</pre>
 *
 * <pre> -t &lt;file&gt;
 *  The training set with which the source code was generated.</pre>
 *
 * <pre> -c &lt;index&gt;
 *  The class index of the training set. 'first' and 'last' are
 *  valid indices.
 *  (default: last)</pre>
 *
 <!-- options-end -->
 *
 * Options after -- are passed to the designated classifier (specified with -W).
 *
 * @author fracpete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 8034 $
 * @see weka.classifiers.Sourcable
 */
public class CheckSource implements OptionHandler, RevisionHandler {

  /** the classifier used for generating the source code */
  protected Classifier m_Classifier = null;

  /** the generated source code */
  protected Classifier m_SourceCode = null;

  /** the dataset to use for testing */
  protected File m_Dataset = null;

  /** the class index (0-based, -1 means "last attribute") */
  protected int m_ClassIndex = -1;

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    // typed Vector avoids the raw-type unchecked warnings of the original
    Vector<Option> result = new Vector<Option>();

    result.addElement(new Option(
        "\tThe classifier (incl. options) that was used to generate\n"
        + "\tthe source code.",
        "W", 1, "-W <classname and options>"));

    result.addElement(new Option(
        "\tThe classname of the generated source code.",
        "S", 1, "-S <classname>"));

    result.addElement(new Option(
        "\tThe training set with which the source code was generated.",
        "t", 1, "-t <file>"));

    result.addElement(new Option(
        "\tThe class index of the training set. 'first' and 'last' are\n"
        + "\tvalid indices.\n"
        + "\t(default: last)",
        "c", 1, "-c <index>"));

    return result.elements();
  }

  /**
   * Parses a given list of options. Options after -- are passed to the
   * designated classifier (specified with -W).
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String tmpStr;
    String[] spec;
    String classname;

    // -W: classifier (classname + options), mandatory
    tmpStr = Utils.getOption('W', options);
    if (tmpStr.length() > 0) {
      spec = Utils.splitOptions(tmpStr);
      if (spec.length == 0)
        throw new IllegalArgumentException(
            "Invalid classifier specification string");
      classname = spec[0];
      // blank out the classname so the remainder acts as the option array
      spec[0] = "";
      setClassifier((Classifier) Utils.forName(Classifier.class, classname, spec));
    } else {
      throw new Exception("No classifier (classname + options) provided!");
    }

    // -S: generated source classname (no options allowed), mandatory
    tmpStr = Utils.getOption('S', options);
    if (tmpStr.length() > 0) {
      spec = Utils.splitOptions(tmpStr);
      if (spec.length != 1)
        throw new IllegalArgumentException(
            "Invalid source code specification string");
      classname = spec[0];
      spec[0] = "";
      setSourceCode((Classifier) Utils.forName(Classifier.class, classname, spec));
    } else {
      throw new Exception("No source code (classname) provided!");
    }

    // -t: training set file, mandatory
    tmpStr = Utils.getOption('t', options);
    if (tmpStr.length() != 0)
      setDataset(new File(tmpStr));
    else
      throw new Exception("No dataset provided!");

    // -c: class index; 'first', 'last' or a 1-based number (default: last)
    tmpStr = Utils.getOption('c', options);
    if (tmpStr.length() != 0) {
      if (tmpStr.equals("first"))
        setClassIndex(0);
      else if (tmpStr.equals("last"))
        setClassIndex(-1);
      else
        setClassIndex(Integer.parseInt(tmpStr) - 1);
    } else {
      setClassIndex(-1);
    }
  }

  /**
   * Gets the current settings of the Classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    Vector<String> result;

    result = new Vector<String>();

    if (getClassifier() != null) {
      result.add("-W");
      // NOTE(review): assumes the classifier implements OptionHandler — a
      // plain Classifier would raise a ClassCastException here
      result.add(getClassifier().getClass().getName() + " "
          + Utils.joinOptions(((OptionHandler) getClassifier()).getOptions()));
    }

    if (getSourceCode() != null) {
      result.add("-S");
      result.add(getSourceCode().getClass().getName());
    }

    if (getDataset() != null) {
      result.add("-t");
      result.add(m_Dataset.getAbsolutePath());
    }

    result.add("-c");
    if (getClassIndex() == -1)
      result.add("last");
    else if (getClassIndex() == 0)
      result.add("first");
    else
      result.add("" + (getClassIndex() + 1));

    return result.toArray(new String[result.size()]);
  }

  /**
   * Sets the classifier to use for the comparison.
   *
   * @param value the classifier to use
   */
  public void setClassifier(Classifier value) {
    m_Classifier = value;
  }

  /**
   * Gets the classifier being used for the tests, can be null.
   *
   * @return the currently set classifier
   */
  public Classifier getClassifier() {
    return m_Classifier;
  }

  /**
   * Sets the class to test.
   *
   * @param value the class to test
   */
  public void setSourceCode(Classifier value) {
    m_SourceCode = value;
  }

  /**
   * Gets the class to test.
   *
   * @return the currently set class, can be null.
   */
  public Classifier getSourceCode() {
    return m_SourceCode;
  }

  /**
   * Sets the dataset to use for testing.
   *
   * @param value the dataset to use.
   * @throws IllegalArgumentException if the file does not exist
   */
  public void setDataset(File value) {
    if (!value.exists())
      throw new IllegalArgumentException(
          "Dataset '" + value.getAbsolutePath() + "' does not exist!");
    else
      m_Dataset = value;
  }

  /**
   * Gets the dataset to use for testing, can be null.
   *
   * @return the dataset to use.
   */
  public File getDataset() {
    return m_Dataset;
  }

  /**
   * Sets the class index of the dataset.
   *
   * @param value the class index of the dataset (0-based, -1 for last).
   */
  public void setClassIndex(int value) {
    m_ClassIndex = value;
  }

  /**
   * Gets the class index of the dataset.
   *
   * @return the current class index.
   */
  public int getClassIndex() {
    return m_ClassIndex;
  }

  /**
   * performs the comparison test: builds the classifier on the dataset and
   * compares its prediction on every instance against the pre-compiled
   * source-code classifier, printing every mismatch.
   *
   * @return true if tests were successful
   * @throws Exception if tests fail
   */
  public boolean execute() throws Exception {
    boolean result;
    Classifier cls;
    Classifier code;
    int i;
    Instances data;
    DataSource source;
    boolean numeric;
    boolean different;
    double predClassifier;
    double predSource;

    result = true;

    // a few checks
    if (getClassifier() == null)
      throw new Exception("No classifier set!");
    if (getSourceCode() == null)
      throw new Exception("No source code set!");
    if (getDataset() == null)
      throw new Exception("No dataset set!");
    if (!getDataset().exists())
      throw new Exception(
          "Dataset '" + getDataset().getAbsolutePath() + "' does not exist!");

    // load data
    source = new DataSource(getDataset().getAbsolutePath());
    data = source.getDataSet();
    if (getClassIndex() == -1)
      data.setClassIndex(data.numAttributes() - 1);
    else
      data.setClassIndex(getClassIndex());
    numeric = data.classAttribute().isNumeric();

    // build classifier (on a copy, so the configured template stays untouched)
    cls = AbstractClassifier.makeCopy(getClassifier());
    cls.buildClassifier(data);

    code = getSourceCode();

    // compare predictions instance by instance
    for (i = 0; i < data.numInstances(); i++) {
      // perform predictions
      predClassifier = cls.classifyInstance(data.instance(i));
      predSource = code.classifyInstance(data.instance(i));

      // compare both results; two NaNs ("no prediction") count as equal
      if (Double.isNaN(predClassifier) && Double.isNaN(predSource)) {
        different = false;
      } else {
        if (numeric)
          different = !Utils.eq(predClassifier, predSource);
        else
          different = ((int) predClassifier != (int) predSource);
      }

      if (different) {
        result = false;
        if (numeric)
          System.out.println(
              (i+1) + ". instance (Classifier/Source code): "
              + predClassifier + " != " + predSource);
        else
          System.out.println(
              (i+1) + ". instance (Classifier/Source code): "
              + data.classAttribute().value((int) predClassifier)
              + " != " + data.classAttribute().value((int) predSource));
      }
    }

    return result;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  /**
   * Executes the tests, use "-h" to list the commandline options.
   *
   * @param args the commandline parameters
   * @throws Exception if something goes wrong
   */
  public static void main(String[] args) throws Exception{
    CheckSource check;
    StringBuilder text;  // local string assembly: StringBuilder over StringBuffer
    Enumeration enm;

    check = new CheckSource();
    if (Utils.getFlag('h', args)) {
      text = new StringBuilder();
      text.append("\nHelp requested:\n\n");
      enm = check.listOptions();
      while (enm.hasMoreElements()) {
        Option option = (Option) enm.nextElement();
        text.append(option.synopsis() + "\n");
        text.append(option.description() + "\n");
      }
      System.out.println("\n" + text + "\n");
    } else {
      check.setOptions(args);
      if (check.execute())
        System.out.println("Tests OK!");
      else
        System.out.println("Tests failed!");
    }
  }
}
11,931
26.878505
87
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/Classifier.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Classifier.java
 * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;

/**
 * Classifier interface. All schemes for numeric or nominal prediction in
 * Weka implement this interface. Note that a classifier MUST either implement
 * distributionForInstance() or classifyInstance().
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @author Len Trigg (trigg@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public interface Classifier {

  /**
   * Generates a classifier. Must initialize all fields of the classifier
   * that are not being set via options (ie. multiple calls of buildClassifier
   * must always lead to the same result). Must not change the dataset
   * in any way.
   *
   * @param data set of instances serving as training data
   * @exception Exception if the classifier has not been
   * generated successfully
   */
  public abstract void buildClassifier(Instances data) throws Exception;

  /**
   * Classifies the given test instance. The instance has to belong to a
   * dataset when it's being classified. Note that a classifier MUST
   * implement either this or distributionForInstance().
   *
   * @param instance the instance to be classified
   * @return the predicted most likely class for the instance or
   * Utils.missingValue() if no prediction is made
   * @exception Exception if an error occurred during the prediction
   */
  public double classifyInstance(Instance instance) throws Exception;

  /**
   * Predicts the class memberships for a given instance. If
   * an instance is unclassified, the returned array elements
   * must be all zero. If the class is numeric, the array
   * must consist of only one element, which contains the
   * predicted value. Note that a classifier MUST implement
   * either this or classifyInstance().
   *
   * @param instance the instance to be classified
   * @return an array containing the estimated membership
   * probabilities of the test instance in each class
   * or the numeric prediction
   * @exception Exception if distribution could not be
   * computed successfully
   */
  public double[] distributionForInstance(Instance instance) throws Exception;

  /**
   * Returns the Capabilities of this classifier. Maximally permissive
   * capabilities are allowed by default. Derived classifiers should
   * override this method and first disable all capabilities and then
   * enable just those capabilities that make sense for the scheme.
   *
   * @return the capabilities of this object
   * @see Capabilities
   */
  public Capabilities getCapabilities();
}
3,408
36.054348
78
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/ConditionalDensityEstimator.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * ConditionalDensityEstimator.java
 * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import weka.core.Instance;

/**
 * Interface for numeric prediction schemes that can output conditional
 * density estimates, i.e. an estimate of the density of the target value
 * conditioned on a particular instance.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public interface ConditionalDensityEstimator {

  /**
   * Returns natural logarithm of density estimate for given value based on
   * given instance. Working in log space avoids underflow for very small
   * densities.
   *
   * @param instance the instance to make the prediction for.
   * @param value the value to make the prediction for.
   * @return the natural logarithm of the density estimate
   * @exception Exception if the density cannot be computed
   */
  public double logDensity(Instance instance, double value) throws Exception;
}
1,510
31.847826
91
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/CostMatrix.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * CostMatrix.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers; import java.io.LineNumberReader; import java.io.Reader; import java.io.Serializable; import java.io.StreamTokenizer; import java.io.Writer; import java.util.Random; import java.util.StringTokenizer; import weka.core.AttributeExpression; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; /** * Class for storing and manipulating a misclassification cost matrix. The * element at position i,j in the matrix is the penalty for classifying an * instance of class j as class i. Cost values can be fixed or computed on a * per-instance basis (cost sensitive evaluation only) from the value of an * attribute or an expression involving attribute(s). * * @author Mark Hall * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision: 9047 $ */ public class CostMatrix implements Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -1973792250544554965L; private int m_size; /** [rows][columns] */ protected Object[][] m_matrix; /** The deafult file extension for cost matrix files */ public static String FILE_EXTENSION = ".cost"; /** * Creates a default cost matrix of a particular size. 
All diagonal values * will be 0 and all non-diagonal values 1. * * @param numOfClasses the number of classes that the cost matrix holds. */ public CostMatrix(int numOfClasses) { m_size = numOfClasses; initialize(); } /** * Creates a cost matrix that is a copy of another. * * @param toCopy the matrix to copy. */ public CostMatrix(CostMatrix toCopy) { this(toCopy.size()); for (int i = 0; i < m_size; i++) { for (int j = 0; j < m_size; j++) { setCell(i, j, toCopy.getCell(i, j)); } } } /** * Initializes the matrix */ public void initialize() { m_matrix = new Object[m_size][m_size]; for (int i = 0; i < m_size; i++) { for (int j = 0; j < m_size; j++) { setCell(i, j, i == j ? new Double(0.0) : new Double(1.0)); } } } /** * The number of rows (and columns) * * @return the size of the matrix */ public int size() { return m_size; } /** * Same as size * * @return the number of columns */ public int numColumns() { return size(); } /** * Same as size * * @return the number of rows */ public int numRows() { return size(); } private boolean replaceStrings() throws Exception { boolean nonDouble = false; for (int i = 0; i < m_size; i++) { for (int j = 0; j < m_size; j++) { if (getCell(i, j) instanceof String) { AttributeExpression temp = new AttributeExpression(); temp.convertInfixToPostfix((String) getCell(i, j)); setCell(i, j, temp); nonDouble = true; } else if (getCell(i, j) instanceof AttributeExpression) { nonDouble = true; } } } return nonDouble; } /** * Applies the cost matrix to a set of instances. If a random number generator * is supplied the instances will be resampled, otherwise they will be * rewighted. Adapted from code once sitting in Instances.java * * @param data the instances to reweight. * @param random a random number generator for resampling, if null then * instances are rewighted. * @return a new dataset reflecting the cost of misclassification. * @exception Exception if the data has no class or the matrix in * inappropriate. 
*/ public Instances applyCostMatrix(Instances data, Random random) throws Exception { double sumOfWeightFactors = 0, sumOfMissClassWeights, sumOfWeights; double[] weightOfInstancesInClass, weightFactor, weightOfInstances; Instances newData; if (data.classIndex() < 0) { throw new Exception("Class index is not set!"); } if (size() != data.numClasses()) { throw new Exception("Misclassification cost matrix has wrong format!"); } // are there any non-fixed, per-instance costs defined in the matrix? if (replaceStrings()) { // could reweight in the two class case if (data.classAttribute().numValues() > 2) { throw new Exception("Can't resample/reweight instances using " + "non-fixed cost values when there are more " + "than two classes!"); } else { // Store new weights weightOfInstances = new double[data.numInstances()]; for (int i = 0; i < data.numInstances(); i++) { Instance inst = data.instance(i); int classValIndex = (int) inst.classValue(); double factor = 1.0; Object element = (classValIndex == 0) ? 
getCell(classValIndex, 1) : getCell(classValIndex, 0); if (element instanceof Double) { factor = ((Double) element).doubleValue(); } else { factor = ((AttributeExpression) element).evaluateExpression(inst); } weightOfInstances[i] = inst.weight() * factor; /* * System.err.println("Multiplying " + * inst.classAttribute().value((int)inst.classValue()) +" by factor " * + factor); */ } // Change instances weight or do resampling if (random != null) { return data.resampleWithWeights(random, weightOfInstances); } else { Instances instances = new Instances(data); for (int i = 0; i < data.numInstances(); i++) { instances.instance(i).setWeight(weightOfInstances[i]); } return instances; } } } weightFactor = new double[data.numClasses()]; weightOfInstancesInClass = new double[data.numClasses()]; for (int j = 0; j < data.numInstances(); j++) { weightOfInstancesInClass[(int) data.instance(j).classValue()] += data .instance(j).weight(); } sumOfWeights = Utils.sum(weightOfInstancesInClass); // normalize the matrix if not already for (int i = 0; i < m_size; i++) { if (!Utils.eq(((Double) getCell(i, i)).doubleValue(), 0)) { CostMatrix normMatrix = new CostMatrix(this); normMatrix.normalize(); return normMatrix.applyCostMatrix(data, random); } } for (int i = 0; i < data.numClasses(); i++) { // Using Kai Ming Ting's formula for deriving weights for // the classes and Breiman's heuristic for multiclass // problems. sumOfMissClassWeights = 0; for (int j = 0; j < data.numClasses(); j++) { if (Utils.sm(((Double) getCell(i, j)).doubleValue(), 0)) { throw new Exception("Neg. 
weights in misclassification " + "cost matrix!"); } sumOfMissClassWeights += ((Double) getCell(i, j)).doubleValue(); } weightFactor[i] = sumOfMissClassWeights * sumOfWeights; sumOfWeightFactors += sumOfMissClassWeights * weightOfInstancesInClass[i]; } for (int i = 0; i < data.numClasses(); i++) { weightFactor[i] /= sumOfWeightFactors; } // Store new weights weightOfInstances = new double[data.numInstances()]; for (int i = 0; i < data.numInstances(); i++) { weightOfInstances[i] = data.instance(i).weight() * weightFactor[(int) data.instance(i).classValue()]; } // Change instances weight or do resampling if (random != null) { return data.resampleWithWeights(random, weightOfInstances); } else { Instances instances = new Instances(data); for (int i = 0; i < data.numInstances(); i++) { instances.instance(i).setWeight(weightOfInstances[i]); } return instances; } } /** * Calculates the expected misclassification cost for each possible class * value, given class probability estimates. * * @param classProbs the class probability estimates. * @return the expected costs. * @exception Exception if the wrong number of class probabilities is * supplied. */ public double[] expectedCosts(double[] classProbs) throws Exception { if (classProbs.length != m_size) { throw new Exception("Length of probability estimates don't " + "match cost matrix"); } double[] costs = new double[m_size]; for (int x = 0; x < m_size; x++) { for (int y = 0; y < m_size; y++) { Object element = getCell(y, x); if (!(element instanceof Double)) { throw new Exception("Can't use non-fixed costs in " + "computing expected costs."); } costs[x] += classProbs[y] * ((Double) element).doubleValue(); } } return costs; } /** * Calculates the expected misclassification cost for each possible class * value, given class probability estimates. * * @param classProbs the class probability estimates. * @param inst the current instance for which the class probabilites apply. Is * used for computing any non-fixed cost values. 
* @return the expected costs. * @exception Exception if something goes wrong */ public double[] expectedCosts(double[] classProbs, Instance inst) throws Exception { if (classProbs.length != m_size) { throw new Exception("Length of probability estimates don't " + "match cost matrix"); } if (!replaceStrings()) { return expectedCosts(classProbs); } double[] costs = new double[m_size]; for (int x = 0; x < m_size; x++) { for (int y = 0; y < m_size; y++) { Object element = getCell(y, x); double costVal; if (!(element instanceof Double)) { costVal = ((AttributeExpression) element).evaluateExpression(inst); } else { costVal = ((Double) element).doubleValue(); } costs[x] += classProbs[y] * costVal; } } return costs; } /** * Gets the maximum cost for a particular class value. * * @param classVal the class value. * @return the maximum cost. * @exception Exception if cost matrix contains non-fixed costs */ public double getMaxCost(int classVal) throws Exception { double maxCost = Double.NEGATIVE_INFINITY; for (int i = 0; i < m_size; i++) { Object element = getCell(classVal, i); if (!(element instanceof Double)) { throw new Exception("Can't use non-fixed costs when " + "getting max cost."); } double cost = ((Double) element).doubleValue(); if (cost > maxCost) maxCost = cost; } return maxCost; } /** * Gets the maximum cost for a particular class value. * * @param classVal the class value. * @return the maximum cost. 
* @exception Exception if cost matrix contains non-fixed costs */ public double getMaxCost(int classVal, Instance inst) throws Exception { if (!replaceStrings()) { return getMaxCost(classVal); } double maxCost = Double.NEGATIVE_INFINITY; double cost; for (int i = 0; i < m_size; i++) { Object element = getCell(classVal, i); if (!(element instanceof Double)) { cost = ((AttributeExpression) element).evaluateExpression(inst); } else { cost = ((Double) element).doubleValue(); } if (cost > maxCost) maxCost = cost; } return maxCost; } /** * Normalizes the matrix so that the diagonal contains zeros. * */ public void normalize() { for (int y = 0; y < m_size; y++) { double diag = ((Double) getCell(y, y)).doubleValue(); for (int x = 0; x < m_size; x++) { setCell(x, y, new Double(((Double) getCell(x, y)).doubleValue() - diag)); } } } /** * Loads a cost matrix in the old format from a reader. Adapted from code once * sitting in Instances.java * * @param reader the reader to get the values from. * @exception Exception if the matrix cannot be read correctly. */ public void readOldFormat(Reader reader) throws Exception { StreamTokenizer tokenizer; int currentToken; double firstIndex, secondIndex, weight; tokenizer = new StreamTokenizer(reader); initialize(); tokenizer.commentChar('%'); tokenizer.eolIsSignificant(true); while (StreamTokenizer.TT_EOF != (currentToken = tokenizer.nextToken())) { // Skip empty lines if (currentToken == StreamTokenizer.TT_EOL) { continue; } // Get index of first class. if (currentToken != StreamTokenizer.TT_NUMBER) { throw new Exception("Only numbers and comments allowed " + "in cost file!"); } firstIndex = tokenizer.nval; if (!Utils.eq((int) firstIndex, firstIndex)) { throw new Exception("First number in line has to be " + "index of a class!"); } if ((int) firstIndex >= size()) { throw new Exception("Class index out of range!"); } // Get index of second class. 
if (StreamTokenizer.TT_EOF == (currentToken = tokenizer.nextToken())) { throw new Exception("Premature end of file!"); } if (currentToken == StreamTokenizer.TT_EOL) { throw new Exception("Premature end of line!"); } if (currentToken != StreamTokenizer.TT_NUMBER) { throw new Exception("Only numbers and comments allowed " + "in cost file!"); } secondIndex = tokenizer.nval; if (!Utils.eq((int) secondIndex, secondIndex)) { throw new Exception("Second number in line has to be " + "index of a class!"); } if ((int) secondIndex >= size()) { throw new Exception("Class index out of range!"); } if ((int) secondIndex == (int) firstIndex) { throw new Exception("Diagonal of cost matrix non-zero!"); } // Get cost factor. if (StreamTokenizer.TT_EOF == (currentToken = tokenizer.nextToken())) { throw new Exception("Premature end of file!"); } if (currentToken == StreamTokenizer.TT_EOL) { throw new Exception("Premature end of line!"); } if (currentToken != StreamTokenizer.TT_NUMBER) { throw new Exception("Only numbers and comments allowed " + "in cost file!"); } weight = tokenizer.nval; if (!Utils.gr(weight, 0)) { throw new Exception("Only positive weights allowed!"); } setCell((int) firstIndex, (int) secondIndex, new Double(weight)); } } /** * Reads a matrix from a reader. The first line in the file should contain the * number of rows and columns. Subsequent lines contain elements of the * matrix. 
(FracPete: taken from old weka.core.Matrix class) * * @param reader the reader containing the matrix * @throws Exception if an error occurs * @see #write(Writer) */ public CostMatrix(Reader reader) throws Exception { LineNumberReader lnr = new LineNumberReader(reader); String line; int currentRow = -1; while ((line = lnr.readLine()) != null) { // Comments if (line.startsWith("%")) { continue; } StringTokenizer st = new StringTokenizer(line); // Ignore blank lines if (!st.hasMoreTokens()) { continue; } if (currentRow < 0) { int rows = Integer.parseInt(st.nextToken()); if (!st.hasMoreTokens()) { throw new Exception("Line " + lnr.getLineNumber() + ": expected number of columns"); } int cols = Integer.parseInt(st.nextToken()); if (rows != cols) { throw new Exception("Trying to create a non-square cost " + "matrix"); } // m_matrix = new Object[rows][cols]; m_size = rows; initialize(); currentRow++; continue; } else { if (currentRow == m_size) { throw new Exception("Line " + lnr.getLineNumber() + ": too many rows provided"); } for (int i = 0; i < m_size; i++) { if (!st.hasMoreTokens()) { throw new Exception("Line " + lnr.getLineNumber() + ": too few matrix elements provided"); } String nextTok = st.nextToken(); // try to parse as a double first Double val = null; try { val = new Double(nextTok); double value = val.doubleValue(); } catch (Exception ex) { val = null; } if (val == null) { setCell(currentRow, i, nextTok); } else { setCell(currentRow, i, val); } } currentRow++; } } if (currentRow == -1) { throw new Exception("Line " + lnr.getLineNumber() + ": expected number of rows"); } else if (currentRow != m_size) { throw new Exception("Line " + lnr.getLineNumber() + ": too few rows provided"); } } /** * Writes out a matrix. The format can be read via the CostMatrix(Reader) * constructor. 
(FracPete: taken from old weka.core.Matrix class) * * @param w the output Writer * @throws Exception if an error occurs */ public void write(Writer w) throws Exception { w.write("% Rows\tColumns\n"); w.write("" + m_size + "\t" + m_size + "\n"); w.write("% Matrix elements\n"); for (int i = 0; i < m_size; i++) { for (int j = 0; j < m_size; j++) { w.write("" + getCell(i, j) + "\t"); } w.write("\n"); } w.flush(); } /** * converts the Matrix into a single line Matlab string: matrix is enclosed by * parentheses, rows are separated by semicolon and single cells by blanks, * e.g., [1 2; 3 4]. * * @return the matrix in Matlab single line format */ public String toMatlab() { StringBuffer result; int i; int n; result = new StringBuffer(); result.append("["); for (i = 0; i < m_size; i++) { if (i > 0) { result.append("; "); } for (n = 0; n < m_size; n++) { if (n > 0) { result.append(" "); } result.append(getCell(i, n)); } } result.append("]"); return result.toString(); } /** * creates a matrix from the given Matlab string. 
* * @param matlab the matrix in matlab format * @return the matrix represented by the given string * @see #toMatlab() */ public static CostMatrix parseMatlab(String matlab) throws Exception { StringTokenizer tokRow; StringTokenizer tokCol; int rows; int cols; CostMatrix result; String cells; // get content cells = matlab.substring(matlab.indexOf("[") + 1, matlab.indexOf("]")) .trim(); // determine dimenions tokRow = new StringTokenizer(cells, ";"); rows = tokRow.countTokens(); tokCol = new StringTokenizer(tokRow.nextToken(), " "); cols = tokCol.countTokens(); // fill matrix result = new CostMatrix(rows); tokRow = new StringTokenizer(cells, ";"); rows = 0; while (tokRow.hasMoreTokens()) { tokCol = new StringTokenizer(tokRow.nextToken(), " "); cols = 0; while (tokCol.hasMoreTokens()) { // is it a number String current = tokCol.nextToken(); try { double val = Double.parseDouble(current); result.setCell(rows, cols, new Double(val)); } catch (NumberFormatException e) { // must be an expression result.setCell(rows, cols, current); } cols++; } rows++; } return result; } /** * Set the value of a particular cell in the matrix * * @param rowIndex the row * @param columnIndex the column * @param value the value to set */ public final void setCell(int rowIndex, int columnIndex, Object value) { m_matrix[rowIndex][columnIndex] = value; } /** * Return the contents of a particular cell. Note: this method returns the * Object stored at a particular cell. 
* * @param rowIndex the row * @param columnIndex the column * @return the value at the cell */ public final Object getCell(int rowIndex, int columnIndex) { return m_matrix[rowIndex][columnIndex]; } /** * Return the value of a cell as a double (for legacy code) * * @param rowIndex the row * @param columnIndex the column * @return the value at a particular cell as a double * @exception Exception if the value is not a double */ public final double getElement(int rowIndex, int columnIndex) throws Exception { if (!(m_matrix[rowIndex][columnIndex] instanceof Double)) { throw new Exception("Cost matrix contains non-fixed costs!"); } return ((Double) m_matrix[rowIndex][columnIndex]).doubleValue(); } /** * Return the value of a cell as a double. Computes the value for non-fixed * costs using the supplied Instance * * @param rowIndex the row * @param columnIndex the column * @return the value from a particular cell * @exception Exception if something goes wrong */ public final double getElement(int rowIndex, int columnIndex, Instance inst) throws Exception { if (m_matrix[rowIndex][columnIndex] instanceof Double) { return ((Double) m_matrix[rowIndex][columnIndex]).doubleValue(); } else if (m_matrix[rowIndex][columnIndex] instanceof String) { replaceStrings(); } return ((AttributeExpression) m_matrix[rowIndex][columnIndex]) .evaluateExpression(inst); } /** * Set the value of a cell as a double * * @param rowIndex the row * @param columnIndex the column * @param value the value (double) to set */ public final void setElement(int rowIndex, int columnIndex, double value) { m_matrix[rowIndex][columnIndex] = new Double(value); } /** * Converts a matrix to a string. (FracPete: taken from old weka.core.Matrix * class) * * @return the converted string */ @Override public String toString() { // Determine the width required for the maximum element, // and check for fractional display requirement. 
double maxval = 0; boolean fractional = false; Object element = null; int widthNumber = 0; int widthExpression = 0; for (int i = 0; i < size(); i++) { for (int j = 0; j < size(); j++) { element = getCell(i, j); if (element instanceof Double) { double current = ((Double) element).doubleValue(); if (current < 0) current *= -11; if (current > maxval) maxval = current; double fract = Math.abs(current - Math.rint(current)); if (!fractional && ((Math.log(fract) / Math.log(10)) >= -2)) { fractional = true; } } else { if (element.toString().length() > widthExpression) { widthExpression = element.toString().length(); } } } } if (maxval > 0) { widthNumber = (int) (Math.log(maxval) / Math.log(10) + (fractional ? 4 : 1)); } int width = (widthNumber > widthExpression) ? widthNumber : widthExpression; StringBuffer text = new StringBuffer(); for (int i = 0; i < size(); i++) { for (int j = 0; j < size(); j++) { element = getCell(i, j); if (element instanceof Double) { text.append(" ").append( Utils.doubleToString(((Double) element).doubleValue(), width, (fractional ? 2 : 0))); } else { int diff = width - element.toString().length(); if (diff > 0) { int left = diff % 2; left += diff / 2; String temp = Utils.padLeft(element.toString(), element.toString() .length() + left); temp = Utils.padRight(temp, width); text.append(" ").append(temp); } else { text.append(" ").append(element.toString()); } } } text.append("\n"); } return text.toString(); } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9047 $"); } }
25,102
29.063473
81
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/Evaluation.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Evaluation.java * Copyright (C) 2011-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers; import java.io.Serializable; import java.util.List; import java.util.Random; import weka.classifiers.evaluation.AbstractEvaluationMetric; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.Summarizable; /** * Class for evaluating machine learning models. Delegates to the actual * implementation in weka.classifiers.evaluation.Evaluation. * * <p/> * * ------------------------------------------------------------------- * <p/> * * General options when evaluating a learning scheme from the command-line: * <p/> * * -t filename <br/> * Name of the file with the training data. (required) * <p/> * * -T filename <br/> * Name of the file with the test data. If missing a cross-validation is * performed. * <p/> * * -c index <br/> * Index of the class attribute (1, 2, ...; default: last). * <p/> * * -x number <br/> * The number of folds for the cross-validation (default: 10). * <p/> * * -no-cv <br/> * No cross validation. If no test file is provided, no evaluation is done. * <p/> * * -split-percentage percentage <br/> * Sets the percentage for the train/test set split, e.g., 66. 
* <p/> * * -preserve-order <br/> * Preserves the order in the percentage split instead of randomizing the data * first with the seed value ('-s'). * <p/> * * -s seed <br/> * Random number seed for the cross-validation and percentage split (default: * 1). * <p/> * * -m filename <br/> * The name of a file containing a cost matrix. * <p/> * * -l filename <br/> * Loads classifier from the given file. In case the filename ends with ".xml", * a PMML file is loaded or, if that fails, options are loaded from XML. * <p/> * * -d filename <br/> * Saves classifier built from the training data into the given file. In case * the filename ends with ".xml" the options are saved XML, not the model. * <p/> * * -v <br/> * Outputs no statistics for the training data. * <p/> * * -o <br/> * Outputs statistics only, not the classifier. * <p/> * * -i <br/> * Outputs information-retrieval statistics per class. * <p/> * * -k <br/> * Outputs information-theoretic statistics. * <p/> * * -classifications * "weka.classifiers.evaluation.output.prediction.AbstractOutput + options" <br/> * Uses the specified class for generating the classification output. E.g.: * weka.classifiers.evaluation.output.prediction.PlainText or : * weka.classifiers.evaluation.output.prediction.CSV * * -p range <br/> * Outputs predictions for test instances (or the train instances if no test * instances provided and -no-cv is used), along with the attributes in the * specified range (and nothing else). Use '-p 0' if no attributes are desired. * <p/> * Deprecated: use "-classifications ..." instead. * <p/> * * -distribution <br/> * Outputs the distribution instead of only the prediction in conjunction with * the '-p' option (only nominal classes). * <p/> * Deprecated: use "-classifications ..." instead. * <p/> * * -no-predictions <br/> * Turns off the collection of predictions in order to conserve memory. * <p/> * * -r <br/> * Outputs cumulative margin distribution (and nothing else). 
* <p/> * * -g <br/> * Only for classifiers that implement "Graphable." Outputs the graph * representation of the classifier (and nothing else). * <p/> * * -xml filename | xml-string <br/> * Retrieves the options from the XML-data instead of the command line. * <p/> * * -threshold-file file <br/> * The file to save the threshold data to. The format is determined by the * extensions, e.g., '.arff' for ARFF format or '.csv' for CSV. * <p/> * * -threshold-label label <br/> * The class label to determine the threshold data for (default is the first * label) * <p/> * * ------------------------------------------------------------------- * <p/> * * Example usage as the main of a classifier (called FunkyClassifier): * <code> <pre> * public static void main(String [] args) { * runClassifier(new FunkyClassifier(), args); * } * </pre> </code> * <p/> * * ------------------------------------------------------------------ * <p/> * * Example usage from within an application: <code> <pre> * Instances trainInstances = ... instances got from somewhere * Instances testInstances = ... instances got from somewhere * Classifier scheme = ... 
scheme got from somewhere * * Evaluation evaluation = new Evaluation(trainInstances); * evaluation.evaluateModel(scheme, testInstances); * System.out.println(evaluation.toSummaryString()); * </pre> </code> * * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Len Trigg (trigg@cs.waikato.ac.nz) * @version $Revision: 9324 $ */ public class Evaluation implements Serializable, Summarizable, RevisionHandler { /** For serialization */ private static final long serialVersionUID = -170766452472965668L; public static final String[] BUILT_IN_EVAL_METRICS = { "Correct", "Incorrect", "Kappa", "Total cost", "Average cost", "KB relative", "KB information", "Correlation", "Complexity 0", "Complexity scheme", "Complexity improvement", "MAE", "RMSE", "RAE", "RRSE", "Coverage", "Region size", "TP rate", "FP rate", "Precision", "Recall", "F-measure", "MCC", "ROC area", "PRC area" }; /** The actual evaluation object that we delegate to */ protected weka.classifiers.evaluation.Evaluation m_delegate; /** * Utility method to get a list of the names of all built-in and plugin * evaluation metrics * * @return the complete list of available evaluation metrics */ public static List<String> getAllEvaluationMetricNames() { return weka.classifiers.evaluation.Evaluation.getAllEvaluationMetricNames(); } public Evaluation(Instances data) throws Exception { m_delegate = new weka.classifiers.evaluation.Evaluation(data); } public Evaluation(Instances data, CostMatrix costMatrix) throws Exception { m_delegate = new weka.classifiers.evaluation.Evaluation(data, costMatrix); } /** * Returns the header of the underlying dataset. 
* * @return the header information */ public Instances getHeader() { return m_delegate.getHeader(); } /** * Returns the list of plugin metrics in use (or null if there are none) * * @return the list of plugin metrics */ public List<AbstractEvaluationMetric> getPluginMetrics() { return m_delegate.getPluginMetrics(); } /** * Get the named plugin evaluation metric * * @param name the name of the metric (as returned by * AbstractEvaluationMetric.getName()) or the fully qualified class * name of the metric to find * * @return the metric or null if the metric is not in the list of plugin * metrics */ public AbstractEvaluationMetric getPluginMetric(String name) { return m_delegate.getPluginMetric(name); } /** * Set a list of the names of metrics to have appear in the output. The * default is to display all built in metrics and plugin metrics that haven't * been globally disabled. * * @param display a list of metric names to have appear in the output */ public void setMetricsToDisplay(List<String> display) { m_delegate.setMetricsToDisplay(display); } /** * Get a list of the names of metrics to have appear in the output The default * is to display all built in metrics and plugin metrics that haven't been * globally disabled. * * @param display a list of metric names to have appear in the output */ public List<String> getMetricsToDisplay() { return m_delegate.getMetricsToDisplay(); } /** * Remove the supplied list of metrics from the list of those to display. * * @param metricsNotToDisplay */ public void dontDisplayMetrics(List<String> metricsNotToDisplay) { m_delegate.dontDisplayMetrics(metricsNotToDisplay); } /** * Sets whether to discard predictions, ie, not storing them for future * reference via predictions() method in order to conserve memory. 
* * @param value true if to discard the predictions * @see #predictions() */ public void setDiscardPredictions(boolean value) { m_delegate.setDiscardPredictions(value); } /** * Returns whether predictions are not recorded at all, in order to conserve * memory. * * @return true if predictions are not recorded * @see #predictions() */ public boolean getDiscardPredictions() { return m_delegate.getDiscardPredictions(); } /** * Returns the area under ROC for those predictions that have been collected * in the evaluateClassifier(Classifier, Instances) method. Returns * Utils.missingValue() if the area is not available. * * @param classIndex the index of the class to consider as "positive" * @return the area under the ROC curve or not a number */ public double areaUnderROC(int classIndex) { return m_delegate.areaUnderROC(classIndex); } /** * Calculates the weighted (by class size) AUC. * * @return the weighted AUC. */ public double weightedAreaUnderROC() { return m_delegate.weightedAreaUnderROC(); } /** * Returns the area under precision-recall curve (AUPRC) for those predictions * that have been collected in the evaluateClassifier(Classifier, Instances) * method. Returns Utils.missingValue() if the area is not available. * * @param classIndex the index of the class to consider as "positive" * @return the area under the precision-recall curve or not a number */ public double areaUnderPRC(int classIndex) { return m_delegate.areaUnderPRC(classIndex); } /** * Calculates the weighted (by class size) AUPRC. * * @return the weighted AUPRC. */ public double weightedAreaUnderPRC() { return m_delegate.weightedAreaUnderPRC(); } /** * Returns a copy of the confusion matrix. * * @return a copy of the confusion matrix as a two-dimensional array */ public double[][] confusionMatrix() { return m_delegate.confusionMatrix(); } /** * Performs a (stratified if class is nominal) cross-validation for a * classifier on a set of instances. 
Now performs a deep copy of the * classifier before each call to buildClassifier() (just in case the * classifier is not initialized properly). * * @param classifier the classifier with any options set. * @param data the data on which the cross-validation is to be performed * @param numFolds the number of folds for the cross-validation * @param random random number generator for randomization * @param forPredictionsPrinting varargs parameter that, if supplied, is * expected to hold a * weka.classifiers.evaluation.output.prediction.AbstractOutput * object * @throws Exception if a classifier could not be generated successfully or * the class is not defined */ public void crossValidateModel(Classifier classifier, Instances data, int numFolds, Random random, Object... forPredictionsPrinting) throws Exception { m_delegate.crossValidateModel(classifier, data, numFolds, random, forPredictionsPrinting); } /** * Performs a (stratified if class is nominal) cross-validation for a * classifier on a set of instances. * * @param classifierString a string naming the class of the classifier * @param data the data on which the cross-validation is to be performed * @param numFolds the number of folds for the cross-validation * @param options the options to the classifier. Any options * @param random the random number generator for randomizing the data accepted * by the classifier will be removed from this array. * @throws Exception if a classifier could not be generated successfully or * the class is not defined */ public void crossValidateModel(String classifierString, Instances data, int numFolds, String[] options, Random random) throws Exception { m_delegate.crossValidateModel(classifierString, data, numFolds, options, random); } /** * Evaluates a classifier with the options given in an array of strings. * <p/> * * Valid options are: * <p/> * * -t filename <br/> * Name of the file with the training data. 
(required) * <p/> * * -T filename <br/> * Name of the file with the test data. If missing a cross-validation is * performed. * <p/> * * -c index <br/> * Index of the class attribute (1, 2, ...; default: last). * <p/> * * -x number <br/> * The number of folds for the cross-validation (default: 10). * <p/> * * -no-cv <br/> * No cross validation. If no test file is provided, no evaluation is done. * <p/> * * -split-percentage percentage <br/> * Sets the percentage for the train/test set split, e.g., 66. * <p/> * * -preserve-order <br/> * Preserves the order in the percentage split instead of randomizing the data * first with the seed value ('-s'). * <p/> * * -s seed <br/> * Random number seed for the cross-validation and percentage split (default: * 1). * <p/> * * -m filename <br/> * The name of a file containing a cost matrix. * <p/> * * -l filename <br/> * Loads classifier from the given file. In case the filename ends with * ".xml",a PMML file is loaded or, if that fails, options are loaded from * XML. * <p/> * * -d filename <br/> * Saves classifier built from the training data into the given file. In case * the filename ends with ".xml" the options are saved XML, not the model. * <p/> * * -v <br/> * Outputs no statistics for the training data. * <p/> * * -o <br/> * Outputs statistics only, not the classifier. * <p/> * * -i <br/> * Outputs detailed information-retrieval statistics per class. * <p/> * * -k <br/> * Outputs information-theoretic statistics. * <p/> * * -classifications * "weka.classifiers.evaluation.output.prediction.AbstractOutput + options" <br/> * Uses the specified class for generating the classification output. E.g.: * weka.classifiers.evaluation.output.prediction.PlainText or : * weka.classifiers.evaluation.output.prediction.CSV * * -p range <br/> * Outputs predictions for test instances (or the train instances if no test * instances provided and -no-cv is used), along with the attributes in the * specified range (and nothing else). 
Use '-p 0' if no attributes are * desired. * <p/> * Deprecated: use "-classifications ..." instead. * <p/> * * -distribution <br/> * Outputs the distribution instead of only the prediction in conjunction with * the '-p' option (only nominal classes). * <p/> * Deprecated: use "-classifications ..." instead. * <p/> * * -no-predictions <br/> * Turns off the collection of predictions in order to conserve memory. * <p/> * * -r <br/> * Outputs cumulative margin distribution (and nothing else). * <p/> * * -g <br/> * Only for classifiers that implement "Graphable." Outputs the graph * representation of the classifier (and nothing else). * <p/> * * -xml filename | xml-string <br/> * Retrieves the options from the XML-data instead of the command line. * <p/> * * -threshold-file file <br/> * The file to save the threshold data to. The format is determined by the * extensions, e.g., '.arff' for ARFF format or '.csv' for CSV. * <p/> * * -threshold-label label <br/> * The class label to determine the threshold data for (default is the first * label) * <p/> * * @param classifierString class of machine learning classifier as a string * @param options the array of string containing the options * @throws Exception if model could not be evaluated successfully * @return a string describing the results */ public static String evaluateModel(String classifierString, String[] options) throws Exception { return weka.classifiers.evaluation.Evaluation.evaluateModel( classifierString, options); } /** * Evaluates a classifier with the options given in an array of strings. * <p/> * * Valid options are: * <p/> * * -t name of training file <br/> * Name of the file with the training data. (required) * <p/> * * -T name of test file <br/> * Name of the file with the test data. If missing a cross-validation is * performed. * <p/> * * -c class index <br/> * Index of the class attribute (1, 2, ...; default: last). 
* <p/> * * -x number of folds <br/> * The number of folds for the cross-validation (default: 10). * <p/> * * -no-cv <br/> * No cross validation. If no test file is provided, no evaluation is done. * <p/> * * -split-percentage percentage <br/> * Sets the percentage for the train/test set split, e.g., 66. * <p/> * * -preserve-order <br/> * Preserves the order in the percentage split instead of randomizing the data * first with the seed value ('-s'). * <p/> * * -s seed <br/> * Random number seed for the cross-validation and percentage split (default: * 1). * <p/> * * -m file with cost matrix <br/> * The name of a file containing a cost matrix. * <p/> * * -l filename <br/> * Loads classifier from the given file. In case the filename ends with * ".xml",a PMML file is loaded or, if that fails, options are loaded from * XML. * <p/> * * -d filename <br/> * Saves classifier built from the training data into the given file. In case * the filename ends with ".xml" the options are saved XML, not the model. * <p/> * * -v <br/> * Outputs no statistics for the training data. * <p/> * * -o <br/> * Outputs statistics only, not the classifier. * <p/> * * -i <br/> * Outputs detailed information-retrieval statistics per class. * <p/> * * -k <br/> * Outputs information-theoretic statistics. * <p/> * * -classifications * "weka.classifiers.evaluation.output.prediction.AbstractOutput + options" <br/> * Uses the specified class for generating the classification output. E.g.: * weka.classifiers.evaluation.output.prediction.PlainText or : * weka.classifiers.evaluation.output.prediction.CSV * * -p range <br/> * Outputs predictions for test instances (or the train instances if no test * instances provided and -no-cv is used), along with the attributes in the * specified range (and nothing else). Use '-p 0' if no attributes are * desired. * <p/> * Deprecated: use "-classifications ..." instead. 
* <p/> * * -distribution <br/> * Outputs the distribution instead of only the prediction in conjunction with * the '-p' option (only nominal classes). * <p/> * Deprecated: use "-classifications ..." instead. * <p/> * * -no-predictions <br/> * Turns off the collection of predictions in order to conserve memory. * <p/> * * -r <br/> * Outputs cumulative margin distribution (and nothing else). * <p/> * * -g <br/> * Only for classifiers that implement "Graphable." Outputs the graph * representation of the classifier (and nothing else). * <p/> * * -xml filename | xml-string <br/> * Retrieves the options from the XML-data instead of the command line. * <p/> * * @param classifier machine learning classifier * @param options the array of string containing the options * @throws Exception if model could not be evaluated successfully * @return a string describing the results */ public static String evaluateModel(Classifier classifier, String[] options) throws Exception { return weka.classifiers.evaluation.Evaluation.evaluateModel(classifier, options); } /** * Evaluates the classifier on a given set of instances. Note that the data * must have exactly the same format (e.g. order of attributes) as the data * used to train the classifier! Otherwise the results will generally be * meaningless. * * @param classifier machine learning classifier * @param data set of test instances for evaluation * @param forPredictionsPrinting varargs parameter that, if supplied, is * expected to hold a * weka.classifiers.evaluation.output.prediction.AbstractOutput * object * @return the predictions * @throws Exception if model could not be evaluated successfully */ public double[] evaluateModel(Classifier classifier, Instances data, Object... forPredictionsPrinting) throws Exception { return m_delegate.evaluateModel(classifier, data, forPredictionsPrinting); } /** * Evaluates the supplied distribution on a single instance. 
* * @param dist the supplied distribution * @param instance the test instance to be classified * @param storePredictions whether to store predictions for nominal classifier * @return the prediction * @throws Exception if model could not be evaluated successfully */ public double evaluationForSingleInstance(double[] dist, Instance instance, boolean storePredictions) throws Exception { return m_delegate.evaluationForSingleInstance(dist, instance, storePredictions); } /** * Evaluates the classifier on a single instance and records the prediction. * * @param classifier machine learning classifier * @param instance the test instance to be classified * @return the prediction made by the clasifier * @throws Exception if model could not be evaluated successfully or the data * contains string attributes */ public double evaluateModelOnceAndRecordPrediction(Classifier classifier, Instance instance) throws Exception { return m_delegate .evaluateModelOnceAndRecordPrediction(classifier, instance); } /** * Evaluates the classifier on a single instance. * * @param classifier machine learning classifier * @param instance the test instance to be classified * @return the prediction made by the clasifier * @throws Exception if model could not be evaluated successfully or the data * contains string attributes */ public double evaluateModelOnce(Classifier classifier, Instance instance) throws Exception { return m_delegate.evaluateModelOnce(classifier, instance); } /** * Evaluates the supplied distribution on a single instance. * * @param dist the supplied distribution * @param instance the test instance to be classified * @return the prediction * @throws Exception if model could not be evaluated successfully */ public double evaluateModelOnce(double[] dist, Instance instance) throws Exception { return m_delegate.evaluateModelOnce(dist, instance); } /** * Evaluates the supplied distribution on a single instance. 
* * @param dist the supplied distribution * @param instance the test instance to be classified * @return the prediction * @throws Exception if model could not be evaluated successfully */ public double evaluateModelOnceAndRecordPrediction(double[] dist, Instance instance) throws Exception { return m_delegate.evaluateModelOnceAndRecordPrediction(dist, instance); } /** * Evaluates the supplied prediction on a single instance. * * @param prediction the supplied prediction * @param instance the test instance to be classified * @throws Exception if model could not be evaluated successfully */ public void evaluateModelOnce(double prediction, Instance instance) throws Exception { m_delegate.evaluateModelOnce(prediction, instance); } /** * Returns the predictions that have been collected. * * @return a reference to the FastVector containing the predictions that have * been collected. This should be null if no predictions have been * collected. */ public FastVector predictions() { return m_delegate.predictions(); } /** * Wraps a static classifier in enough source to test using the weka class * libraries. * * @param classifier a Sourcable Classifier * @param className the name to give to the source code class * @return the source for a static classifier that can be tested with weka * libraries. * @throws Exception if code-generation fails */ public static String wekaStaticWrapper(Sourcable classifier, String className) throws Exception { return weka.classifiers.evaluation.Evaluation.wekaStaticWrapper(classifier, className); } /** * Gets the number of test instances that had a known class value (actually * the sum of the weights of test instances with known class value). * * @return the number of test instances with known class */ public final double numInstances() { return m_delegate.numInstances(); } /** * Gets the coverage of the test cases by the predicted regions at the * confidence level specified when evaluation was performed. 
* * @return the coverage of the test cases by the predicted regions */ public final double coverageOfTestCasesByPredictedRegions() { return m_delegate.coverageOfTestCasesByPredictedRegions(); } /** * Gets the average size of the predicted regions, relative to the range of * the target in the training data, at the confidence level specified when * evaluation was performed. * * @return the average size of the predicted regions */ public final double sizeOfPredictedRegions() { return m_delegate.sizeOfPredictedRegions(); } /** * Gets the number of instances incorrectly classified (that is, for which an * incorrect prediction was made). (Actually the sum of the weights of these * instances) * * @return the number of incorrectly classified instances */ public final double incorrect() { return m_delegate.incorrect(); } /** * Gets the percentage of instances incorrectly classified (that is, for which * an incorrect prediction was made). * * @return the percent of incorrectly classified instances (between 0 and 100) */ public final double pctIncorrect() { return m_delegate.pctIncorrect(); } /** * Gets the total cost, that is, the cost of each prediction times the weight * of the instance, summed over all instances. * * @return the total cost */ public final double totalCost() { return m_delegate.totalCost(); } /** * Gets the average cost, that is, total cost of misclassifications (incorrect * plus unclassified) over the total number of instances. * * @return the average cost. */ public final double avgCost() { return m_delegate.avgCost(); } /** * Gets the number of instances correctly classified (that is, for which a * correct prediction was made). (Actually the sum of the weights of these * instances) * * @return the number of correctly classified instances */ public final double correct() { return m_delegate.correct(); } /** * Gets the percentage of instances correctly classified (that is, for which a * correct prediction was made). 
* * @return the percent of correctly classified instances (between 0 and 100) */ public final double pctCorrect() { return m_delegate.pctCorrect(); } /** * Gets the number of instances not classified (that is, for which no * prediction was made by the classifier). (Actually the sum of the weights of * these instances) * * @return the number of unclassified instances */ public final double unclassified() { return m_delegate.unclassified(); } /** * Gets the percentage of instances not classified (that is, for which no * prediction was made by the classifier). * * @return the percent of unclassified instances (between 0 and 100) */ public final double pctUnclassified() { return m_delegate.pctUnclassified(); } /** * Returns the estimated error rate or the root mean squared error (if the * class is numeric). If a cost matrix was given this error rate gives the * average cost. * * @return the estimated error rate (between 0 and 1, or between 0 and maximum * cost) */ public final double errorRate() { return m_delegate.errorRate(); } /** * Returns value of kappa statistic if class is nominal. * * @return the value of the kappa statistic */ public final double kappa() { return m_delegate.kappa(); } @Override public String getRevision() { return m_delegate.getRevision(); } /** * Returns the correlation coefficient if the class is numeric. * * @return the correlation coefficient * @throws Exception if class is not numeric */ public final double correlationCoefficient() throws Exception { return m_delegate.correlationCoefficient(); } /** * Returns the mean absolute error. Refers to the error of the predicted * values for numeric classes, and the error of the predicted probability * distribution for nominal classes. * * @return the mean absolute error */ public final double meanAbsoluteError() { return m_delegate.meanAbsoluteError(); } /** * Returns the mean absolute error of the prior. 
* * @return the mean absolute error */ public final double meanPriorAbsoluteError() { return m_delegate.meanPriorAbsoluteError(); } /** * Returns the relative absolute error. * * @return the relative absolute error * @throws Exception if it can't be computed */ public final double relativeAbsoluteError() throws Exception { return m_delegate.relativeAbsoluteError(); } /** * Returns the root mean squared error. * * @return the root mean squared error */ public final double rootMeanSquaredError() { return m_delegate.rootMeanSquaredError(); } /** * Returns the root mean prior squared error. * * @return the root mean prior squared error */ public final double rootMeanPriorSquaredError() { return m_delegate.rootMeanPriorSquaredError(); } /** * Returns the root relative squared error if the class is numeric. * * @return the root relative squared error */ public final double rootRelativeSquaredError() { return m_delegate.rootRelativeSquaredError(); } /** * Calculate the entropy of the prior distribution. * * @return the entropy of the prior distribution * @throws Exception if the class is not nominal */ public final double priorEntropy() throws Exception { return m_delegate.priorEntropy(); } /** * Return the total Kononenko & Bratko Information score in bits. * * @return the K&B information score * @throws Exception if the class is not nominal */ public final double KBInformation() throws Exception { return m_delegate.KBInformation(); } /** * Return the Kononenko & Bratko Information score in bits per instance. * * @return the K&B information score * @throws Exception if the class is not nominal */ public final double KBMeanInformation() throws Exception { return m_delegate.KBMeanInformation(); } /** * Return the Kononenko & Bratko Relative Information score. 
* * @return the K&B relative information score * @throws Exception if the class is not nominal */ public final double KBRelativeInformation() throws Exception { return m_delegate.KBRelativeInformation(); } /** * Returns the total entropy for the null model. * * @return the total null model entropy */ public final double SFPriorEntropy() { return m_delegate.SFPriorEntropy(); } /** * Returns the entropy per instance for the null model. * * @return the null model entropy per instance */ public final double SFMeanPriorEntropy() { return m_delegate.SFMeanPriorEntropy(); } /** * Returns the total entropy for the scheme. * * @return the total scheme entropy */ public final double SFSchemeEntropy() { return m_delegate.SFSchemeEntropy(); } /** * Returns the entropy per instance for the scheme. * * @return the scheme entropy per instance */ public final double SFMeanSchemeEntropy() { return m_delegate.SFMeanSchemeEntropy(); } /** * Returns the total SF, which is the null model entropy minus the scheme * entropy. * * @return the total SF */ public final double SFEntropyGain() { return m_delegate.SFEntropyGain(); } /** * Returns the SF per instance, which is the null model entropy minus the * scheme entropy, per instance. * * @return the SF per instance */ public final double SFMeanEntropyGain() { return m_delegate.SFMeanEntropyGain(); } /** * Output the cumulative margin distribution as a string suitable for input * for gnuplot or similar package. * * @return the cumulative margin distribution * @throws Exception if the class attribute is nominal */ public String toCumulativeMarginDistributionString() throws Exception { return m_delegate.toCumulativeMarginDistributionString(); } /** * Calls toSummaryString() with no title and no complexity stats. * * @return a summary description of the classifier evaluation */ @Override public String toSummaryString() { return m_delegate.toSummaryString(); } /** * Calls toSummaryString() with a default title. 
* * @param printComplexityStatistics if true, complexity statistics are * returned as well * @return the summary string */ public String toSummaryString(boolean printComplexityStatistics) { return m_delegate.toSummaryString(printComplexityStatistics); } /** * Outputs the performance statistics in summary form. Lists number (and * percentage) of instances classified correctly, incorrectly and * unclassified. Outputs the total number of instances classified, and the * number of instances (if any) that had no class value provided. * * @param title the title for the statistics * @param printComplexityStatistics if true, complexity statistics are * returned as well * @return the summary as a String */ public String toSummaryString(String title, boolean printComplexityStatistics) { return m_delegate.toSummaryString(title, printComplexityStatistics); } /** * Calls toMatrixString() with a default title. * * @return the confusion matrix as a string * @throws Exception if the class is numeric */ public String toMatrixString() throws Exception { return m_delegate.toMatrixString(); } /** * Outputs the performance statistics as a classification confusion matrix. * For each class value, shows the distribution of predicted class values. * * @param title the title for the confusion matrix * @return the confusion matrix as a String * @throws Exception if the class is numeric */ public String toMatrixString(String title) throws Exception { return m_delegate.toMatrixString(title); } /** * Generates a breakdown of the accuracy for each class (with default title), * incorporating various information-retrieval statistics, such as true/false * positive rate, precision/recall/F-Measure. Should be useful for ROC curves, * recall/precision curves. 
* * @return the statistics presented as a string * @throws Exception if class is not nominal */ public String toClassDetailsString() throws Exception { return m_delegate.toClassDetailsString(); } /** * Generates a breakdown of the accuracy for each class, incorporating various * information-retrieval statistics, such as true/false positive rate, * precision/recall/F-Measure. Should be useful for ROC curves, * recall/precision curves. * * @param title the title to prepend the stats string with * @return the statistics presented as a string * @throws Exception if class is not nominal */ public String toClassDetailsString(String title) throws Exception { return m_delegate.toClassDetailsString(title); } /** * Calculate the number of true positives with respect to a particular class. * This is defined as * <p/> * * <pre> * correctly classified positives * </pre> * * @param classIndex the index of the class to consider as "positive" * @return the true positive rate */ public double numTruePositives(int classIndex) { return m_delegate.numTruePositives(classIndex); } /** * Calculate the true positive rate with respect to a particular class. This * is defined as * <p/> * * <pre> * correctly classified positives * ------------------------------ * total positives * </pre> * * @param classIndex the index of the class to consider as "positive" * @return the true positive rate */ public double truePositiveRate(int classIndex) { return m_delegate.truePositiveRate(classIndex); } /** * Calculates the weighted (by class size) true positive rate. * * @return the weighted true positive rate. */ public double weightedTruePositiveRate() { return m_delegate.weightedTruePositiveRate(); } /** * Calculate the number of true negatives with respect to a particular class. 
* This is defined as * <p/> * * <pre> * correctly classified negatives * </pre> * * @param classIndex the index of the class to consider as "positive" * @return the true positive rate */ public double numTrueNegatives(int classIndex) { return m_delegate.numTrueNegatives(classIndex); } /** * Calculate the true negative rate with respect to a particular class. This * is defined as * <p/> * * <pre> * correctly classified negatives * ------------------------------ * total negatives * </pre> * * @param classIndex the index of the class to consider as "positive" * @return the true positive rate */ public double trueNegativeRate(int classIndex) { return m_delegate.trueNegativeRate(classIndex); } /** * Calculates the weighted (by class size) true negative rate. * * @return the weighted true negative rate. */ public double weightedTrueNegativeRate() { return m_delegate.weightedTrueNegativeRate(); } /** * Calculate number of false positives with respect to a particular class. * This is defined as * <p/> * * <pre> * incorrectly classified negatives * </pre> * * @param classIndex the index of the class to consider as "positive" * @return the false positive rate */ public double numFalsePositives(int classIndex) { return m_delegate.numFalsePositives(classIndex); } /** * Calculate the false positive rate with respect to a particular class. This * is defined as * <p/> * * <pre> * incorrectly classified negatives * -------------------------------- * total negatives * </pre> * * @param classIndex the index of the class to consider as "positive" * @return the false positive rate */ public double falsePositiveRate(int classIndex) { return m_delegate.falsePositiveRate(classIndex); } /** * Calculates the weighted (by class size) false positive rate. * * @return the weighted false positive rate. */ public double weightedFalsePositiveRate() { return m_delegate.weightedFalsePositiveRate(); } /** * Calculate number of false negatives with respect to a particular class. 
* This is defined as * <p/> * * <pre> * incorrectly classified positives * </pre> * * @param classIndex the index of the class to consider as "positive" * @return the false positive rate */ public double numFalseNegatives(int classIndex) { return m_delegate.numFalseNegatives(classIndex); } /** * Calculate the false negative rate with respect to a particular class. This * is defined as * <p/> * * <pre> * incorrectly classified positives * -------------------------------- * total positives * </pre> * * @param classIndex the index of the class to consider as "positive" * @return the false positive rate */ public double falseNegativeRate(int classIndex) { return m_delegate.falseNegativeRate(classIndex); } /** * Calculates the weighted (by class size) false negative rate. * * @return the weighted false negative rate. */ public double weightedFalseNegativeRate() { return m_delegate.weightedFalseNegativeRate(); } /** * Calculates the matthews correlation coefficient (sometimes called phi * coefficient) for the supplied class * * @param classIndex the index of the class to compute the matthews * correlation coefficient for * * @return the mathews correlation coefficient */ public double matthewsCorrelationCoefficient(int classIndex) { return m_delegate.matthewsCorrelationCoefficient(classIndex); } /** * Calculates the weighted (by class size) matthews correlation coefficient. * * @return the weighted matthews correlation coefficient. */ public double weightedMatthewsCorrelation() { return m_delegate.weightedMatthewsCorrelation(); } /** * Calculate the recall with respect to a particular class. This is defined as * <p/> * * <pre> * correctly classified positives * ------------------------------ * total positives * </pre> * <p/> * (Which is also the same as the truePositiveRate.) 
* * @param classIndex the index of the class to consider as "positive" * @return the recall */ public double recall(int classIndex) { return m_delegate.recall(classIndex); } /** * Calculates the weighted (by class size) recall. * * @return the weighted recall. */ public double weightedRecall() { return m_delegate.weightedRecall(); } /** * Calculate the precision with respect to a particular class. This is defined * as * <p/> * * <pre> * correctly classified positives * ------------------------------ * total predicted as positive * </pre> * * @param classIndex the index of the class to consider as "positive" * @return the precision */ public double precision(int classIndex) { return m_delegate.precision(classIndex); } /** * Calculates the weighted (by class size) precision. * * @return the weighted precision. */ public double weightedPrecision() { return m_delegate.weightedPrecision(); } /** * Calculate the F-Measure with respect to a particular class. This is defined * as * <p/> * * <pre> * 2 * recall * precision * ---------------------- * recall + precision * </pre> * * @param classIndex the index of the class to consider as "positive" * @return the F-Measure */ public double fMeasure(int classIndex) { return m_delegate.fMeasure(classIndex); } /** * Calculates the macro weighted (by class size) average F-Measure. * * @return the weighted F-Measure. */ public double weightedFMeasure() { return m_delegate.weightedFMeasure(); } /** * Unweighted macro-averaged F-measure. If some classes not present in the * test set, they're just skipped (since recall is undefined there anyway) . * * @return unweighted macro-averaged F-measure. * */ public double unweightedMacroFmeasure() { return m_delegate.unweightedMacroFmeasure(); } /** * Unweighted micro-averaged F-measure. If some classes not present in the * test set, they have no effect. * * Note: if the test set is *single-label*, then this is the same as accuracy. * * @return unweighted micro-averaged F-measure. 
*/ public double unweightedMicroFmeasure() { return m_delegate.unweightedMicroFmeasure(); } /** * Sets the class prior probabilities. * * @param train the training instances used to determine the prior * probabilities * @throws Exception if the class attribute of the instances is not set */ public void setPriors(Instances train) throws Exception { m_delegate.setPriors(train); } /** * Get the current weighted class counts. * * @return the weighted class counts */ public double[] getClassPriors() { return m_delegate.getClassPriors(); } /** * Updates the class prior probabilities or the mean respectively (when * incrementally training). * * @param instance the new training instance seen * @throws Exception if the class of the instance is not set */ public void updatePriors(Instance instance) throws Exception { m_delegate.updatePriors(instance); } /** * disables the use of priors, e.g., in case of de-serialized schemes that * have no access to the original training set, but are evaluated on a set * set. */ public void useNoPriors() { m_delegate.useNoPriors(); } /** * Tests whether the current evaluation object is equal to another evaluation * object. * * @param obj the object to compare against * @return true if the two objects are equal */ @Override public boolean equals(Object obj) { if (obj instanceof weka.classifiers.Evaluation) { obj = ((weka.classifiers.Evaluation) obj).m_delegate; } return m_delegate.equals(obj); } /** * A test method for this class. Just extracts the first command line argument * as a classifier class name and calls evaluateModel. * * @param args an array of command line arguments, the first of which must be * the class name of a classifier. 
*/ public static void main(String[] args) { try { if (args.length == 0) { throw new Exception("The first argument must be the class name" + " of a classifier"); } String classifier = args[0]; args[0] = ""; System.out.println(evaluateModel(classifier, args)); } catch (Exception ex) { ex.printStackTrace(); System.err.println(ex.getMessage()); } } }
47,308
29.268074
83
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/IntervalEstimator.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * IntervalEstimator.java
 * Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.classifiers;

import weka.core.Instance;

/**
 * Interface for numeric prediction schemes that can output prediction
 * intervals.
 *
 * @author Kurt Driessens (kurtd@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public interface IntervalEstimator {

  /**
   * Returns an N * 2 array, where N is the number of prediction
   * intervals. In each row, the first element contains the lower
   * boundary of the corresponding prediction interval and the second
   * element the upper boundary.
   *
   * @param inst the instance to make the prediction for
   * @param confidenceLevel the percentage of cases that the intervals should
   *          cover
   * @return an array of prediction intervals
   * @throws Exception if the intervals can't be computed
   */
  double[][] predictIntervals(Instance inst, double confidenceLevel)
    throws Exception;
}
1,649
32.673469
86
java