code
stringlengths
3
1.18M
language
stringclasses
1 value
/**
 * [CMM_GTAnalysis.java]
 *
 * CMM: Ground truth analysis.
 *
 * Reference: Kremer et al., "An Effective Evaluation Measure for Clustering
 * on Evolving Data Streams", KDD, 2011
 *
 * @author Timm Jansen
 * Data Management and Data Exploration Group, RWTH Aachen University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * TODO:
 * - try to avoid calculating the radius multiple times
 * - avoid the full distance map?
 * - knn functionality in clusters
 * - noise error
 */

package moa.evaluation;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;

import moa.cluster.Clustering;
import moa.core.AutoExpandVector;
import moa.gui.visualization.DataPoint;
import weka.core.Instance;

/**
 * Pre-analysis of the ground truth for the CMM measure: re-clusters the
 * ground-truth classes (merging classes that are strongly connected), and
 * computes per-point connectivity values that CMM later uses for weighting
 * faults.
 */
public class CMM_GTAnalysis {

    /** the given ground truth clustering */
    private Clustering gtClustering;

    /** list of given points within the horizon */
    private ArrayList<CMMPoint> cmmpoints;

    /** the newly calculated ground truth clustering */
    private ArrayList<GTCluster> gt0Clusters;

    /** IDs of noise points */
    private ArrayList<Integer> noise;

    /** total number of points */
    private int numPoints;

    /** number of clusters of the original ground truth */
    private int numGTClusters;

    /**
     * number of classes of the original ground truth; in case of a
     * micro-clustering ground truth this differs from numGTClusters
     */
    private int numGTClasses;

    /** number of classes after the analysis (merging) is done */
    private int numGT0Classes;

    /** number of dimensions (class attribute excluded) */
    private int numDims;

    /**
     * mapping between true cluster ID/class label of the original ground truth
     * and the internal cluster ID/working class label.
     *
     * different original cluster IDs might map to the same new cluster ID due
     * to merging of two clusters
     */
    private HashMap<Integer, Integer> mapTrueLabelToWorkLabel;

    /** log of how clusters have been merged (for debugging) */
    private int[] mergeMap;

    /**
     * number of non-noise points that will create an error due to the
     * underlying clustering model (e.g. point being covered by two clusters
     * representing different classes)
     */
    private int noiseErrorByModel;

    /**
     * number of noise points that will create an error due to the underlying
     * clustering model (e.g. noise point being covered by a cluster)
     */
    private int pointErrorByModel;

    /** CMM debug mode */
    private boolean debug = false;

    /* ****** CMM parameters *********** */

    /** defines how many nearest neighbours will be used */
    private int knnNeighbourhood = 2;

    /**
     * the threshold which defines when ground truth clusters will be merged.
     * set to 1 to disable merging
     */
    private double tauConnection = 0.5;

    /**
     * experimental (default: disabled) separate k for point-to-cluster and
     * cluster-to-cluster connections
     */
    private double clusterConnectionMaxPoints = knnNeighbourhood;

    /**
     * experimental (default: disabled) use exponential connectivity function
     * to model different behavior: closer points will have a stronger
     * connection compared to the linear function. Use ConnRefXValue and ConnX
     * to better parameterize lambda, which controls the decay of the
     * connectivity
     */
    private boolean useExpConnectivity = false;

    private double lambdaConnRefXValue = 0.01;

    private double lambdaConnX = 4;

    private double lamdaConn;

    /* **************************************** */

    /**
     * Wrapper class for data points to store CMM relevant attributes.
     */
    protected class CMMPoint extends DataPoint {

        /** reference to the original point */
        protected DataPoint p = null;

        /** point ID */
        protected int pID = 0;

        /** true class label */
        protected int trueClass = -1;

        /** the connectivity of the point to its cluster */
        protected double connectivity = 1.0;

        /** knn distance within own cluster */
        protected double knnInCluster = 0.0;

        /** knn indices (for debugging only) */
        protected ArrayList<Integer> knnIndices;

        public CMMPoint(DataPoint point, int id) {
            // make a copy, but keep a reference to the original
            super(point, point.getTimestamp());
            p = point;
            pID = id;
            trueClass = (int) point.classValue();
        }

        /**
         * Returns the current working label of the cluster the point belongs
         * to. The label can change due to merging of clusters.
         *
         * @return the current working class label, or -1 for noise
         */
        protected int workclass() {
            if (trueClass == -1)
                return -1;
            else
                return mapTrueLabelToWorkLabel.get(trueClass);
        }
    }

    /**
     * Main class to model the new clusters that will be the output of the
     * cluster analysis.
     */
    protected class GTCluster {

        /** points that are per definition in the cluster */
        private ArrayList<Integer> points = new ArrayList<Integer>();

        /**
         * a new GT cluster consists of one or more "old" GT clusters.
         * Connected/overlapping clusters cannot be merged directly because of
         * the underlying cluster model. E.g. for merging two spherical
         * clusters the new cluster sphere can cover a lot more space than two
         * separate smaller spheres. To keep the original coverage we need to
         * keep the original clusters and merge them on an abstract level.
         */
        private ArrayList<Integer> clusterRepresentations = new ArrayList<Integer>();

        /** current work class (changes when merging) */
        private int workclass;

        /** original work class */
        private final int orgWorkClass;

        /** original class label */
        private final int label;

        /** clusters that have been merged into this cluster (debugging) */
        private ArrayList<Integer> mergedWorkLabels = null;

        /** average knn distance of all points in the cluster */
        private double knnMeanAvg = 0;

        /** average deviation of knn distance of all points */
        private double knnDevAvg = 0;

        /** connectivity of the cluster to all other clusters */
        private ArrayList<Double> connections = new ArrayList<Double>();

        private GTCluster(int workclass, int label, int gtClusteringID) {
            this.orgWorkClass = workclass;
            this.workclass = workclass;
            this.label = label;
            this.clusterRepresentations.add(gtClusteringID);
        }

        /**
         * The original class label the cluster represents.
         *
         * @return original class label
         */
        protected int getLabel() {
            return label;
        }

        /**
         * Calculate the probability of the point being covered by the cluster.
         * The maximum over all cluster representatives is used.
         *
         * @param point point to calculate the probability for
         * @return probability of the point being covered by the cluster
         */
        protected double getInclusionProbability(CMMPoint point) {
            double prob = Double.MIN_VALUE;
            // check all cluster representatives for coverage
            for (int c = 0; c < clusterRepresentations.size(); c++) {
                double tmp_prob = gtClustering.get(clusterRepresentations.get(c)).getInclusionProbability(point);
                if (tmp_prob > prob)
                    prob = tmp_prob;
            }
            return prob;
        }

        /**
         * Calculate knn distances of points within own cluster, plus the
         * average knn distance and average knn distance deviation over all
         * points.
         */
        private void calculateKnn() {
            for (int p0 : points) {
                CMMPoint cmdp = cmmpoints.get(p0);
                if (!cmdp.isNoise()) {
                    AutoExpandVector<Double> knnDist = new AutoExpandVector<Double>();
                    AutoExpandVector<Integer> knnPointIndex = new AutoExpandVector<Integer>();

                    // calculate nearest neighbours
                    getKnnInCluster(cmdp, knnNeighbourhood, points, knnDist, knnPointIndex);

                    // TODO: What to do if we have less than k neighbours?
                    double avgKnn = 0;
                    for (int i = 0; i < knnDist.size(); i++) {
                        avgKnn += knnDist.get(i);
                    }
                    if (knnDist.size() != 0)
                        avgKnn /= knnDist.size();
                    cmdp.knnInCluster = avgKnn;
                    cmdp.knnIndices = knnPointIndex;
                    cmdp.p.setMeasureValue("knnAvg", cmdp.knnInCluster);

                    knnMeanAvg += avgKnn;
                    knnDevAvg += Math.pow(avgKnn, 2);
                }
            }
            knnMeanAvg = knnMeanAvg / (double) points.size();
            knnDevAvg = knnDevAvg / (double) points.size();

            double variance = knnDevAvg - Math.pow(knnMeanAvg, 2.0);
            // Due to numerical errors, small negative values can occur.
            if (variance <= 0.0)
                variance = 1e-50;
            knnDevAvg = Math.sqrt(variance);
        }

        /**
         * Calculate the connection of another cluster to this cluster as the
         * average over the strongest clusterConnectionMaxPoints point
         * connections.
         *
         * @param otherCid cluster id of the other cluster
         * @param initial flag for initial run (enables per-point debug values)
         */
        private void calculateClusterConnection(int otherCid, boolean initial) {
            double avgConnection = 0;
            if (workclass == otherCid) {
                avgConnection = 1;
            } else {
                // kmax / kmaxIndexes are parallel lists holding the largest
                // connection values in descending order
                AutoExpandVector<Double> kmax = new AutoExpandVector<Double>();
                AutoExpandVector<Integer> kmaxIndexes = new AutoExpandVector<Integer>();

                for (int p : points) {
                    CMMPoint cmdp = cmmpoints.get(p);
                    double con_p_Cj = getConnectionValue(cmmpoints.get(p), otherCid);
                    double connection = cmdp.connectivity * con_p_Cj;
                    if (initial) {
                        cmdp.p.setMeasureValue("Connection to C" + otherCid, con_p_Cj);
                    }

                    // insert into the sorted top-k list
                    if (kmax.size() < clusterConnectionMaxPoints || connection > kmax.get(kmax.size() - 1)) {
                        int index = 0;
                        while (index < kmax.size() && connection < kmax.get(index)) {
                            index++;
                        }
                        kmax.add(index, connection);
                        kmaxIndexes.add(index, p);
                        if (kmax.size() > clusterConnectionMaxPoints) {
                            kmax.remove(kmax.size() - 1);
                            // FIX: was kmaxIndexes.add(...), which appended
                            // instead of trimming and desynchronized the two
                            // parallel lists
                            kmaxIndexes.remove(kmaxIndexes.size() - 1);
                        }
                    }
                }

                for (int k = 0; k < kmax.size(); k++) {
                    avgConnection += kmax.get(k);
                }
                avgConnection /= kmax.size();
            }

            if (otherCid < connections.size()) {
                connections.set(otherCid, avgConnection);
            } else if (connections.size() == otherCid) {
                connections.add(avgConnection);
            } else {
                System.out.println("Something is going really wrong with the connection listing!"
                        + knnNeighbourhood + " " + tauConnection);
            }
        }

        /**
         * Merge a cluster into this cluster: points and cluster
         * representatives are taken over, label mappings and connection lists
         * are updated, and knn statistics are recalculated.
         *
         * @param mergeID the ID of the cluster to be merged
         */
        private void mergeCluster(int mergeID) {
            if (mergeID < gt0Clusters.size()) {
                // track merging (debugging)
                for (int i = 0; i < numGTClasses; i++) {
                    if (mergeMap[i] == mergeID)
                        mergeMap[i] = workclass;
                    if (mergeMap[i] > mergeID)
                        mergeMap[i]--;
                }

                GTCluster gtcMerge = gt0Clusters.get(mergeID);
                if (debug)
                    System.out.println("Merging C" + gtcMerge.workclass + " into C" + workclass
                            + " with Con " + connections.get(mergeID) + " / " + gtcMerge.connections.get(workclass));

                // update mapTrueLabelToWorkLabel; only values are replaced,
                // so iterating the key set while putting is safe
                mapTrueLabelToWorkLabel.put(gtcMerge.label, workclass);
                Iterator<Integer> iterator = mapTrueLabelToWorkLabel.keySet().iterator();
                while (iterator.hasNext()) {
                    Integer key = iterator.next();
                    // update pointer of already merged cluster
                    int value = mapTrueLabelToWorkLabel.get(key);
                    if (value == mergeID)
                        mapTrueLabelToWorkLabel.put(key, workclass);
                    if (value > mergeID)
                        mapTrueLabelToWorkLabel.put(key, value - 1);
                }

                // merge points from B into A
                points.addAll(gtcMerge.points);
                clusterRepresentations.addAll(gtcMerge.clusterRepresentations);

                if (mergedWorkLabels == null) {
                    mergedWorkLabels = new ArrayList<Integer>();
                }
                mergedWorkLabels.add(gtcMerge.orgWorkClass);
                if (gtcMerge.mergedWorkLabels != null)
                    mergedWorkLabels.addAll(gtcMerge.mergedWorkLabels);

                gt0Clusters.remove(mergeID);

                // update workclass labels
                for (int c = mergeID; c < gt0Clusters.size(); c++) {
                    gt0Clusters.get(c).workclass = c;
                }

                // update knn distances
                calculateKnn();
                for (int c = 0; c < gt0Clusters.size(); c++) {
                    gt0Clusters.get(c).connections.remove(mergeID);
                    // recalculate connection from other clusters to the new merged one
                    gt0Clusters.get(c).calculateClusterConnection(workclass, false);
                    // and from new merged one to other clusters
                    gt0Clusters.get(workclass).calculateClusterConnection(c, false);
                }
            } else {
                System.out.println("Merge indices are not valid");
            }
        }
    }

    /**
     * Run the full ground-truth analysis: build the working clusters, compute
     * knn statistics, merge strongly connected clusters and compute per-point
     * connectivities.
     *
     * @param trueClustering the ground truth clustering
     * @param points data points (must not be empty)
     * @param enableClassMerge allow class merging (should be set to true by default)
     */
    public CMM_GTAnalysis(Clustering trueClustering, ArrayList<DataPoint> points, boolean enableClassMerge) {
        if (points == null || points.isEmpty()) {
            throw new IllegalArgumentException("CMM_GTAnalysis needs a non-empty point list");
        }

        if (debug)
            System.out.println("GT Analysis Debug Output");

        noiseErrorByModel = 0;
        pointErrorByModel = 0;

        if (!enableClassMerge) {
            tauConnection = 1.0;
        }

        lamdaConn = -Math.log(lambdaConnRefXValue) / Math.log(2) / lambdaConnX;

        this.gtClustering = trueClustering;
        numPoints = points.size();
        numDims = points.get(0).numAttributes() - 1;
        numGTClusters = gtClustering.size();

        // init mappings between work and true labels
        mapTrueLabelToWorkLabel = new HashMap<Integer, Integer>();

        // set up base of new clustering
        gt0Clusters = new ArrayList<GTCluster>();
        int numWorkClasses = 0;
        // create label to worklabel mapping as real labels can be just a set
        // of unordered integers
        for (int i = 0; i < numGTClusters; i++) {
            int label = (int) gtClustering.get(i).getGroundTruth();
            if (!mapTrueLabelToWorkLabel.containsKey(label)) {
                gt0Clusters.add(new GTCluster(numWorkClasses, label, i));
                mapTrueLabelToWorkLabel.put(label, numWorkClasses);
                numWorkClasses++;
            } else {
                gt0Clusters.get(mapTrueLabelToWorkLabel.get(label)).clusterRepresentations.add(i);
            }
        }
        numGTClasses = numWorkClasses;
        mergeMap = new int[numGTClasses];
        for (int i = 0; i < numGTClasses; i++) {
            mergeMap[i] = i;
        }

        // create cmm point wrapper instances
        cmmpoints = new ArrayList<CMMPoint>();
        for (int p = 0; p < points.size(); p++) {
            CMMPoint cmdp = new CMMPoint(points.get(p), p);
            cmmpoints.add(cmdp);
        }

        // split points up into their GTClusters and Noise (according to class labels)
        noise = new ArrayList<Integer>();
        for (int p = 0; p < numPoints; p++) {
            if (cmmpoints.get(p).isNoise()) {
                noise.add(p);
            } else {
                gt0Clusters.get(cmmpoints.get(p).workclass()).points.add(p);
            }
        }

        // calculate initial knnMean and knnDev
        for (GTCluster gtc : gt0Clusters) {
            gtc.calculateKnn();
        }

        // calculate cluster connections
        calculateGTClusterConnections();

        // calculate point connections with own clusters
        calculateGTPointQualities();

        if (debug)
            System.out.println("GT Analysis Debug End");
    }

    /**
     * Calculate the connection of a point to a cluster based on the point's
     * knn distance within the cluster compared to the cluster's upper knn
     * distance (mean + deviation).
     *
     * @param cmmp the point to calculate the connection for
     * @param clusterID the corresponding cluster
     * @return the connection value in [0, 1]
     */
    // TODO: Cache the connection value for a point to the different clusters???
    protected double getConnectionValue(CMMPoint cmmp, int clusterID) {
        AutoExpandVector<Double> knnDist = new AutoExpandVector<Double>();
        AutoExpandVector<Integer> knnPointIndex = new AutoExpandVector<Integer>();

        // calculate the knn distance of the point to the cluster
        getKnnInCluster(cmmp, knnNeighbourhood, gt0Clusters.get(clusterID).points, knnDist, knnPointIndex);

        // TODO: What to do if we have less than k neighbours?
        double avgDist = 0;
        for (int i = 0; i < knnDist.size(); i++) {
            avgDist += knnDist.get(i);
        }

        // what to do if we only have a single point???
        if (knnDist.size() != 0)
            avgDist /= knnDist.size();
        else
            return 0;

        // get the upper knn distance of the cluster
        double upperKnn = gt0Clusters.get(clusterID).knnMeanAvg + gt0Clusters.get(clusterID).knnDevAvg;

        /* calculate the connectivity based on the knn distance of the point
         * within the cluster and the upper knn distance of the cluster */
        if (avgDist < upperKnn) {
            return 1;
        } else {
            // value that should be reached at upperKnn distance
            // choose connection formula
            double conn;
            if (useExpConnectivity)
                conn = Math.pow(2, -lamdaConn * (avgDist - upperKnn) / upperKnn);
            else
                conn = upperKnn / avgDist;

            if (Double.isNaN(conn))
                System.out.println("Connectivity NaN at " + cmmp.p.getTimestamp());

            return conn;
        }
    }

    /**
     * Compute the k smallest distances from a point to a set of points.
     *
     * @param cmmp point to calculate the knn distance for
     * @param k number of nearest neighbours to look for
     * @param pointIDs list of point IDs to check
     * @param knnDist sorted (ascending) list of smallest knn distances
     *        (can already be filled to make updates possible)
     * @param knnPointIndex list of corresponding knn indices
     */
    private void getKnnInCluster(CMMPoint cmmp, int k, ArrayList<Integer> pointIDs,
            AutoExpandVector<Double> knnDist, AutoExpandVector<Integer> knnPointIndex) {

        // iterate over every point in the chosen cluster, calculate the
        // distance and insert into the sorted list
        for (int p1 = 0; p1 < pointIDs.size(); p1++) {
            int pid = pointIDs.get(p1);
            if (cmmp.pID == pid)
                continue;
            double dist = distance(cmmp, cmmpoints.get(pid));
            if (knnDist.size() < k || dist < knnDist.get(knnDist.size() - 1)) {
                int index = 0;
                while (index < knnDist.size() && dist > knnDist.get(index)) {
                    index++;
                }
                knnDist.add(index, dist);
                knnPointIndex.add(index, pid);
                if (knnDist.size() > k) {
                    knnDist.remove(knnDist.size() - 1);
                    knnPointIndex.remove(knnPointIndex.size() - 1);
                }
            }
        }
    }

    /**
     * Calculate initial connectivities of all non-noise points to their own
     * clusters.
     */
    private void calculateGTPointQualities() {
        for (int p = 0; p < numPoints; p++) {
            CMMPoint cmdp = cmmpoints.get(p);
            if (!cmdp.isNoise()) {
                cmdp.connectivity = getConnectionValue(cmdp, cmdp.workclass());
                cmdp.p.setMeasureValue("Connectivity", cmdp.connectivity);
            }
        }
    }

    /**
     * Calculate connections between clusters and merge clusters accordingly
     * as long as connections exceed the threshold tauConnection.
     */
    private void calculateGTClusterConnections() {
        for (int c0 = 0; c0 < gt0Clusters.size(); c0++) {
            for (int c1 = 0; c1 < gt0Clusters.size(); c1++) {
                gt0Clusters.get(c0).calculateClusterConnection(c1, true);
            }
        }

        boolean changedConnection = true;
        while (changedConnection) {
            if (debug) {
                System.out.println("Cluster Connection");
                for (int c = 0; c < gt0Clusters.size(); c++) {
                    System.out.print("C" + gt0Clusters.get(c).label + " --> ");
                    for (int c1 = 0; c1 < gt0Clusters.get(c).connections.size(); c1++) {
                        System.out.print(" C" + gt0Clusters.get(c1).label + ": "
                                + gt0Clusters.get(c).connections.get(c1));
                    }
                    System.out.println("");
                }
                System.out.println("");
            }

            // find the pair with the strongest symmetric connection
            double max = 0;
            int maxIndexI = -1;
            int maxIndexJ = -1;
            changedConnection = false;
            for (int c0 = 0; c0 < gt0Clusters.size(); c0++) {
                for (int c1 = c0 + 1; c1 < gt0Clusters.size(); c1++) {
                    // symmetric connection strength is the weaker direction
                    double min = Math.min(gt0Clusters.get(c0).connections.get(c1),
                            gt0Clusters.get(c1).connections.get(c0));
                    if (min > max) {
                        max = min;
                        maxIndexI = c0;
                        maxIndexJ = c1;
                    }
                }
            }

            if (maxIndexI != -1 && max > tauConnection) {
                gt0Clusters.get(maxIndexI).mergeCluster(maxIndexJ);
                if (debug)
                    System.out.println("Merging " + maxIndexI + " and " + maxIndexJ
                            + " because of connection " + max);
                changedConnection = true;
            }
        }
        numGT0Classes = gt0Clusters.size();
    }

    /**
     * Calculates how well the original clusters are separable.
     * Small values indicate bad separability, values close to 1 indicate good
     * separability.
     *
     * @return index of separability
     */
    public double getClassSeparability() {
        return numGT0Classes / (double) numGTClasses;
    }

    /**
     * Calculates how well noise is separable from the given clusters.
     * Small values indicate bad separability, values close to 1 indicate good
     * separability.
     *
     * @return index of noise separability
     */
    public double getNoiseSeparability() {
        if (noise.isEmpty())
            return 1;

        double connectivity = 0;
        for (int p : noise) {
            CMMPoint npoint = cmmpoints.get(p);
            double maxConnection = 0;

            // TODO: some kind of pruning possible. what about weighting?
            for (int c = 0; c < gt0Clusters.size(); c++) {
                double connection = getConnectionValue(npoint, c);
                if (connection > maxConnection)
                    maxConnection = connection;
            }
            connectivity += maxConnection;
            npoint.p.setMeasureValue("MaxConnection", maxConnection);
        }
        return 1 - (connectivity / noise.size());
    }

    /**
     * Calculates the relative number of errors being caused by the underlying
     * cluster model (points fully covered by a cluster of a different class,
     * or noise points fully covered by any cluster).
     *
     * @return quality of the model
     */
    public double getModelQuality() {
        for (int p = 0; p < numPoints; p++) {
            CMMPoint cmdp = cmmpoints.get(p);
            for (int hc = 0; hc < numGTClusters; hc++) {
                if (gtClustering.get(hc).getGroundTruth() != cmdp.trueClass) {
                    if (gtClustering.get(hc).getInclusionProbability(cmdp) >= 1) {
                        if (!cmdp.isNoise())
                            pointErrorByModel++;
                        else
                            noiseErrorByModel++;
                        break;
                    }
                }
            }
        }
        if (debug)
            System.out.println("Error by model: noise " + noiseErrorByModel + " point " + pointErrorByModel);

        return 1 - ((pointErrorByModel + noiseErrorByModel) / (double) numPoints);
    }

    /**
     * Get CMM internal point.
     *
     * @param index of the point
     * @return cmm point
     */
    protected CMMPoint getPoint(int index) {
        return cmmpoints.get(index);
    }

    /**
     * Return cluster.
     *
     * @param index of the cluster to return
     * @return cluster
     */
    protected GTCluster getGT0Cluster(int index) {
        return gt0Clusters.get(index);
    }

    /**
     * Number of classes/clusters of the new clustering.
     *
     * @return number of new clusters
     */
    protected int getNumberOfGT0Classes() {
        return numGT0Classes;
    }

    /**
     * Calculates Euclidean distance between two instances.
     *
     * @param inst1 first point
     * @param inst2 second point
     * @return Euclidean distance
     */
    private double distance(Instance inst1, Instance inst2) {
        return distance(inst1, inst2.toDoubleArray());
    }

    /**
     * Calculates Euclidean distance over the first numDims attributes (the
     * class attribute is excluded).
     *
     * @param inst1 point as an instance
     * @param inst2 point as double array
     * @return Euclidean distance
     */
    private double distance(Instance inst1, double[] inst2) {
        double distance = 0.0;
        for (int i = 0; i < numDims; i++) {
            double d = inst1.value(i) - inst2[i];
            distance += d * d;
        }
        return Math.sqrt(distance);
    }

    /**
     * String with main CMM parameters.
     *
     * @return main CMM parameter string
     */
    public String getParameterString() {
        String para = "";
        para += "k=" + knnNeighbourhood + ";";
        if (useExpConnectivity) {
            para += "lambdaConnX=" + lambdaConnX + ";";
            para += "lambdaConn=" + lamdaConn + ";";
            para += "lambdaConnRef=" + lambdaConnRefXValue + ";";
        }
        para += "m=" + clusterConnectionMaxPoints + ";";
        para += "tauConn=" + tauConnection + ";";
        return para;
    }
}
Java
/*
 * SilhouetteCoefficient.java
 * Copyright (C) 2010 RWTH Aachen University, Germany
 * @author Jansen (moa@cs.rwth-aachen.de)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package moa.evaluation;

import java.util.ArrayList;
import java.util.HashMap;
import moa.cluster.Cluster;
import moa.cluster.Clustering;
import moa.gui.visualization.DataPoint;

/**
 * Silhouette coefficient measure for stream clusterings.
 *
 * A point is assigned to every cluster whose inclusion probability exceeds
 * pointInclusionProbThreshold (clusters may overlap, so a point can belong to
 * several clusters at once). The per-point silhouette is computed from the
 * average distance to the closest "own" cluster and the closest other
 * cluster, then averaged over all assigned points and rescaled from [-1, 1]
 * to [0, 1].
 */
public class SilhouetteCoefficient extends MeasureCollection{

    // minimum inclusion probability for a point to count as member of a cluster
    private double pointInclusionProbThreshold = 0.8;

    public SilhouetteCoefficient() {
        super();
    }

    @Override
    protected boolean[] getDefaultEnabled() {
        boolean [] defaults = {false};
        return defaults;
    }

    @Override
    public String[] getNames() {
        String[] names = {"SilhCoeff"};
        return names;
    }

    /**
     * Evaluate one clustering against the point window and record a single
     * "SilhCoeff" value in [0, 1].
     *
     * @param clustering the found clustering to evaluate
     * @param trueClustering ground truth (unused by this measure)
     * @param points the points of the current evaluation window
     */
    public void evaluateClustering(Clustering clustering, Clustering trueClustering, ArrayList<DataPoint> points) {
        int numFCluster = clustering.size();

        // inclusion probability of every point in every found cluster
        double [][] pointInclusionProbFC = new double[points.size()][numFCluster];
        for (int p = 0; p < points.size(); p++) {
            DataPoint point = points.get(p);
            for (int fc = 0; fc < numFCluster; fc++) {
                Cluster cl = clustering.get(fc);
                pointInclusionProbFC[p][fc] = cl.getInclusionProbability(point);
            }
        }

        double silhCoeff = 0.0;
        int totalCount = 0;
        for (int p = 0; p < points.size(); p++) {
            DataPoint point = points.get(p);

            // all clusters this point is considered a member of
            ArrayList<Integer> ownClusters = new ArrayList<Integer>();
            for (int fc = 0; fc < numFCluster; fc++) {
                if(pointInclusionProbFC[p][fc] > pointInclusionProbThreshold){
                    ownClusters.add(fc);
                }
            }

            if(ownClusters.size() > 0){
                // sum of distances / member counts per cluster, restricted to
                // non-noise points (classValue() != -1)
                double[] distanceByClusters = new double[numFCluster];
                int[] countsByClusters = new int[numFCluster];
                //calculate averageDistance of p to all cluster
                for (int p1 = 0; p1 < points.size(); p1++) {
                    DataPoint point1 = points.get(p1);
                    if(p1!= p && point1.classValue() != -1){
                        for (int fc = 0; fc < numFCluster; fc++) {
                            if(pointInclusionProbFC[p1][fc] > pointInclusionProbThreshold){
                                double distance = distance(point, point1);
                                distanceByClusters[fc]+=distance;
                                countsByClusters[fc]++;
                            }
                        }
                    }
                }

                //find closest OWN cluster as clusters might overlap
                // NOTE(review): if a cluster has no other members,
                // countsByClusters[fc] is 0 and normDist becomes NaN; the NaN
                // then fails every comparison and the cluster is skipped —
                // TODO confirm this fallback is intended
                double minAvgDistanceOwn = Double.MAX_VALUE;
                int minOwnIndex = -1;
                for (int fc : ownClusters) {
                    double normDist = distanceByClusters[fc]/(double)countsByClusters[fc];
                    if(normDist < minAvgDistanceOwn){// && pointInclusionProbFC[p][fc] > pointInclusionProbThreshold){
                        minAvgDistanceOwn = normDist;
                        minOwnIndex = fc;
                    }
                }

                //find closest other (or other own) cluster
                double minAvgDistanceOther = Double.MAX_VALUE;
                for (int fc = 0; fc < numFCluster; fc++) {
                    if(fc != minOwnIndex){
                        double normDist = distanceByClusters[fc]/(double)countsByClusters[fc];
                        if(normDist < minAvgDistanceOther){
                            minAvgDistanceOther = normDist;
                        }
                    }
                }

                // per-point silhouette in [-1, 1]
                double silhP = (minAvgDistanceOther-minAvgDistanceOwn)/Math.max(minAvgDistanceOther, minAvgDistanceOwn);
                point.setMeasureValue("SC - own", minAvgDistanceOwn);
                point.setMeasureValue("SC - other", minAvgDistanceOther);
                point.setMeasureValue("SC", silhP);

                silhCoeff+=silhP;
                totalCount++;
                //System.out.println(point.getTimestamp()+" Silh "+silhP+" / "+avgDistanceOwn+" "+minAvgDistanceOther+" (C"+minIndex+")");
            }
        }
        if(totalCount>0)
            silhCoeff/=(double)totalCount;
        //normalize from -1, 1 to 0,1
        silhCoeff = (silhCoeff+1)/2.0;
        addValue(0,silhCoeff);
    }

    /**
     * Euclidean distance over all attributes of the two points.
     * NOTE(review): numAttributes() seems to include the class attribute —
     * TODO confirm whether it should be excluded as in other measures.
     */
    private double distance(DataPoint inst1, DataPoint inst2){
        double distance = 0.0;
        int numDims = inst1.numAttributes();
        for (int i = 0; i < numDims; i++) {
            double d = inst1.value(i) - inst2.value(i);
            distance += d * d;
        }
        return Math.sqrt(distance);
    }
}
Java
/* * BasicRegressionPerformanceEvaluator.java * Copyright (C) 2011 University of Waikato, Hamilton, New Zealand * @author Albert Bifet (abifet at cs dot waikato dot ac dot nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.evaluation; import moa.AbstractMOAObject; import moa.core.Measurement; import weka.core.Instance; /** * Regression evaluator that performs basic incremental evaluation. * * @author Albert Bifet (abifet at cs dot waikato dot ac dot nz) * @version $Revision: 7 $ */ public class BasicRegressionPerformanceEvaluator extends AbstractMOAObject implements RegressionPerformanceEvaluator { private static final long serialVersionUID = 1L; protected double weightObserved; protected double squareError; protected double averageError; protected double sumTarget; protected double squareTargetError; protected double averageTargetError; @Override public void reset() { this.weightObserved = 0.0; this.squareError = 0.0; this.averageError = 0.0; this.sumTarget = 0.0; this.averageTargetError = 0.0; this.squareTargetError = 0.0; } @Override public void addResult(Instance inst, double[] prediction) { if (inst.weight() > 0.0) { if (prediction.length > 0) { double meanTarget = this.weightObserved != 0 ? 
this.sumTarget / this.weightObserved : 0.0; this.squareError += (inst.classValue() - prediction[0]) * (inst.classValue() - prediction[0]); this.averageError += Math.abs(inst.classValue() - prediction[0]); this.squareTargetError += (inst.classValue() - meanTarget) * (inst.classValue() - meanTarget); this.averageTargetError += Math.abs(inst.classValue() - meanTarget); this.sumTarget += inst.classValue(); this.weightObserved += inst.weight(); } } } @Override public Measurement[] getPerformanceMeasurements() { return new Measurement[]{ new Measurement("classified instances", getTotalWeightObserved()), new Measurement("mean absolute error", getMeanError()), new Measurement("root mean squared error", getSquareError()), new Measurement("relative mean absolute error", getRelativeMeanError()), new Measurement("relative root mean squared error", getRelativeSquareError()) }; } public double getTotalWeightObserved() { return this.weightObserved; } public double getMeanError() { return this.weightObserved > 0.0 ? this.averageError / this.weightObserved : 0.0; } public double getSquareError() { return Math.sqrt(this.weightObserved > 0.0 ? this.squareError / this.weightObserved : 0.0); } public double getTargetMeanError() { return this.weightObserved > 0.0 ? this.averageTargetError / this.weightObserved : 0.0; } public double getTargetSquareError() { return Math.sqrt(this.weightObserved > 0.0 ? this.squareTargetError / this.weightObserved : 0.0); } @Override public void getDescription(StringBuilder sb, int indent) { Measurement.getMeasurementsDescription(getPerformanceMeasurements(), sb, indent); } private double getRelativeMeanError() { //double targetMeanError = getTargetMeanError(); //return targetMeanError > 0 ? getMeanError()/targetMeanError : 0.0; return this.averageTargetError> 0 ? this.averageError/this.averageTargetError : 0.0; } private double getRelativeSquareError() { //double targetSquareError = getTargetSquareError(); //return targetSquareError > 0 ? 
getSquareError()/targetSquareError : 0.0; return Math.sqrt(this.squareTargetError> 0 ? this.squareError/this.squareTargetError : 0.0); } }
Java
/*
 * MultilabelWindowClassificationPerformanceEvaluator.java
 * Copyright (C) 2012 University of Waikato, Hamilton, New Zealand
 * @author Jesse Read (jesse@tsc.uc3m.es)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
package moa.evaluation;

import java.util.ArrayList;
import java.util.HashMap;

import moa.core.Measurement;
import moa.core.utils.EvalUtils;
import weka.core.Instance;

/**
 * Multilabel Window Classification Performance Evaluator.
 *
 * Collects predictions and true label vectors over a window, calibrates a
 * threshold from the observed label cardinality, and reports multi-label
 * measures (subset accuracy, exact match, Hamming accuracy, log loss).
 *
 * @author Jesse Read (jesse@tsc.uc3m.es)
 * @version $Revision: 1 $
 */
public class MultilabelWindowClassificationPerformanceEvaluator extends WindowClassificationPerformanceEvaluator {

    /** predictions collected in the current window */
    ArrayList<double[]> result_pred = new ArrayList<double[]>();

    /** true label vectors collected in the current window */
    ArrayList<int[]> result_real = new ArrayList<int[]>();

    // We have to keep track of Label Cardinality for thresholding.
    // -1.0 means "no window finished yet" (use the default threshold).
    private double LC = -1.0;

    @Override
    public void reset() {
        result_pred = new ArrayList<double[]>(widthOption.getValue());
        result_real = new ArrayList<int[]>(widthOption.getValue());
    }

    /**
     * Reset the window and remember the number of labels L.
     *
     * @param L number of labels
     */
    @Override
    public void reset(int L) {
        numClasses = L;
        result_pred = new ArrayList<double[]>(widthOption.getValue());
        result_real = new ArrayList<int[]>(widthOption.getValue());
    }

    /**
     * Add a Result. NOTE: In theory, the size of y[] could change, although we
     * do not take into account this possibility *yet*. (for this, we would
     * have to use y[] differently, another format for y[] e.g. HashMap, or
     * store more info in x)
     *
     * @param x the instance carrying the true label vector
     * @param y per-label prediction scores; y.length is the number of labels L
     * @throws IllegalArgumentException if y is too short to be a multi-label
     *         prediction (L has been lost somewhere upstream)
     */
    @Override
    public void addResult(Instance x, double[] y) {
        if (y.length <= 2) {
            // FIX: was System.err + System.exit(1); library code must not
            // kill the host JVM — signal the broken state to the caller instead
            throw new IllegalArgumentException(
                    "y.length too short (" + y.length
                    + "). We've lost track of L at some point, unable to continue");
        }
        // add to the current evaluation window
        result_real.add(EvalUtils.toIntArray(x, y.length));
        result_pred.add(y);
    }

    @Override
    public Measurement[] getPerformanceMeasurements() {

        // calibrate the threshold from the previous window's label cardinality
        double t = 0.5;
        try {
            t = (LC > 0.0) ? EvalUtils.calibrateThreshold(result_pred, LC) : 0.5;
        } catch (Exception e) {
            System.err.println("Warning: failed to calibrate threshold, continuing with default: t = " + t);
            e.printStackTrace();
        }

        // calculate performance
        HashMap<String, Double> result = EvalUtils.evaluateMultiLabel(result_pred, result_real, t);

        // gather measurements
        Measurement[] m = new Measurement[]{
            new Measurement("Subset Accuracy", result.get("Accuracy")),
            new Measurement("Exact Match", result.get("Exact_match")),
            // FIX: label typo "Hamming Accucaracy"
            new Measurement("Hamming Accuracy", result.get("H_acc")),
            new Measurement("Log Loss_D", result.get("LogLossD")),
            new Measurement("Threshold", result.get("Threshold")),
        };

        // save label cardinality for the next window
        LC = result.get("LCard_real");

        // reset the window
        reset();

        return m;
    }

    @Override
    public void getDescription(StringBuilder sb, int indent) {
        sb.append("Multi-label Window Classification Performance Evaluator");
    }
}
Java
/*
 *    StatisticalCollection.java
 *    Copyright (C) 2010 RWTH Aachen University, Germany
 *    @author Jansen (moa@cs.rwth-aachen.de)
 *
 *    Licensed under the Apache License, Version 2.0 (the "License");
 *    you may not use this file except in compliance with the License.
 *    You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing, software
 *    distributed under the License is distributed on an "AS IS" BASIS,
 *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *    See the License for the specific language governing permissions and
 *    limitations under the License.
 *
 */
package moa.evaluation;

import java.util.ArrayList;
import java.util.Arrays;

import moa.cluster.Clustering;
import moa.gui.visualization.DataPoint;

/**
 * Statistical external clustering-quality measures: the van Dongen criterion
 * and the Rand statistic, both computed from the cluster/class membership
 * matrix. A C-Index implementation is also provided but currently disabled.
 */
public class StatisticalCollection extends MeasureCollection {

    private boolean debug = false;

    @Override
    protected String[] getNames() {
        //String[] names = {"van Dongen","Rand statistic", "C Index"};
        String[] names = {"van Dongen", "Rand statistic"};
        return names;
    }

    @Override
    protected boolean[] getDefaultEnabled() {
        boolean[] defaults = {false, false};
        return defaults;
    }

    /**
     * Computes the van Dongen criterion and the Rand statistic for the found
     * clustering against the ground truth.
     *
     * @param clustering     the clustering produced by the algorithm
     * @param trueClustering the ground-truth clustering (unused here; the
     *                       membership matrix is built from the point labels)
     * @param points         the points of the current evaluation horizon
     */
    @Override
    public void evaluateClustering(Clustering clustering, Clustering trueClustering, ArrayList<DataPoint> points) throws Exception {
        MembershipMatrix mm = new MembershipMatrix(clustering, points);
        int numClasses = mm.getNumClasses();
        // +1: the membership matrix carries an extra row for unassigned/noise points
        int numCluster = clustering.size() + 1;
        int n = mm.getTotalEntries();

        // van Dongen: sum of per-cluster maxima ...
        double dongenMaxFC = 0;
        double dongenMaxSumFC = 0;
        for (int i = 0; i < numCluster; i++) {
            double max = 0;
            for (int j = 0; j < numClasses; j++) {
                if (mm.getClusterClassWeight(i, j) > max) {
                    max = mm.getClusterClassWeight(i, j);
                }
            }
            dongenMaxFC += max;
            if (mm.getClusterSum(i) > dongenMaxSumFC) {
                dongenMaxSumFC = mm.getClusterSum(i);
            }
        }

        // ... plus sum of per-class maxima
        double dongenMaxHC = 0;
        double dongenMaxSumHC = 0;
        for (int j = 0; j < numClasses; j++) {
            double max = 0;
            for (int i = 0; i < numCluster; i++) {
                if (mm.getClusterClassWeight(i, j) > max) {
                    max = mm.getClusterClassWeight(i, j);
                }
            }
            dongenMaxHC += max;
            if (mm.getClassSum(j) > dongenMaxSumHC) {
                dongenMaxSumHC = mm.getClassSum(j);
            }
        }

        double dongen = (dongenMaxFC + dongenMaxHC) / (2 * n);
        //normalized dongen
        //double dongen = 1-(2*n - dongenMaxFC - dongenMaxHC)/(2*n - dongenMaxSumFC - dongenMaxSumHC);
        if (debug) {
            System.out.println("Dongen HC:" + dongenMaxHC + " FC:" + dongenMaxFC + " Total:" + dongen + " n " + n);
        }
        addValue("van Dongen", dongen);

        //Rand index
        //http://www.cais.ntu.edu.sg/~qihe/menu4.html
        double m1 = 0;
        for (int j = 0; j < numClasses; j++) {
            double v = mm.getClassSum(j);
            m1 += v * (v - 1) / 2.0; // pairs within each class
        }
        double m2 = 0;
        for (int i = 0; i < numCluster; i++) {
            double v = mm.getClusterSum(i);
            m2 += v * (v - 1) / 2.0; // pairs within each cluster
        }
        double m = 0;
        for (int i = 0; i < numCluster; i++) {
            for (int j = 0; j < numClasses; j++) {
                double v = mm.getClusterClassWeight(i, j);
                m += v * (v - 1) / 2.0; // pairs agreeing in both class and cluster
            }
        }
        double M = n * (n - 1) / 2.0; // total number of point pairs
        double rand = (M - m1 - m2 + 2 * m) / M;
        //normalized rand
        //double rand = (m - m1*m2/M)/(m1/2.0 + m2/2.0 - m1*m2/M);
        addValue("Rand statistic", rand);

        //addValue("C Index",cindex(clustering, points));
    }

    /**
     * C-Index: mean within-cluster distance rescaled by the min/max
     * within-cluster distance. A point counts as belonging to a cluster when
     * its inclusion probability exceeds 0.8.
     *
     * @return the C-Index, or 0 when no within-cluster pair exists
     */
    public double cindex(Clustering clustering, ArrayList<DataPoint> points) {
        int numClusters = clustering.size();
        double withinClustersDistance = 0;
        int numDistancesWithin = 0;

        double[] minWithinClusters = new double[numClusters];
        double[] maxWithinClusters = new double[numClusters];
        @SuppressWarnings("unchecked") // generic array creation; elements are filled below
        ArrayList<Integer>[] pointsInClusters = new ArrayList[numClusters];
        for (int c = 0; c < numClusters; c++) {
            pointsInClusters[c] = new ArrayList<Integer>();
            minWithinClusters[c] = Double.POSITIVE_INFINITY;
            // Fixed: Double.MIN_VALUE is the smallest POSITIVE double, so a
            // true maximum distance of 0.0 would never have been recorded.
            maxWithinClusters[c] = Double.NEGATIVE_INFINITY;
        }

        // assign points to clusters by inclusion probability
        for (int p = 0; p < points.size(); p++) {
            for (int c = 0; c < clustering.size(); c++) {
                if (clustering.get(c).getInclusionProbability(points.get(p)) > 0.8) {
                    pointsInClusters[c].add(p);
                }
            }
        }

        //calc within cluster distances + min and max values
        for (int c = 0; c < numClusters; c++) {
            ArrayList<Integer> pointsInC = pointsInClusters[c];
            for (int p = 0; p < pointsInC.size(); p++) {
                DataPoint point = points.get(pointsInC.get(p));
                for (int p1 = p + 1; p1 < pointsInC.size(); p1++) {
                    numDistancesWithin++;
                    DataPoint point1 = points.get(pointsInC.get(p1));
                    double dist = point.getDistance(point1);
                    withinClustersDistance += dist;
                    if (minWithinClusters[c] > dist) {
                        minWithinClusters[c] = dist;
                    }
                    if (maxWithinClusters[c] < dist) {
                        maxWithinClusters[c] = dist;
                    }
                }
            }
        }

        // global extrema over all clusters
        double minWithin = Double.POSITIVE_INFINITY;
        double maxWithin = Double.NEGATIVE_INFINITY;
        for (int c = 0; c < numClusters; c++) {
            if (minWithinClusters[c] < minWithin) {
                minWithin = minWithinClusters[c];
            }
            if (maxWithinClusters[c] > maxWithin) {
                maxWithin = maxWithinClusters[c];
            }
        }

        double cindex = 0;
        if (numDistancesWithin != 0) {
            double meanWithinClustersDistance = withinClustersDistance / numDistancesWithin;
            cindex = (meanWithinClustersDistance - minWithin) / (maxWithin - minWithin);
        }

        if (debug) {
            System.out.println("Min:" + Arrays.toString(minWithinClusters));
            System.out.println("Max:" + Arrays.toString(maxWithinClusters));
            System.out.println("totalWithin:" + numDistancesWithin);
        }
        return cindex;
    }
}
Java
/* * ChangeDetectionMeasures.java * Copyright (C) 2010 RWTH Aachen University, Germany * @author Jansen (moa@cs.rwth-aachen.de) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.evaluation; import java.util.ArrayList; import moa.cluster.Clustering; import moa.gui.visualization.DataPoint; public class ChangeDetectionMeasures extends MeasureCollection implements ClassificationMeasureCollection{ private boolean debug = false; public ChangeDetectionMeasures() { super(); } @Override public String[] getNames() { String[] names = {"Input","","","Ram-Hours","Time","Memory"}; return names; } @Override protected boolean[] getDefaultEnabled() { boolean [] defaults = {true,false,false,true,true,true}; return defaults; } public void evaluateClustering(Clustering clustering, Clustering trueClsutering, ArrayList<DataPoint> points) { } }
Java
/*
 *    WindowClassificationPerformanceEvaluator.java
 *    Copyright (C) 2009 University of Waikato, Hamilton, New Zealand
 *    @author Albert Bifet (abifet@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.evaluation;

import moa.core.Measurement;
import moa.core.ObjectRepository;
import moa.options.AbstractOptionHandler;
import moa.options.IntOption;
import moa.tasks.TaskMonitor;
import weka.core.Utils;
import weka.core.Instance;

/**
 * Classification evaluator that updates evaluation results using a sliding
 * window.
 *
 * @author Albert Bifet (abifet at cs dot waikato dot ac dot nz)
 * @version $Revision: 7 $
 */
public class WindowClassificationPerformanceEvaluator extends AbstractOptionHandler
        implements ClassificationPerformanceEvaluator {

    private static final long serialVersionUID = 1L;

    public IntOption widthOption = new IntOption("width",
            'w', "Size of Window", 1000);

    // Total instance weight seen since the last full reset (NOT windowed).
    protected double TotalweightObserved = 0;

    // Windowed sum of observed instance weights.
    protected Estimator weightObserved;

    // Windowed sum of correctly classified weight.
    protected Estimator weightCorrect;

    // Windowed correct weight of the "no-change" baseline that always
    // predicts the previously seen class (used for Kappa Temporal).
    protected Estimator weightCorrectNoChangeClassifier;

    // Class value of the most recently processed instance.
    protected double lastSeenClass;

    // Per-class windowed weight of true labels (Kappa column marginals).
    protected Estimator[] columnKappa;

    // Per-class windowed weight of predictions (Kappa row marginals).
    protected Estimator[] rowKappa;

    // Per-class windowed accuracy.
    protected Estimator[] classAccuracy;

    protected int numClasses;

    /**
     * Fixed-size ring buffer that maintains a running sum of the last
     * {@code sizeWindow} values added.
     */
    public class Estimator {

        protected double[] window;

        // Next write position in the ring buffer.
        protected int posWindow;

        // Number of slots filled so far (saturates at SizeWindow).
        protected int lenWindow;

        protected int SizeWindow;

        protected double sum;

        public Estimator(int sizeWindow) {
            window = new double[sizeWindow];
            SizeWindow = sizeWindow;
            posWindow = 0;
            lenWindow = 0;
        }

        public void add(double value) {
            // Evict the value being overwritten, then add the new one.
            sum -= window[posWindow];
            sum += value;
            window[posWindow] = value;
            posWindow++;
            if (posWindow == SizeWindow) {
                posWindow = 0;
            }
            if (lenWindow < SizeWindow) {
                lenWindow++;
            }
        }

        public double total() {
            return sum;
        }

        public double length() {
            return lenWindow;
        }
    }

    /*   public void setWindowWidth(int w) {
    this.width = w;
    reset();
    }*/
    @Override
    public void reset() {
        reset(this.numClasses);
    }

    /**
     * Re-initialises all windowed estimators for the given class count.
     *
     * @param numClasses number of classes of the stream
     */
    public void reset(int numClasses) {
        this.numClasses = numClasses;
        this.rowKappa = new Estimator[numClasses];
        this.columnKappa = new Estimator[numClasses];
        this.classAccuracy = new Estimator[numClasses];
        for (int i = 0; i < this.numClasses; i++) {
            this.rowKappa[i] = new Estimator(this.widthOption.getValue());
            this.columnKappa[i] = new Estimator(this.widthOption.getValue());
            this.classAccuracy[i] = new Estimator(this.widthOption.getValue());
        }
        this.weightCorrect = new Estimator(this.widthOption.getValue());
        this.weightCorrectNoChangeClassifier = new Estimator(this.widthOption.getValue());
        this.weightObserved = new Estimator(this.widthOption.getValue());
        this.TotalweightObserved = 0;
        this.lastSeenClass = 0;
    }

    /**
     * Updates all windowed statistics with one classified instance.
     * Zero-weight instances are ignored; the very first weighted instance
     * triggers a reset sized to the stream's class count.
     *
     * @param inst       the instance carrying the true class and weight
     * @param classVotes per-class votes; the arg-max is the prediction
     */
    @Override
    public void addResult(Instance inst, double[] classVotes) {
        double weight = inst.weight();
        int trueClass = (int) inst.classValue();
        if (weight > 0.0) {
            if (TotalweightObserved == 0) {
                // Lazy initialisation: size estimators to the real class count.
                reset(inst.dataset().numClasses());
            }
            this.TotalweightObserved += weight;
            this.weightObserved.add(weight);
            int predictedClass = Utils.maxIndex(classVotes);
            if (predictedClass == trueClass) {
                this.weightCorrect.add(weight);
            } else {
                this.weightCorrect.add(0);
            }
            //Add Kappa statistic information
            for (int i = 0; i < this.numClasses; i++) {
                this.rowKappa[i].add(i == predictedClass ? weight : 0);
                this.columnKappa[i].add(i == trueClass ? weight : 0);
            }
            // Baseline classifier that repeats the last seen class.
            if (this.lastSeenClass == trueClass) {
                this.weightCorrectNoChangeClassifier.add(weight);
            } else {
                this.weightCorrectNoChangeClassifier.add(0);
            }
            this.classAccuracy[trueClass].add(predictedClass == trueClass ? weight : 0.0);
            this.lastSeenClass = trueClass;
        }
    }

    @Override
    public Measurement[] getPerformanceMeasurements() {
        return new Measurement[]{
            new Measurement("classified instances",
            this.TotalweightObserved),
            new Measurement("classifications correct (percent)",
            getFractionCorrectlyClassified() * 100.0),
            new Measurement("Kappa Statistic (percent)",
            getKappaStatistic() * 100.0),
            new Measurement("Kappa Temporal Statistic (percent)",
            getKappaTemporalStatistic() * 100.0)
        };
    }

    public double getTotalWeightObserved() {
        return this.weightObserved.total();
    }

    public double getFractionCorrectlyClassified() {
        return this.weightObserved.total() > 0.0 ? (double) this.weightCorrect.total()
                / this.weightObserved.total() : 0.0;
    }

    /**
     * Cohen's kappa over the window: observed accuracy corrected by the
     * chance agreement implied by the row/column marginals.
     */
    public double getKappaStatistic() {
        if (this.weightObserved.total() > 0.0) {
            double p0 = this.weightCorrect.total() / this.weightObserved.total();
            double pc = 0;
            for (int i = 0; i < this.numClasses; i++) {
                pc += (this.rowKappa[i].total() / this.weightObserved.total())
                        * (this.columnKappa[i].total() / this.weightObserved.total());
            }
            return (p0 - pc) / (1 - pc);
        } else {
            return 0;
        }
    }

    /**
     * Kappa Temporal: accuracy corrected by the accuracy of the trivial
     * "predict the previous class" baseline.
     */
    public double getKappaTemporalStatistic() {
        if (this.weightObserved.total() > 0.0) {
            double p0 = this.weightCorrect.total() / this.weightObserved.total();
            double pc = this.weightCorrectNoChangeClassifier.total() / this.weightObserved.total();
            return (p0 - pc) / (1 - pc);
        } else {
            return 0;
        }
    }

    public double getFractionIncorrectlyClassified() {
        return 1.0 - getFractionCorrectlyClassified();
    }

    @Override
    public void getDescription(StringBuilder sb, int indent) {
        Measurement.getMeasurementsDescription(getPerformanceMeasurements(),
                sb, indent);
    }

    @Override
    public void prepareForUseImpl(TaskMonitor monitor,
            ObjectRepository repository) {
    }
}
Java
/*
 *    BasicConceptDriftPerformanceEvaluator.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Albert Bifet (abifet@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
package moa.evaluation;

import moa.AbstractMOAObject;
import moa.core.Measurement;
import weka.core.Instance;

/**
 * Evaluator for concept-drift detectors: counts detections, warnings,
 * ground-truth changes, and the average detection delay. The detector's
 * output is passed in through the classVotes array (see addResult).
 */
public class BasicConceptDriftPerformanceEvaluator extends AbstractMOAObject
        implements ClassificationPerformanceEvaluator {

    private static final long serialVersionUID = 1L;

    // Total instance weight processed.
    protected double weightObserved;

    // Weight of instances on which a change was signalled.
    protected double numberDetections;

    // Weight of detections that matched a pending ground-truth change.
    protected double numberDetectionsOccurred;

    // Weight of ground-truth changes seen in the stream.
    protected double numberChanges;

    // Weight of warning-zone entries (counted once per contiguous zone).
    protected double numberWarnings;

    // Instances elapsed since the last ground-truth change.
    protected double delay;

    // Accumulated absolute estimation error.
    protected double errorPrediction;

    // Accumulated detection delay over all matched detections.
    protected double totalDelay;

    // True while inside a contiguous warning zone.
    protected boolean isWarningZone;

    // Last observed input value (attribute 2 of the instance).
    protected double inputValues;

    @Override
    public void reset() {
        this.weightObserved = 0.0;
        this.numberDetections = 0.0;
        this.numberDetectionsOccurred = 0.0;
        this.errorPrediction = 0.0;
        this.numberChanges = 0.0;
        this.numberWarnings = 0.0;
        this.delay = 0.0;
        this.totalDelay = 0.0;
        this.isWarningZone = false;
        this.inputValues = 0.0;
        this.hasChangeOccurred = false;
    }

    // True while a ground-truth change is pending (not yet detected).
    private boolean hasChangeOccurred = false;

    /**
     * Feeds one instance plus the detector's outputs into the statistics.
     * Ignored unless the instance has positive weight and classVotes has
     * exactly 4 entries.
     *
     * @param inst       the instance; attribute (numAttributes-2) carries the
     *                   ground-truth change flag, attribute 0 the true value
     * @param classVotes detector output: [0] change flag, [1] warning flag,
     *                   [2] delay offset, [3] estimation
     */
    @Override
    public void addResult(Instance inst, double[] classVotes) {
        //classVotes[0] -> is Change
        //classVotes[1] -> is in Warning Zone
        //classVotes[2] -> delay
        //classVotes[3] -> estimation
        // NOTE(review): assumes the instance has at least 3 attributes —
        // confirm against the streams used with this evaluator.
        this.inputValues = inst.value(2);
        if (inst.weight() > 0.0 && classVotes.length == 4) {
            if (inst.numAttributes() > 1) {
                //if there is ground truth we monitor delay
                this.delay++;
            }
            this.weightObserved += inst.weight();
            if (classVotes[0] == 1.0) {
                //Change detected
                //System.out.println("Change detected with delay "+ this.delay );
                this.numberDetections += inst.weight();
                if (this.hasChangeOccurred == true) {
                    // Credit the detection and record its delay relative to
                    // the detector-reported offset.
                    this.totalDelay += this.delay - classVotes[2];
                    this.numberDetectionsOccurred += inst.weight();
                    this.hasChangeOccurred = false;
                }
            }
            // Warnings are only counted while a true change is pending.
            if (this.hasChangeOccurred && classVotes[1] == 1.0) {
                //Warning detected
                //System.out.println("Warning detected at "+getTotalWeightObserved());
                if (this.isWarningZone == false) {
                    this.numberWarnings += inst.weight();
                    this.isWarningZone = true;
                }
            } else {
                this.isWarningZone = false;
            }
            if (inst.numAttributes() > 1) {
                if (inst.value(inst.numAttributes() - 2) == 1.0) {//Attribute 1
                    //Ground truth Change
                    this.numberChanges += inst.weight();
                    this.delay = 0;
                    this.hasChangeOccurred = true;
                }
            }
            //Compute error prediction
            if (classVotes.length > 1) {
                this.errorPrediction += Math.abs(classVotes[3] - inst.value(0));
            }
        }
    }

    @Override
    public Measurement[] getPerformanceMeasurements() {
        Measurement[] measurement;
        //if (totalDelay == 0.0) {
        //No Ground Truth
        measurement = new Measurement[]{
            new Measurement("learned instances",
            getTotalWeightObserved()),
            new Measurement("detected changes",
            getNumberDetections()),
            new Measurement("detected warnings",
            getNumberWarnings()),
            new Measurement("prediction error (average)",
            getPredictionError() / getTotalWeightObserved()),
            new Measurement("true changes",
            getNumberChanges()),
            new Measurement("delay detection (average)",
            getTotalDelay() / getNumberChanges()),
            new Measurement("true changes detected",
            getNumberChangesOccurred()),
            new Measurement("input values",
            getInputValues())
        };
        /*  } else {
        measurement = new Measurement[]{
        new Measurement("learned instances",
        getTotalWeightObserved()),
        new Measurement("detected changes",
        getNumberDetections()),
        new Measurement("detected warnings",
        getNumberWarnings()),
        new Measurement("prediction error (average)",
        getPredictionError() / getTotalWeightObserved()),
        new Measurement("true changes",
        getNumberChanges()),
        new Measurement("delay detection (average)",
        getTotalDelay() / getNumberChanges()),
        new Measurement("true changes detected",
        getNumberChangesOccurred()),
        new Measurement("input values",
        getInputValues())
        };
        }*/
        return measurement;
    }

    // Returns 1.0 instead of 0 to keep the average-measurement divisions safe.
    public double getTotalWeightObserved() {
        return this.weightObserved > 0 ? this.weightObserved : 1.0;
    }

    public double getNumberDetections() {
        return this.numberDetections;
    }

    public double getInputValues() {
        return this.inputValues;
    }

    public double getPredictionError() {
        return this.errorPrediction;
    }

    public double getNumberChanges() {
        return this.numberChanges;
    }

    public double getNumberChangesOccurred() {
        return this.numberDetectionsOccurred;
    }

    public double getNumberWarnings() {
        return this.numberWarnings;
    }

    public double getTotalDelay() {
        return this.totalDelay;
    }

    @Override
    public void getDescription(StringBuilder sb, int indent) {
        Measurement.getMeasurementsDescription(getPerformanceMeasurements(),
                sb, indent);
    }
}
Java
/* * LearningPerformanceEvaluator.java * Copyright (C) 2009 University of Waikato, Hamilton, New Zealand * @author Albert Bifet (abifet@cs.waikato.ac.nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.evaluation; import moa.MOAObject; import moa.core.Measurement; /** * Interface implemented by learner evaluators to monitor * the results of the learning process. * * @author Albert Bifet (abifet@cs.waikato.ac.nz) * @version $Revision: 7 $ */ public interface LearningPerformanceEvaluator extends MOAObject { public void reset(); public void addLearningAttempt(int trueClass, double[] classVotes, double weight); public Measurement[] getPerformanceMeasurements(); }
Java
/*
 *    NullMonitor.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.tasks;

/**
 * Class that represents a null monitor.
 *
 * Null-object implementation of TaskMonitor: every mutator is a no-op and
 * every query returns a neutral value (never abort, never paused, never
 * cancelled, no preview, unknown progress).
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public class NullMonitor implements TaskMonitor {

    @Override
    public void setCurrentActivity(String activityDescription,
            double fracComplete) {
    }

    @Override
    public void setCurrentActivityDescription(String activity) {
    }

    @Override
    public void setCurrentActivityFractionComplete(double fracComplete) {
    }

    @Override
    public boolean taskShouldAbort() {
        return false;
    }

    @Override
    public String getCurrentActivityDescription() {
        return null;
    }

    @Override
    public double getCurrentActivityFractionComplete() {
        // -1.0 is the conventional "fraction unknown" sentinel.
        return -1.0;
    }

    @Override
    public boolean isPaused() {
        return false;
    }

    @Override
    public boolean isCancelled() {
        return false;
    }

    @Override
    public void requestCancel() {
    }

    @Override
    public void requestPause() {
    }

    @Override
    public void requestResume() {
    }

    @Override
    public Object getLatestResultPreview() {
        return null;
    }

    @Override
    public void requestResultPreview() {
    }

    @Override
    public boolean resultPreviewRequested() {
        return false;
    }

    @Override
    public void setLatestResultPreview(Object latestPreview) {
    }

    @Override
    public void requestResultPreview(ResultPreviewListener toInform) {
    }
}
Java
/*
 *    EvaluatePrequentialRegression.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *    @author Albert Bifet (abifet at cs dot waikato dot ac dot nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.tasks;

import java.io.File;
import java.io.FileOutputStream;
import java.io.PrintStream;

import moa.classifiers.Classifier;
import moa.classifiers.Regressor;
import moa.core.Measurement;
import moa.core.ObjectRepository;
import moa.core.TimingUtils;
import moa.evaluation.RegressionPerformanceEvaluator;
import moa.evaluation.WindowClassificationPerformanceEvaluator;
import moa.evaluation.EWMAClassificationPerformanceEvaluator;
import moa.evaluation.FadingFactorClassificationPerformanceEvaluator;
import moa.evaluation.LearningCurve;
import moa.evaluation.LearningEvaluation;
import moa.options.ClassOption;
import moa.options.FileOption;
import moa.options.FloatOption;
import moa.options.IntOption;
import moa.streams.InstanceStream;
import weka.core.Instance;
import weka.core.Utils;

/**
 * Task for evaluating a classifier on a stream by testing then training with
 * each example in sequence.
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @author Albert Bifet (abifet at cs dot waikato dot ac dot nz)
 * @version $Revision: 7 $
 */
public class EvaluatePrequentialRegression extends RegressionMainTask {

    @Override
    public String getPurposeString() {
        return "Evaluates a classifier on a stream by testing then training with each example in sequence.";
    }

    private static final long serialVersionUID = 1L;

    public ClassOption learnerOption = new ClassOption("learner", 'l',
            "Classifier to train.", Regressor.class, "trees.FIMTDD");

    public ClassOption streamOption = new ClassOption("stream", 's',
            "Stream to learn from.", InstanceStream.class,
            "generators.RandomTreeGenerator");

    public ClassOption evaluatorOption = new ClassOption("evaluator", 'e',
            "Classification performance evaluation method.",
            RegressionPerformanceEvaluator.class,
            "WindowRegressionPerformanceEvaluator");

    public IntOption instanceLimitOption = new IntOption("instanceLimit", 'i',
            "Maximum number of instances to test/train on  (-1 = no limit).",
            100000000, -1, Integer.MAX_VALUE);

    public IntOption timeLimitOption = new IntOption("timeLimit", 't',
            "Maximum number of seconds to test/train for (-1 = no limit).", -1,
            -1, Integer.MAX_VALUE);

    public IntOption sampleFrequencyOption = new IntOption("sampleFrequency",
            'f',
            "How many instances between samples of the learning performance.",
            100000, 0, Integer.MAX_VALUE);

    public IntOption memCheckFrequencyOption = new IntOption(
            "memCheckFrequency", 'q',
            "How many instances between memory bound checks.", 100000, 0,
            Integer.MAX_VALUE);

    public FileOption dumpFileOption = new FileOption("dumpFile", 'd',
            "File to append intermediate csv results to.", null, "csv", true);

    public FileOption outputPredictionFileOption = new FileOption("outputPredictionFile", 'o',
            "File to append output predictions to.", null, "pred", true);

    //New for prequential method DEPRECATED
    public IntOption widthOption = new IntOption("width",
            'w', "Size of Window", 1000);

    public FloatOption alphaOption = new FloatOption("alpha",
            'a', "Fading factor or exponential smoothing factor", .01);
    //End New for prequential methods

    @Override
    public Class<?> getTaskResultType() {
        return LearningCurve.class;
    }

    /**
     * Runs the prequential (test-then-train) loop: for every instance, first
     * obtain a prediction, feed it to the evaluator (and optionally the
     * prediction dump file), then train on the instance. Periodically records
     * a learning-curve entry with time and RAM-Hours cost.
     *
     * @param monitor    progress/abort monitor for the task
     * @param repository object repository (unused here)
     * @return the learning curve, or null if the task was aborted
     */
    @Override
    protected Object doMainTask(TaskMonitor monitor, ObjectRepository repository) {
        Classifier learner = (Classifier) getPreparedClassOption(this.learnerOption);
        InstanceStream stream = (InstanceStream) getPreparedClassOption(this.streamOption);
        RegressionPerformanceEvaluator evaluator = (RegressionPerformanceEvaluator) getPreparedClassOption(this.evaluatorOption);
        LearningCurve learningCurve = new LearningCurve(
                "learning evaluation instances");

        //New for prequential methods: the window/alpha options on this task
        //are deprecated — the evaluator itself must be configured instead.
        if (evaluator instanceof WindowClassificationPerformanceEvaluator) {
            //((WindowClassificationPerformanceEvaluator) evaluator).setWindowWidth(widthOption.getValue());
            if (widthOption.getValue() != 1000) {
                System.out.println("DEPRECATED! Use EvaluatePrequential -e (WindowClassificationPerformanceEvaluator -w " + widthOption.getValue() + ")");
                return learningCurve;
            }
        }
        if (evaluator instanceof EWMAClassificationPerformanceEvaluator) {
            //((EWMAClassificationPerformanceEvaluator) evaluator).setalpha(alphaOption.getValue());
            if (alphaOption.getValue() != .01) {
                System.out.println("DEPRECATED! Use EvaluatePrequential -e (EWMAClassificationPerformanceEvaluator -a " + alphaOption.getValue() + ")");
                return learningCurve;
            }
        }
        if (evaluator instanceof FadingFactorClassificationPerformanceEvaluator) {
            //((FadingFactorClassificationPerformanceEvaluator) evaluator).setalpha(alphaOption.getValue());
            if (alphaOption.getValue() != .01) {
                System.out.println("DEPRECATED! Use EvaluatePrequential -e (FadingFactorClassificationPerformanceEvaluator -a " + alphaOption.getValue() + ")");
                return learningCurve;
            }
        }
        //End New for prequential methods

        learner.setModelContext(stream.getHeader());
        int maxInstances = this.instanceLimitOption.getValue();
        long instancesProcessed = 0;
        int maxSeconds = this.timeLimitOption.getValue();
        int secondsElapsed = 0;
        monitor.setCurrentActivity("Evaluating learner...", -1.0);

        // Optional CSV dump of intermediate results (append if it exists).
        File dumpFile = this.dumpFileOption.getFile();
        PrintStream immediateResultStream = null;
        if (dumpFile != null) {
            try {
                if (dumpFile.exists()) {
                    immediateResultStream = new PrintStream(
                            new FileOutputStream(dumpFile, true), true);
                } else {
                    immediateResultStream = new PrintStream(
                            new FileOutputStream(dumpFile), true);
                }
            } catch (Exception ex) {
                throw new RuntimeException(
                        "Unable to open immediate result file: " + dumpFile, ex);
            }
        }

        //File for output predictions (append if it exists)
        File outputPredictionFile = this.outputPredictionFileOption.getFile();
        PrintStream outputPredictionResultStream = null;
        if (outputPredictionFile != null) {
            try {
                if (outputPredictionFile.exists()) {
                    outputPredictionResultStream = new PrintStream(
                            new FileOutputStream(outputPredictionFile, true), true);
                } else {
                    outputPredictionResultStream = new PrintStream(
                            new FileOutputStream(outputPredictionFile), true);
                }
            } catch (Exception ex) {
                throw new RuntimeException(
                        "Unable to open prediction result file: " + outputPredictionFile, ex);
            }
        }

        boolean firstDump = true;
        boolean preciseCPUTiming = TimingUtils.enablePreciseTiming();
        long evaluateStartTime = TimingUtils.getNanoCPUTimeOfCurrentThread();
        long lastEvaluateStartTime = evaluateStartTime;
        double RAMHours = 0.0;

        while (stream.hasMoreInstances()
                && ((maxInstances < 0) || (instancesProcessed < maxInstances))
                && ((maxSeconds < 0) || (secondsElapsed < maxSeconds))) {
            // Test then train: predict on a copy, then learn from the original.
            Instance trainInst = stream.nextInstance();
            Instance testInst = (Instance) trainInst.copy();
            double trueClass = trainInst.classValue();
            //testInst.setClassMissing();
            double[] prediction = learner.getVotesForInstance(testInst);

            // Output prediction. Fixed: guard against an untrained learner
            // returning an empty vote array, which previously threw
            // ArrayIndexOutOfBoundsException on prediction[0].
            if (outputPredictionFile != null) {
                double predictedValue = prediction.length > 0 ? prediction[0] : 0.0;
                outputPredictionResultStream.println(predictedValue + "," + trueClass);
            }

            //evaluator.addClassificationAttempt(trueClass, prediction, testInst.weight());
            evaluator.addResult(testInst, prediction);
            learner.trainOnInstance(trainInst);
            instancesProcessed++;

            // Periodic learning-curve sampling with time and RAM-Hours cost.
            if (instancesProcessed % this.sampleFrequencyOption.getValue() == 0
                    || stream.hasMoreInstances() == false) {
                long evaluateTime = TimingUtils.getNanoCPUTimeOfCurrentThread();
                double time = TimingUtils.nanoTimeToSeconds(evaluateTime - evaluateStartTime);
                double timeIncrement = TimingUtils.nanoTimeToSeconds(evaluateTime - lastEvaluateStartTime);
                double RAMHoursIncrement = learner.measureByteSize() / (1024.0 * 1024.0 * 1024.0); //GBs
                RAMHoursIncrement *= (timeIncrement / 3600.0); //Hours
                RAMHours += RAMHoursIncrement;
                lastEvaluateStartTime = evaluateTime;
                learningCurve.insertEntry(new LearningEvaluation(
                        new Measurement[]{
                            new Measurement(
                            "learning evaluation instances",
                            instancesProcessed),
                            new Measurement(
                            "evaluation time ("
                            + (preciseCPUTiming ? "cpu "
                            : "") + "seconds)",
                            time),
                            new Measurement(
                            "model cost (RAM-Hours)",
                            RAMHours)
                        },
                        evaluator, learner));

                if (immediateResultStream != null) {
                    if (firstDump) {
                        immediateResultStream.println(learningCurve.headerToString());
                        firstDump = false;
                    }
                    immediateResultStream.println(learningCurve.entryToString(learningCurve.numEntries() - 1));
                    immediateResultStream.flush();
                }
            }

            // Periodic monitor update: progress estimate, preview, abort check.
            if (instancesProcessed % INSTANCES_BETWEEN_MONITOR_UPDATES == 0) {
                if (monitor.taskShouldAbort()) {
                    return null;
                }
                long estimatedRemainingInstances = stream.estimatedRemainingInstances();
                if (maxInstances > 0) {
                    long maxRemaining = maxInstances - instancesProcessed;
                    if ((estimatedRemainingInstances < 0)
                            || (maxRemaining < estimatedRemainingInstances)) {
                        estimatedRemainingInstances = maxRemaining;
                    }
                }
                monitor.setCurrentActivityFractionComplete(estimatedRemainingInstances < 0 ? -1.0
                        : (double) instancesProcessed
                        / (double) (instancesProcessed + estimatedRemainingInstances));
                if (monitor.resultPreviewRequested()) {
                    monitor.setLatestResultPreview(learningCurve.copy());
                }
                secondsElapsed = (int) TimingUtils.nanoTimeToSeconds(TimingUtils.getNanoCPUTimeOfCurrentThread()
                        - evaluateStartTime);
            }
        }
        if (immediateResultStream != null) {
            immediateResultStream.close();
        }
        if (outputPredictionResultStream != null) {
            outputPredictionResultStream.close();
        }
        return learningCurve;
    }
}
Java
/*
 *    TaskMonitor.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.tasks;

/**
 * Interface representing a task monitor.
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public interface TaskMonitor {

    /**
     * Sets the description and the percentage done of the current activity.
     *
     * @param activityDescription the description of the current activity
     * @param fracComplete the percentage done of the current activity
     */
    public void setCurrentActivity(String activityDescription,
            double fracComplete);

    /**
     * Sets the description of the current activity.
     *
     * @param activity the description of the current activity
     */
    public void setCurrentActivityDescription(String activity);

    /**
     * Sets the percentage done of the current activity.
     *
     * @param fracComplete the percentage done of the current activity
     */
    public void setCurrentActivityFractionComplete(double fracComplete);

    /**
     * Gets whether the task should abort.
     *
     * @return true if the task should abort
     */
    public boolean taskShouldAbort();

    /**
     * Gets whether there is a request to preview the task result.
     *
     * @return true if there is a request to preview the task result
     */
    public boolean resultPreviewRequested();

    /**
     * Sets the current result to preview.
     *
     * @param latestPreview the result to preview
     */
    public void setLatestResultPreview(Object latestPreview);

    /**
     * Gets the description of the current activity.
     *
     * @return the description of the current activity
     */
    public String getCurrentActivityDescription();

    /**
     * Gets the percentage done of the current activity.
     *
     * @return the percentage done of the current activity
     */
    public double getCurrentActivityFractionComplete();

    /**
     * Requests the task monitored to pause.
     */
    public void requestPause();

    /**
     * Requests the task monitored to resume.
     */
    public void requestResume();

    /**
     * Requests the task monitored to cancel.
     */
    public void requestCancel();

    /**
     * Gets whether the task monitored is paused.
     *
     * @return true if the task is paused
     */
    public boolean isPaused();

    /**
     * Gets whether the task monitored is cancelled.
     *
     * @return true if the task is cancelled
     */
    public boolean isCancelled();

    /**
     * Requests to preview the task result.
     */
    public void requestResultPreview();

    /**
     * Requests to preview the task result.
     *
     * @param toInform the listener of the changes in the preview of the result
     */
    public void requestResultPreview(ResultPreviewListener toInform);

    /**
     * Gets the current result to preview.
     *
     * @return the result to preview
     */
    public Object getLatestResultPreview();
}
Java
/* * EvaluateInterleavedTestThenTrain.java * Copyright (C) 2007 University of Waikato, Hamilton, New Zealand * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.tasks; import java.io.File; import java.io.FileOutputStream; import java.io.PrintStream; import moa.classifiers.Classifier; import moa.core.Measurement; import moa.core.ObjectRepository; import moa.core.TimingUtils; import moa.evaluation.ClassificationPerformanceEvaluator; import moa.evaluation.LearningCurve; import moa.evaluation.LearningEvaluation; import moa.options.ClassOption; import moa.options.FileOption; import moa.options.IntOption; import moa.streams.InstanceStream; import weka.core.Instance; /** * Task for evaluating a classifier on a stream by testing then training with * each example in sequence. 
* * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision: 7 $ */ public class EvaluateInterleavedTestThenTrain extends MainTask { @Override public String getPurposeString() { return "Evaluates a classifier on a stream by testing then training with each example in sequence."; } private static final long serialVersionUID = 1L; public ClassOption learnerOption = new ClassOption("learner", 'l', "Classifier to train.", Classifier.class, "bayes.NaiveBayes"); public ClassOption streamOption = new ClassOption("stream", 's', "Stream to learn from.", InstanceStream.class, "generators.RandomTreeGenerator"); public IntOption randomSeedOption = new IntOption( "instanceRandomSeed", 'r', "Seed for random generation of instances.", 1); public ClassOption evaluatorOption = new ClassOption("evaluator", 'e', "Classification performance evaluation method.", ClassificationPerformanceEvaluator.class, "BasicClassificationPerformanceEvaluator"); public IntOption instanceLimitOption = new IntOption("instanceLimit", 'i', "Maximum number of instances to test/train on (-1 = no limit).", 100000000, -1, Integer.MAX_VALUE); public IntOption timeLimitOption = new IntOption("timeLimit", 't', "Maximum number of seconds to test/train for (-1 = no limit).", -1, -1, Integer.MAX_VALUE); public IntOption sampleFrequencyOption = new IntOption("sampleFrequency", 'f', "How many instances between samples of the learning performance.", 100000, 0, Integer.MAX_VALUE); public IntOption memCheckFrequencyOption = new IntOption( "memCheckFrequency", 'q', "How many instances between memory bound checks.", 100000, 0, Integer.MAX_VALUE); public FileOption dumpFileOption = new FileOption("dumpFile", 'd', "File to append intermediate csv reslts to.", null, "csv", true); @Override public Class<?> getTaskResultType() { return LearningCurve.class; } @Override protected Object doMainTask(TaskMonitor monitor, ObjectRepository repository) { String learnerString = this.learnerOption.getValueAsCLIString(); 
String streamString = this.streamOption.getValueAsCLIString(); //this.learnerOption.setValueViaCLIString(this.learnerOption.getValueAsCLIString() + " -r " +this.randomSeedOption); this.streamOption.setValueViaCLIString(streamString + " -i " + this.randomSeedOption.getValueAsCLIString()); Classifier learner = (Classifier) getPreparedClassOption(this.learnerOption); if (learner.isRandomizable()) { learner.setRandomSeed(this.randomSeedOption.getValue()); learner.resetLearning(); } InstanceStream stream = (InstanceStream) getPreparedClassOption(this.streamOption); ClassificationPerformanceEvaluator evaluator = (ClassificationPerformanceEvaluator) getPreparedClassOption(this.evaluatorOption); learner.setModelContext(stream.getHeader()); int maxInstances = this.instanceLimitOption.getValue(); long instancesProcessed = 0; int maxSeconds = this.timeLimitOption.getValue(); int secondsElapsed = 0; monitor.setCurrentActivity("Evaluating learner...", -1.0); LearningCurve learningCurve = new LearningCurve( "learning evaluation instances"); File dumpFile = this.dumpFileOption.getFile(); PrintStream immediateResultStream = null; if (dumpFile != null) { try { if (dumpFile.exists()) { immediateResultStream = new PrintStream( new FileOutputStream(dumpFile, true), true); } else { immediateResultStream = new PrintStream( new FileOutputStream(dumpFile), true); } } catch (Exception ex) { throw new RuntimeException( "Unable to open immediate result file: " + dumpFile, ex); } } boolean firstDump = true; boolean preciseCPUTiming = TimingUtils.enablePreciseTiming(); long evaluateStartTime = TimingUtils.getNanoCPUTimeOfCurrentThread(); long lastEvaluateStartTime = evaluateStartTime; double RAMHours = 0.0; while (stream.hasMoreInstances() && ((maxInstances < 0) || (instancesProcessed < maxInstances)) && ((maxSeconds < 0) || (secondsElapsed < maxSeconds))) { Instance trainInst = stream.nextInstance(); Instance testInst = (Instance) trainInst.copy(); int trueClass = (int) 
trainInst.classValue(); //testInst.setClassMissing(); double[] prediction = learner.getVotesForInstance(testInst); //evaluator.addClassificationAttempt(trueClass, prediction, testInst // .weight()); evaluator.addResult(testInst, prediction); learner.trainOnInstance(trainInst); instancesProcessed++; if (instancesProcessed % this.sampleFrequencyOption.getValue() == 0 || stream.hasMoreInstances() == false) { long evaluateTime = TimingUtils.getNanoCPUTimeOfCurrentThread(); double time = TimingUtils.nanoTimeToSeconds(evaluateTime - evaluateStartTime); double timeIncrement = TimingUtils.nanoTimeToSeconds(evaluateTime - lastEvaluateStartTime); double RAMHoursIncrement = learner.measureByteSize() / (1024.0 * 1024.0 * 1024.0); //GBs RAMHoursIncrement *= (timeIncrement / 3600.0); //Hours RAMHours += RAMHoursIncrement; lastEvaluateStartTime = evaluateTime; learningCurve.insertEntry(new LearningEvaluation( new Measurement[]{ new Measurement( "learning evaluation instances", instancesProcessed), new Measurement( "evaluation time (" + (preciseCPUTiming ? 
"cpu " : "") + "seconds)", time), new Measurement( "model cost (RAM-Hours)", RAMHours) }, evaluator, learner)); if (immediateResultStream != null) { if (firstDump) { immediateResultStream.print("Learner,stream,randomSeed,"); immediateResultStream.println(learningCurve.headerToString()); firstDump = false; } immediateResultStream.print(learnerString + "," + streamString + "," + this.randomSeedOption.getValueAsCLIString() + ","); immediateResultStream.println(learningCurve.entryToString(learningCurve.numEntries() - 1)); immediateResultStream.flush(); } } if (instancesProcessed % INSTANCES_BETWEEN_MONITOR_UPDATES == 0) { if (monitor.taskShouldAbort()) { return null; } long estimatedRemainingInstances = stream.estimatedRemainingInstances(); if (maxInstances > 0) { long maxRemaining = maxInstances - instancesProcessed; if ((estimatedRemainingInstances < 0) || (maxRemaining < estimatedRemainingInstances)) { estimatedRemainingInstances = maxRemaining; } } monitor.setCurrentActivityFractionComplete(estimatedRemainingInstances < 0 ? -1.0 : (double) instancesProcessed / (double) (instancesProcessed + estimatedRemainingInstances)); if (monitor.resultPreviewRequested()) { monitor.setLatestResultPreview(learningCurve.copy()); } secondsElapsed = (int) TimingUtils.nanoTimeToSeconds(TimingUtils.getNanoCPUTimeOfCurrentThread() - evaluateStartTime); } } if (immediateResultStream != null) { immediateResultStream.close(); } return learningCurve; } }
Java
/* * WriteStreamToARFFFile.java * Copyright (C) 2007 University of Waikato, Hamilton, New Zealand * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.tasks; import java.io.BufferedWriter; import java.io.File; import java.io.FileWriter; import java.io.Writer; import moa.core.ObjectRepository; import moa.options.ClassOption; import moa.options.FileOption; import moa.options.FlagOption; import moa.options.IntOption; import moa.streams.InstanceStream; /** * Task to output a stream to an ARFF file * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision: 7 $ */ public class WriteStreamToARFFFile extends MainTask { @Override public String getPurposeString() { return "Outputs a stream to an ARFF file."; } private static final long serialVersionUID = 1L; public ClassOption streamOption = new ClassOption("stream", 's', "Stream to write.", InstanceStream.class, "generators.RandomTreeGenerator"); public FileOption arffFileOption = new FileOption("arffFile", 'f', "Destination ARFF file.", null, "arff", true); public IntOption maxInstancesOption = new IntOption("maxInstances", 'm', "Maximum number of instances to write to file.", 10000000, 0, Integer.MAX_VALUE); public FlagOption suppressHeaderOption = new FlagOption("suppressHeader", 'h', "Suppress header from output."); @Override protected Object doMainTask(TaskMonitor 
monitor, ObjectRepository repository) { InstanceStream stream = (InstanceStream) getPreparedClassOption(this.streamOption); File destFile = this.arffFileOption.getFile(); if (destFile != null) { try { Writer w = new BufferedWriter(new FileWriter(destFile)); monitor.setCurrentActivityDescription("Writing stream to ARFF"); if (!this.suppressHeaderOption.isSet()) { w.write(stream.getHeader().toString()); w.write("\n"); } int numWritten = 0; while ((numWritten < this.maxInstancesOption.getValue()) && stream.hasMoreInstances()) { w.write(stream.nextInstance().toString()); w.write("\n"); numWritten++; } w.close(); } catch (Exception ex) { throw new RuntimeException( "Failed writing to file " + destFile, ex); } return "Stream written to ARFF file " + destFile; } throw new IllegalArgumentException("No destination file to write to."); } @Override public Class<?> getTaskResultType() { return String.class; } }
Java
/*
 *    LearnModel.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.tasks;

import moa.classifiers.Classifier;
import moa.core.ObjectRepository;
import moa.options.ClassOption;
import moa.options.IntOption;
import moa.streams.InstanceStream;

/**
 * Task for learning a model without any evaluation. Supports multiple passes
 * over the stream; the stream is restarted between passes.
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public class LearnModel extends MainTask {

    @Override
    public String getPurposeString() {
        return "Learns a model from a stream.";
    }

    private static final long serialVersionUID = 1L;

    public ClassOption learnerOption = new ClassOption("learner", 'l',
            "Classifier to train.", Classifier.class, "bayes.NaiveBayes");

    public ClassOption streamOption = new ClassOption("stream", 's',
            "Stream to learn from.", InstanceStream.class,
            "generators.RandomTreeGenerator");

    public IntOption maxInstancesOption = new IntOption("maxInstances", 'm',
            "Maximum number of instances to train on per pass over the data.",
            10000000, 0, Integer.MAX_VALUE);

    public IntOption numPassesOption = new IntOption("numPasses", 'p',
            "The number of passes to do over the data.", 1, 1,
            Integer.MAX_VALUE);

    public IntOption memCheckFrequencyOption = new IntOption(
            "memCheckFrequency", 'q',
            "How many instances between memory bound checks.", 100000, 0,
            Integer.MAX_VALUE);

    /** Default constructor; options keep their declared defaults. */
    public LearnModel() {
    }

    /**
     * Convenience constructor for programmatic use.
     *
     * @param learner the classifier to train
     * @param stream the stream to learn from
     * @param maxInstances maximum instances to train on per pass
     * @param numPasses number of passes over the data
     */
    public LearnModel(Classifier learner, InstanceStream stream,
            int maxInstances, int numPasses) {
        this.learnerOption.setCurrentObject(learner);
        this.streamOption.setCurrentObject(stream);
        this.maxInstancesOption.setValue(maxInstances);
        this.numPassesOption.setValue(numPasses);
    }

    @Override
    public Class<?> getTaskResultType() {
        return Classifier.class;
    }

    /**
     * Trains the learner on the stream for the configured number of passes.
     *
     * @param monitor used to report progress and honour abort/preview requests
     * @param repository unused here (inherited contract)
     * @return the trained classifier, or null if the task was aborted
     */
    @Override
    public Object doMainTask(TaskMonitor monitor, ObjectRepository repository) {
        Classifier learner = (Classifier) getPreparedClassOption(this.learnerOption);
        InstanceStream stream = (InstanceStream) getPreparedClassOption(this.streamOption);
        learner.setModelContext(stream.getHeader());
        int numPasses = this.numPassesOption.getValue();
        int maxInstances = this.maxInstancesOption.getValue();
        for (int pass = 0; pass < numPasses; pass++) {
            long instancesProcessed = 0;
            // Include the pass number in the activity description only when
            // more than one pass was requested.
            monitor.setCurrentActivity("Training learner"
                    + (numPasses > 1 ? (" (pass " + (pass + 1) + "/"
                    + numPasses + ")") : "") + "...", -1.0);
            if (pass > 0) {
                // Restart the stream so each pass sees the data from the start.
                stream.restart();
            }
            while (stream.hasMoreInstances()
                    && ((maxInstances < 0) || (instancesProcessed < maxInstances))) {
                learner.trainOnInstance(stream.nextInstance());
                instancesProcessed++;
                // Periodically service the monitor: abort, progress, previews.
                if (instancesProcessed % INSTANCES_BETWEEN_MONITOR_UPDATES == 0) {
                    if (monitor.taskShouldAbort()) {
                        return null;
                    }
                    long estimatedRemainingInstances = stream.estimatedRemainingInstances();
                    if (maxInstances > 0) {
                        long maxRemaining = maxInstances - instancesProcessed;
                        if ((estimatedRemainingInstances < 0)
                                || (maxRemaining < estimatedRemainingInstances)) {
                            estimatedRemainingInstances = maxRemaining;
                        }
                    }
                    monitor.setCurrentActivityFractionComplete(estimatedRemainingInstances < 0 ? -1.0
                            : (double) instancesProcessed
                            / (double) (instancesProcessed + estimatedRemainingInstances));
                    if (monitor.resultPreviewRequested()) {
                        monitor.setLatestResultPreview(learner.copy());
                    }
                }
            }
        }
        learner.setModelContext(stream.getHeader());
        return learner;
    }
}
Java
/*
 *    EvaluatePrequential.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *    @author Albert Bifet (abifet at cs dot waikato dot ac dot nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.tasks;

import java.io.File;
import java.io.FileOutputStream;
import java.io.PrintStream;

import moa.classifiers.Classifier;
import moa.core.Measurement;
import moa.core.ObjectRepository;
import moa.core.TimingUtils;
import moa.evaluation.ClassificationPerformanceEvaluator;
import moa.evaluation.WindowClassificationPerformanceEvaluator;
import moa.evaluation.EWMAClassificationPerformanceEvaluator;
import moa.evaluation.FadingFactorClassificationPerformanceEvaluator;
import moa.evaluation.LearningCurve;
import moa.evaluation.LearningEvaluation;
import moa.options.ClassOption;
import moa.options.FileOption;
import moa.options.FloatOption;
import moa.options.IntOption;
import moa.streams.InstanceStream;
import weka.core.Instance;
import weka.core.Utils;

/**
 * Task for evaluating a classifier on a stream by testing then training with
 * each example in sequence (prequential evaluation). Unlike
 * EvaluateInterleavedTestThenTrain, instances with a missing class label are
 * only used for training (semi-supervised setting), and predictions can
 * optionally be written to a separate output file.
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @author Albert Bifet (abifet at cs dot waikato dot ac dot nz)
 * @version $Revision: 7 $
 */
public class EvaluatePrequential extends MainTask {

    @Override
    public String getPurposeString() {
        return "Evaluates a classifier on a stream by testing then training with each example in sequence.";
    }

    private static final long serialVersionUID = 1L;

    public ClassOption learnerOption = new ClassOption("learner", 'l',
            "Classifier to train.", Classifier.class, "bayes.NaiveBayes");

    public ClassOption streamOption = new ClassOption("stream", 's',
            "Stream to learn from.", InstanceStream.class,
            "generators.RandomTreeGenerator");

    public ClassOption evaluatorOption = new ClassOption("evaluator", 'e',
            "Classification performance evaluation method.",
            ClassificationPerformanceEvaluator.class,
            "WindowClassificationPerformanceEvaluator");

    public IntOption instanceLimitOption = new IntOption("instanceLimit", 'i',
            "Maximum number of instances to test/train on  (-1 = no limit).",
            100000000, -1, Integer.MAX_VALUE);

    public IntOption timeLimitOption = new IntOption("timeLimit", 't',
            "Maximum number of seconds to test/train for (-1 = no limit).", -1,
            -1, Integer.MAX_VALUE);

    public IntOption sampleFrequencyOption = new IntOption("sampleFrequency",
            'f',
            "How many instances between samples of the learning performance.",
            100000, 0, Integer.MAX_VALUE);

    public IntOption memCheckFrequencyOption = new IntOption(
            "memCheckFrequency", 'q',
            "How many instances between memory bound checks.", 100000, 0,
            Integer.MAX_VALUE);

    public FileOption dumpFileOption = new FileOption("dumpFile", 'd',
            "File to append intermediate csv results to.", null, "csv", true);

    public FileOption outputPredictionFileOption = new FileOption("outputPredictionFile", 'o',
            "File to append output predictions to.", null, "pred", true);

    //New for prequential method DEPRECATED
    // Deprecated: window width now belongs to the evaluator's own options.
    public IntOption widthOption = new IntOption("width",
            'w', "Size of Window", 1000);

    // Deprecated: the fading/smoothing factor now belongs to the evaluator's
    // own options.
    public FloatOption alphaOption = new FloatOption("alpha",
            'a', "Fading factor or exponential smoothing factor", .01);
    //End New for prequential methods

    @Override
    public Class<?> getTaskResultType() {
        return LearningCurve.class;
    }

    /**
     * Runs the prequential evaluation.
     *
     * @param monitor    used to report progress and honour pause/cancel/preview
     * @param repository unused here (inherited contract)
     * @return the accumulated {@link LearningCurve}, or null if aborted; an
     *         empty curve is returned early when a deprecated option was used
     */
    @Override
    protected Object doMainTask(TaskMonitor monitor, ObjectRepository repository) {
        Classifier learner = (Classifier) getPreparedClassOption(this.learnerOption);
        InstanceStream stream = (InstanceStream) getPreparedClassOption(this.streamOption);
        ClassificationPerformanceEvaluator evaluator = (ClassificationPerformanceEvaluator) getPreparedClassOption(this.evaluatorOption);
        LearningCurve learningCurve = new LearningCurve(
                "learning evaluation instances");

        //New for prequential methods
        // If any deprecated task-level option was changed from its default,
        // tell the user to pass it to the evaluator instead and stop early.
        if (evaluator instanceof WindowClassificationPerformanceEvaluator) {
            //((WindowClassificationPerformanceEvaluator) evaluator).setWindowWidth(widthOption.getValue());
            if (widthOption.getValue() != 1000) {
                System.out.println("DEPRECATED! Use EvaluatePrequential -e (WindowClassificationPerformanceEvaluator -w " + widthOption.getValue() + ")");
                return learningCurve;
            }
        }
        if (evaluator instanceof EWMAClassificationPerformanceEvaluator) {
            //((EWMAClassificationPerformanceEvaluator) evaluator).setalpha(alphaOption.getValue());
            if (alphaOption.getValue() != .01) {
                System.out.println("DEPRECATED! Use EvaluatePrequential -e (EWMAClassificationPerformanceEvaluator -a " + alphaOption.getValue() + ")");
                return learningCurve;
            }
        }
        if (evaluator instanceof FadingFactorClassificationPerformanceEvaluator) {
            //((FadingFactorClassificationPerformanceEvaluator) evaluator).setalpha(alphaOption.getValue());
            if (alphaOption.getValue() != .01) {
                System.out.println("DEPRECATED! Use EvaluatePrequential -e (FadingFactorClassificationPerformanceEvaluator -a " + alphaOption.getValue() + ")");
                return learningCurve;
            }
        }
        //End New for prequential methods

        learner.setModelContext(stream.getHeader());
        int maxInstances = this.instanceLimitOption.getValue();
        long instancesProcessed = 0;
        int maxSeconds = this.timeLimitOption.getValue();
        int secondsElapsed = 0;
        monitor.setCurrentActivity("Evaluating learner...", -1.0);

        // Optional CSV dump file; append when it already exists.
        File dumpFile = this.dumpFileOption.getFile();
        PrintStream immediateResultStream = null;
        if (dumpFile != null) {
            try {
                if (dumpFile.exists()) {
                    immediateResultStream = new PrintStream(
                            new FileOutputStream(dumpFile, true), true);
                } else {
                    immediateResultStream = new PrintStream(
                            new FileOutputStream(dumpFile), true);
                }
            } catch (Exception ex) {
                throw new RuntimeException(
                        "Unable to open immediate result file: " + dumpFile, ex);
            }
        }
        //File for output predictions
        File outputPredictionFile = this.outputPredictionFileOption.getFile();
        PrintStream outputPredictionResultStream = null;
        if (outputPredictionFile != null) {
            try {
                if (outputPredictionFile.exists()) {
                    outputPredictionResultStream = new PrintStream(
                            new FileOutputStream(outputPredictionFile, true), true);
                } else {
                    outputPredictionResultStream = new PrintStream(
                            new FileOutputStream(outputPredictionFile), true);
                }
            } catch (Exception ex) {
                throw new RuntimeException(
                        "Unable to open prediction result file: " + outputPredictionFile, ex);
            }
        }
        boolean firstDump = true;
        boolean preciseCPUTiming = TimingUtils.enablePreciseTiming();
        long evaluateStartTime = TimingUtils.getNanoCPUTimeOfCurrentThread();
        long lastEvaluateStartTime = evaluateStartTime;
        double RAMHours = 0.0;
        // Main loop: stops on stream exhaustion, instance limit, or time limit.
        while (stream.hasMoreInstances()
                && ((maxInstances < 0) || (instancesProcessed < maxInstances))
                && ((maxSeconds < 0) || (secondsElapsed < maxSeconds))) {
            Instance trainInst = stream.nextInstance();
            Instance testInst = (Instance) trainInst.copy();
            if (testInst.classIsMissing() == false) { // Added for semisupervised setting: test only if we have the label
                double[] prediction = learner.getVotesForInstance(testInst);
                // Output prediction
                if (outputPredictionFile != null) {
                    outputPredictionResultStream.println(Utils.maxIndex(prediction) + "," + testInst.classValue());
                }
                evaluator.addResult(testInst, prediction);
            }
            learner.trainOnInstance(trainInst);
            instancesProcessed++;
            // Periodically record a learning-curve entry (and on stream end).
            if (instancesProcessed % this.sampleFrequencyOption.getValue() == 0
                    || stream.hasMoreInstances() == false) {
                long evaluateTime = TimingUtils.getNanoCPUTimeOfCurrentThread();
                double time = TimingUtils.nanoTimeToSeconds(evaluateTime - evaluateStartTime);
                double timeIncrement = TimingUtils.nanoTimeToSeconds(evaluateTime - lastEvaluateStartTime);
                // RAM-Hours: model size in GB multiplied by elapsed hours.
                double RAMHoursIncrement = learner.measureByteSize() / (1024.0 * 1024.0 * 1024.0); //GBs
                RAMHoursIncrement *= (timeIncrement / 3600.0); //Hours
                RAMHours += RAMHoursIncrement;
                lastEvaluateStartTime = evaluateTime;
                learningCurve.insertEntry(new LearningEvaluation(
                        new Measurement[]{
                            new Measurement(
                            "learning evaluation instances",
                            instancesProcessed),
                            new Measurement(
                            "evaluation time ("
                            + (preciseCPUTiming ? "cpu "
                            : "") + "seconds)",
                            time),
                            new Measurement(
                            "model cost (RAM-Hours)",
                            RAMHours)
                        },
                        evaluator, learner));
                if (immediateResultStream != null) {
                    if (firstDump) {
                        immediateResultStream.println(learningCurve.headerToString());
                        firstDump = false;
                    }
                    immediateResultStream.println(learningCurve.entryToString(learningCurve.numEntries() - 1));
                    immediateResultStream.flush();
                }
            }
            // Periodically service the monitor: abort, progress, previews.
            if (instancesProcessed % INSTANCES_BETWEEN_MONITOR_UPDATES == 0) {
                if (monitor.taskShouldAbort()) {
                    return null;
                }
                long estimatedRemainingInstances = stream.estimatedRemainingInstances();
                if (maxInstances > 0) {
                    long maxRemaining = maxInstances - instancesProcessed;
                    if ((estimatedRemainingInstances < 0)
                            || (maxRemaining < estimatedRemainingInstances)) {
                        estimatedRemainingInstances = maxRemaining;
                    }
                }
                monitor.setCurrentActivityFractionComplete(estimatedRemainingInstances < 0 ? -1.0
                        : (double) instancesProcessed
                        / (double) (instancesProcessed + estimatedRemainingInstances));
                if (monitor.resultPreviewRequested()) {
                    monitor.setLatestResultPreview(learningCurve.copy());
                }
                secondsElapsed = (int) TimingUtils.nanoTimeToSeconds(TimingUtils.getNanoCPUTimeOfCurrentThread()
                        - evaluateStartTime);
            }
        }
        if (immediateResultStream != null) {
            immediateResultStream.close();
        }
        if (outputPredictionResultStream != null) {
            outputPredictionResultStream.close();
        }
        return learningCurve;
    }
}
Java
/* * StandardTaskMonitor.java * Copyright (C) 2007 University of Waikato, Hamilton, New Zealand * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.tasks; /** * Class that represents a standard task monitor. * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision: 7 $ */ public class StandardTaskMonitor implements TaskMonitor { protected String currentActivityDescription = ""; protected double currentActivityFractionComplete = -1.0; protected volatile boolean cancelFlag = false; protected volatile boolean pauseFlag = false; protected volatile boolean isComplete = false; protected volatile boolean resultPreviewRequested = false; protected volatile Object latestResultPreview = null; protected volatile ResultPreviewListener resultPreviewer = null; @Override public void setCurrentActivity(String activityDescription, double fracComplete) { setCurrentActivityDescription(activityDescription); setCurrentActivityFractionComplete(fracComplete); } @Override public void setCurrentActivityDescription(String activity) { this.currentActivityDescription = activity; } @Override public void setCurrentActivityFractionComplete(double fracComplete) { this.currentActivityFractionComplete = fracComplete; } @Override public boolean taskShouldAbort() { if (this.pauseFlag) { try { synchronized (this) { while (this.pauseFlag && 
!this.cancelFlag) { wait(); } } } catch (InterruptedException e) { } } return this.cancelFlag; } @Override public String getCurrentActivityDescription() { return this.currentActivityDescription; } @Override public double getCurrentActivityFractionComplete() { return this.currentActivityFractionComplete; } @Override public boolean isCancelled() { return this.cancelFlag; } @Override public void requestCancel() { this.cancelFlag = true; requestResume(); } @Override public void requestPause() { this.pauseFlag = true; } @Override public synchronized void requestResume() { this.pauseFlag = false; notify(); } @Override public boolean isPaused() { return this.pauseFlag; } @Override public Object getLatestResultPreview() { return this.latestResultPreview; } @Override public void requestResultPreview() { this.resultPreviewRequested = true; } @Override public void requestResultPreview(ResultPreviewListener toInform) { this.resultPreviewer = toInform; this.resultPreviewRequested = true; } @Override public boolean resultPreviewRequested() { return this.resultPreviewRequested; } @Override public synchronized void setLatestResultPreview(Object latestPreview) { this.resultPreviewRequested = false; this.latestResultPreview = latestPreview; if (this.resultPreviewer != null) { this.resultPreviewer.latestPreviewChanged(); } this.resultPreviewer = null; } }
Java
/*
 *    Plot.java
 *    Copyright (C) 2010 Poznan University of Technology, Poznan, Poland
 *    @author Dariusz Brzezinski (dariusz.brzezinski@cs.put.poznan.pl)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.tasks;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStreamReader;

import moa.core.ObjectRepository;
import moa.options.FileOption;
import moa.options.FlagOption;
import moa.options.IntOption;
import moa.options.ListOption;
import moa.options.MultiChoiceOption;
import moa.options.StringOption;

/**
 * A task allowing to create and plot gnuplot scripts.
 *
 * <p>The task verifies the given csv input files, generates a gnuplot script
 * (*.plt) next to the requested output file, runs the gnuplot executable on
 * that script and returns the output file path together with anything gnuplot
 * printed on stderr.</p>
 *
 * @author Dariusz Brzezinski
 */
public class Plot extends MainTask {

    @Override
    public String getPurposeString() {
        return "Creates a Gnuplot script and plots a chart from a set of given csv files.";
    }

    private static final long serialVersionUID = 1L;

    /**
     * Path to gunplot's binary directory, for example C:\Tools\Gnuplot\binary.
     */
    public StringOption gnuplotPathOption = new StringOption("gnuplotPath",
            'e',
            "Directory of the gnuplot executable. For example \"C:\\Tools\\Gnuplot\\binary\".",
            "");

    /**
     * FileOption for selecting the plot output file.
     */
    public FileOption plotOutputOption = new FileOption("plotOutputFile", 'r',
            "File with the result plot (image).", null, "eps", true);

    /**
     * Comma separated list of input *csv files. The file paths can be absolute
     * or relative to the executing directory (moa.jar directory).
     */
    public ListOption inputFilesOption = new ListOption(
            "inputFiles",
            'i',
            "File names or file paths of csv inputs. Values should be seperated by commas.",
            new StringOption("inputFile", ' ', "Input file.", "algorithm.csv"),
            new StringOption[]{
                new StringOption("", ' ', "", "algorithm1.csv"),
                new StringOption("", ' ', "", "algorithm2.csv"),
                new StringOption("", ' ', "", "algorithm3.csv")}, ',');

    /**
     * Comma separated list of aliases for the input *csv files. If a legend is
     * added to the plot, aliases will be presented in the legend.
     */
    public ListOption fileAliasesOption = new ListOption(
            "aliases",
            'a',
            "Aliases for files stated in the inputFiles parameter. Aliases will be presented in the plot's legend.",
            new StringOption("alias", ' ', "File alias.", "MyAlg"),
            new StringOption[]{
                new StringOption("", ' ', "", "OZABag"),
                new StringOption("", ' ', "", "HOT"),
                new StringOption("", ' ', "", "AWE")}, ',');

    /**
     * Gnuplot terminal - postscript, png, pdf etc.
     */
    public MultiChoiceOption outputTypeOption = new MultiChoiceOption(
            "outputType", 't', "Gnuplot output terminal.",
            Terminal.getStringValues(), Terminal.getDescriptions(), 8);

    /**
     * Type of plot - dots, points, lines ets.
     */
    public MultiChoiceOption plotStyleOption = new MultiChoiceOption(
            "plotStyle", 'p', "Plot style.",
            PlotStyle.getStringValues(), PlotStyle.getDescriptions(), 2);

    /**
     * Index of the csv column from which values for the x-axis should be taken.
     */
    public IntOption xColumnOption = new IntOption(
            "xColumn",
            'x',
            "Index of the csv column from which values for the x-axis should be taken.",
            1);

    /**
     * Title of the plots' x-axis.
     */
    public StringOption xTitleOption = new StringOption("xTitle", 'm',
            "Title of the plots' x-axis.", "Processed instances");

    /**
     * Units displayed next to x-axis values.
     */
    public StringOption xUnitOption = new StringOption("xUnit", 'g',
            "Units displayed next to x-axis values.", "");

    /**
     * Index of the csv column from which values for the y-axis should be taken.
     */
    public IntOption yColumnOption = new IntOption(
            "yColumn",
            'y',
            "Index of the column from which values for the y-axis should be taken",
            9);

    /**
     * Title of the plots' y-axis.
     */
    public StringOption yTitleOption = new StringOption("yTitle", 'n',
            "Title of the plots' y-axis.", "Accuracy");

    /**
     * Units displayed next to y-axis values.
     */
    public StringOption yUnitOption = new StringOption("yUnit", 'u',
            "Units displayed next to y-axis values.", "%");

    /**
     * Plotted line width.
     */
    public IntOption lineWidthOption = new IntOption("lineWidth", 'w',
            "Determines the thickness of plotted lines", 2);

    /**
     * Interval between plotted data points.
     */
    public IntOption pointIntervalOption = new IntOption(
            "pointInterval",
            'v',
            "Determines the inteval between plotted data points. Used for LINESPOINTS plots only.",
            0, 0, Integer.MAX_VALUE);

    /**
     * Determines whether to smooth the plot with bezier curves.
     */
    public FlagOption smoothOption = new FlagOption("smooth", 's',
            "Determines whether to smooth the plot with bezier curves.");

    /**
     * Determines whether to delete gnuplot scripts after plotting.
     */
    public FlagOption deleteScriptsOption = new FlagOption("deleteScripts",
            'd', "Determines whether to delete gnuplot scripts after plotting.");

    /**
     * Legend (key) location on the plot.
     */
    public MultiChoiceOption legendLocationOption = new MultiChoiceOption(
            "legendLocation", 'l', "Legend (key) location on the plot.",
            LegendLocation.getStringValues(),
            LegendLocation.getDescriptions(), 8);

    /**
     * Legend elements' alignment.
     */
    public MultiChoiceOption legendTypeOption = new MultiChoiceOption(
            "legendType", 'k', "Legend elements' alignment.",
            LegendType.getStringValues(), LegendType.getDescriptions(), 1);

    /**
     * Addition pre-plot gunplot commands. For example "set tics out" will
     * change the default tic option and force outward facing tics. See the
     * gnuplot manual for more commands.
     */
    public StringOption additionalSetOption = new StringOption(
            "additionalCommands",
            'c',
            "Additional commands that should be added to the gnuplot script before the plot command. For example \"set tics out\" will change the default tic option and force outward facing tics. See the gnuplot manual for more commands.",
            " ");

    /**
     * Additional plot options. For example \"[] [0:]\" will force the y-axis to
     * start from 0. See the gnuplot manual for more options.
     */
    public StringOption additionalPlotOption = new StringOption(
            "additionalPlotOptions",
            'z',
            "Additional options that should be added to the gnuplot script in the plot statement. For example \"[] [0:]\" will force the y-axis to start from 0. See the gnuplot manual for more options.",
            " ");

    /**
     * Plot output terminal.
     * @author Dariusz Brzezinski
     */
    public enum Terminal {

        CANVAS, EPSLATEX, GIF, JPEG, LATEX, PDFCAIRO, PNG, POSTSCRIPT,
        POSTSCRIPT_COLOR, PSLATEX, PSTEX, PSTRICKS, SVG;

        private static String[] descriptions = new String[]{
            "HTML Canvas object",
            "LaTeX picture environment using graphicx package",
            "GIF images using libgd and TrueType fonts",
            "JPEG images using libgd and TrueType fonts",
            "LaTeX picture environment",
            "pdf terminal based on cairo",
            "PNG images using libgd and TrueType fonts",
            "PostScript graphics, including EPSF embedded files (*.eps)",
            "Color PostScript graphics, including EPSF embedded files (*.eps)",
            "LaTeX picture environment with PostScript specials",
            "plain TeX with PostScript specials",
            "LaTeX picture environment with PSTricks macros",
            "W3C Scalable Vector Graphics driver"};

        /**
         * Gets an array of string descriptions - one for each enum value.
         * @return a description for each enum value.
         */
        public static String[] getDescriptions() {
            return descriptions;
        }

        /**
         * Get string values for the enum values.
         * @return a set of string values for the enum values.
         */
        public static String[] getStringValues() {
            int i = 0;
            String[] result = new String[values().length];
            for (Terminal value : values()) {
                result[i++] = value.name();
            }
            return result;
        }
    }

    /**
     * Location of the legend on the plot.
     * @author Dariusz Brzezinski
     */
    public enum LegendLocation {

        TOP_LEFT_INSIDE, TOP_CENTER_INSIDE, TOP_RIGHT_INSIDE,
        LEFT_INSIDE, CENTER_INSIDE, RIGHT_INSIDE,
        BOTTOM_LEFT_INSIDE, BOTTOM_CENTER_INSIDE, BOTTOM_RIGHT_INSIDE,
        TOP_LEFT_OUTSIDE, TOP_CENTER_OUTSIDE, TOP_RIGHT_OUTSIDE,
        LEFT_OUTSIDE, CENTER_OUTSIDE, RIGHT_OUTSIDE,
        BOTTOM_LEFT_OUTSIDE, BOTTOM_CENTER_OUTSIDE, BOTTOM_RIGHT_OUTSIDE;

        /**
         * Gets an array of string descriptions - one for each enum value.
         * @return a description for each enum value.
         */
        public static String[] getDescriptions() {
            int i = 0;
            String[] result = new String[values().length];
            for (LegendLocation value : values()) {
                // e.g. TOP_LEFT_INSIDE -> "top left inside", the form gnuplot expects
                result[i++] = value.name().toLowerCase().replace('_', ' ');
            }
            return result;
        }

        /**
         * Get string values for the enum values.
         * @return a set of string values for the enum values.
         */
        public static String[] getStringValues() {
            int i = 0;
            String[] result = new String[values().length];
            for (LegendLocation value : values()) {
                result[i++] = value.name();
            }
            return result;
        }
    }

    /**
     * Type of legend.
     * @author Dariusz Brzezinski
     */
    public enum LegendType {

        NONE, BOX_VERTICAL, BOX_HORIZONTAL, NOBOX_VERTICAL, NOBOX_HORIZONTAL;

        /**
         * Gets an array of string descriptions - one for each enum value.
         * @return a description for each enum value.
         */
        public static String[] getDescriptions() {
            int i = 0;
            String[] result = new String[values().length];
            for (LegendType value : values()) {
                result[i++] = value.name().toLowerCase().replace('_', ' ');
            }
            return result;
        }

        /**
         * Get string values for the enum values.
         * @return a set of string values for the enum values.
         */
        public static String[] getStringValues() {
            int i = 0;
            String[] result = new String[values().length];
            for (LegendType value : values()) {
                result[i++] = value.name();
            }
            return result;
        }
    }

    /**
     * Gnuplot plotting style.
     * @author Dariusz Brzezinski
     */
    public enum PlotStyle {

        LINES, POINTS, LINESPOINTS, IMPULSES, STEPS, FSTEPS, HISTEPS, DOTS;

        private static String[] descriptions = new String[]{
            "It connects each data point with lines. Suitable to smoothly varying data.",
            "Symbols are shown at the data point location, can be used to plot experimental data.",
            "Draws lines and symbols at the same time.",
            "Draw vertical lines from each data point to X-axis. This is a bar-graph without width.",
            "Histogram type 1",
            "Histogram type 2",
            "Histogram type 3",
            "It displays dots, can be used when there many data points, but hard to see though.",};

        /**
         * Gets an array of string descriptions = one for each enum value.
         * @return a description for each enum value.
         */
        public static String[] getDescriptions() {
            return descriptions;
        }

        /**
         * Get string values for the enum values.
         * @return a set of string values for the enum values.
         */
        public static String[] getStringValues() {
            int i = 0;
            String[] result = new String[values().length];
            for (PlotStyle value : values()) {
                result[i++] = value.name();
            }
            return result;
        }
    }

    /**
     * Defines the task's result type.
     */
    @Override
    public Class<?> getTaskResultType() {
        return String.class;
    }

    /**
     * Verifies the options, writes the gnuplot script, executes gnuplot and
     * optionally deletes the script afterwards.
     *
     * @param monitor task monitor used to report progress and check for aborts
     * @param repository object repository (unused)
     * @return the plot file path followed by gnuplot's stderr output, or
     *         {@code null} if the task was aborted
     * @throws RuntimeException if the output file option is not set, the
     *         gnuplot directory or an input file is missing, or the number of
     *         aliases does not match the number of input files
     */
    @Override
    protected Object doMainTask(TaskMonitor monitor, ObjectRepository repository) {
        File resultFile = this.plotOutputOption.getFile();

        if (this.plotOutputOption.getFile() == null) {
            throw new RuntimeException("Plot output file option not set!");
        }

        String resultDirectory = (new File(resultFile.getAbsolutePath())).getParent();
        String gnuPlotPath = gnuplotPathOption.getValue();
        File gnuplotDir = new File(gnuPlotPath);
        if (!gnuplotDir.exists()) {
            throw new RuntimeException("Gnuplot directory not found: " + gnuPlotPath);
        }

        monitor.setCurrentActivity("Verifying input files...", 0.0);
        // each input file needs exactly one alias for the legend
        if (inputFilesOption.getList().length > fileAliasesOption.getList().length) {
            throw new RuntimeException("Too little aliases for input files!");
        } else if (inputFilesOption.getList().length < fileAliasesOption.getList().length) {
            throw new RuntimeException("Too many aliases for input files!");
        } else {
            for (int i = 0; i < inputFilesOption.getList().length; i++) {
                File inputFile = new File(
                        ((StringOption) inputFilesOption.getList()[i]).getValue());
                if (!inputFile.exists()) {
                    throw new RuntimeException("File not found: "
                            + inputFile.getAbsolutePath());
                }
            }
        }

        if (monitor.taskShouldAbort()) {
            return null;
        }

        monitor.setCurrentActivity("Creating script file...", 1.0 / 4.0);
        // the script lives next to the result file so relative csv paths keep working
        String gnuplotScriptPath = resultDirectory + File.separator
                + resultFile.getName() + ".plt";
        String script = createScript(resultFile);
        File scriptFile = writeScriptToFile(gnuplotScriptPath, script);

        if (monitor.taskShouldAbort()) {
            return null;
        }

        monitor.setCurrentActivity("Plotting data...", 2.0 / 4.0);
        String gnuplotCommand = gnuPlotPath + File.separator + "gnuplot \""
                + gnuplotScriptPath + "\"";
        String line;
        StringBuilder gnuplotOutput = new StringBuilder();
        try {
            Process p = Runtime.getRuntime().exec(gnuplotCommand);
            // gnuplot reports problems on stderr; reading until EOF also waits
            // for the process to finish writing
            BufferedReader err = new BufferedReader(
                    new InputStreamReader(p.getErrorStream()));
            while ((line = err.readLine()) != null) {
                gnuplotOutput.append(line).append(System.getProperty("line.separator"));
            }
            err.close();
        } catch (IOException ex) {
            throw new RuntimeException("Error while executing gnuplot script:"
                    + scriptFile, ex);
        }

        if (monitor.taskShouldAbort()) {
            return null;
        }

        if (deleteScriptsOption.isSet()) {
            monitor.setCurrentActivity("Deleting script...", 3.0 / 4.0);
            scriptFile.delete();
        }

        if (monitor.taskShouldAbort()) {
            return null;
        }

        monitor.setCurrentActivity("Done", 1.0);
        return resultFile.getAbsolutePath()
                + System.getProperty("line.separator") + gnuplotOutput;
    }

    /**
     * Method responsible for saving a gnuplot script to a file.
     * @param gnuplotScriptPath Path of the file
     * @param script gnuplot script content
     * @return the object of the saved file
     * @throws RuntimeException if the script file cannot be created or written
     */
    private File writeScriptToFile(String gnuplotScriptPath, String script) {
        File scriptFile = new File(gnuplotScriptPath);
        BufferedWriter writer;
        try {
            writer = new BufferedWriter(new FileWriter(scriptFile));
            writer.write(script);
            writer.close();
        } catch (IOException ex) {
            throw new RuntimeException(
                    "Unable to create or write to script file: " + scriptFile, ex);
        }
        return scriptFile;
    }

    /**
     * Creates the content of the gnuplot script.
     * @param resultFile path of the plot output file
     * @return gnuplot script
     */
    private String createScript(File resultFile) {
        String newLine = System.getProperty("line.separator");
        int sourceFileIdx = 0;
        StringBuilder script = new StringBuilder();

        // terminal options
        script.append("set term ")
                .append(terminalOptions(Terminal.valueOf(outputTypeOption.getChosenLabel())))
                .append(newLine);
        script.append("set output '").append(resultFile.getAbsolutePath()).append("'").append(newLine);
        script.append("set datafile separator ','").append(newLine);
        script.append("set grid").append(newLine);
        script.append("set style line 1 pt 8").append(newLine);
        script.append("set style line 2 lt rgb '#00C000'").append(newLine);
        script.append("set style line 5 lt rgb '#FFD800'").append(newLine);
        script.append("set style line 6 lt rgb '#4E0000'").append(newLine);
        script.append("set format x '%.0s %c").append(getAxisUnit(xUnitOption.getValue())).append("'").append(newLine);
        script.append("set format y '%.0s %c").append(getAxisUnit(yUnitOption.getValue())).append("'").append(newLine);
        script.append("set ylabel '").append(yTitleOption.getValue()).append("'").append(newLine);
        script.append("set xlabel '").append(xTitleOption.getValue()).append("'").append(newLine);

        // BUGFIX: getChosenLabel() returns a String, so comparing it with the
        // enum constant LegendType.NONE was always false and the key was
        // emitted even when the user chose NONE; compare with its name instead.
        if (!legendTypeOption.getChosenLabel().equals(LegendType.NONE.name())) {
            script.append("set key ")
                    .append(legendTypeOption.getChosenLabel().toLowerCase().replace('_', ' '))
                    .append(" ")
                    .append(legendLocationOption.getChosenLabel().toLowerCase().replace('_', ' '))
                    .append(newLine);
        }

        // additional commands
        // BUGFIX: terminate the user-supplied commands with a newline so they
        // do not fuse with the following "plot" statement.
        script.append(additionalSetOption.getValue()).append(newLine);

        // plot command
        script.append("plot ").append(additionalPlotOption.getValue()).append(" ");

        // plot for each input file
        for (int i = 0; i < inputFilesOption.getList().length; i++) {
            if (sourceFileIdx > 0) {
                script.append(", ");
            }
            sourceFileIdx++;

            script.append("'")
                    .append(((StringOption) inputFilesOption.getList()[i]).getValue())
                    .append("' using ")
                    .append(xColumnOption.getValue())
                    .append(":")
                    .append(yColumnOption.getValue());

            if (smoothOption.isSet()) {
                script.append(":(1.0) smooth bezier");
            }

            script.append(" with ")
                    .append(plotStyleOption.getChosenLabel().toLowerCase())
                    .append(" ls ").append(sourceFileIdx)
                    .append(" lw ").append(lineWidthOption.getValue());

            if (plotStyleOption.getChosenLabel().equals(PlotStyle.LINESPOINTS.toString())
                    && pointIntervalOption.getValue() > 0) {
                script.append(" pointinterval ").append(pointIntervalOption.getValue());
            }

            script.append(" title '")
                    .append(((StringOption) fileAliasesOption.getList()[i]).getValue())
                    .append("'");
        }
        script.append(newLine);

        return script.toString();
    }

    /**
     * Escapes axis units for gnuplot format strings ('%' must be doubled).
     * @param unit raw unit string from the options
     * @return the unit escaped for use inside a gnuplot format string
     */
    private String getAxisUnit(String unit) {
        if (unit.equals("%")) {
            return "%%";
        } else {
            return unit;
        }
    }

    /**
     * Maps a {@link Terminal} value onto the terminal clause of the script.
     * @param term chosen output terminal
     * @return gnuplot terminal options
     */
    private String terminalOptions(Terminal term) {
        String options;
        switch (term) {
            case POSTSCRIPT:
                options = "postscript enhanced";
                break;
            case POSTSCRIPT_COLOR:
                options = "postscript color enhanced";
                break;
            default:
                options = term.toString().toLowerCase();
                break;
        }
        return options;
    }
}
Java
/*
 *    EvaluateConceptDrift.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *    @author Albert Bifet (abifet at cs dot waikato dot ac dot nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.tasks;

import java.io.File;
import java.io.FileOutputStream;
import java.io.PrintStream;

import moa.core.Measurement;
import moa.core.ObjectRepository;
import moa.core.TimingUtils;
import moa.evaluation.ClassificationPerformanceEvaluator;
import moa.evaluation.LearningCurve;
import moa.evaluation.LearningEvaluation;
import moa.learners.ChangeDetectorLearner;
import moa.options.ClassOption;
import moa.options.FileOption;
import moa.options.IntOption;
import moa.streams.clustering.ClusterEvent;
import moa.streams.generators.cd.ConceptDriftGenerator;
import weka.core.Instance;
import weka.core.Utils;

/**
 * Task for evaluating a classifier on a stream by testing then training with
 * each example in sequence. A change-detector learner is evaluated against a
 * concept-drift generator; every detected change is recorded as a drift event.
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @author Albert Bifet (abifet at cs dot waikato dot ac dot nz)
 * @version $Revision: 7 $
 */
public class EvaluateConceptDrift extends ConceptDriftMainTask {

    @Override
    public String getPurposeString() {
        return "Evaluates a classifier on a stream by testing then training with each example in sequence.";
    }

    private static final long serialVersionUID = 1L;

    public ClassOption learnerOption = new ClassOption("learner", 'l',
            "Change detector to train.", ChangeDetectorLearner.class,
            "ChangeDetectorLearner");

    public ClassOption streamOption = new ClassOption("stream", 's',
            "Stream to learn from.", ConceptDriftGenerator.class,
            "GradualChangeGenerator");

    public ClassOption evaluatorOption = new ClassOption("evaluator", 'e',
            "Classification performance evaluation method.",
            ClassificationPerformanceEvaluator.class,
            "BasicConceptDriftPerformanceEvaluator");

    public IntOption instanceLimitOption = new IntOption("instanceLimit", 'i',
            "Maximum number of instances to test/train on  (-1 = no limit).",
            1000, -1, Integer.MAX_VALUE);

    public IntOption timeLimitOption = new IntOption("timeLimit", 't',
            "Maximum number of seconds to test/train for (-1 = no limit).", -1,
            -1, Integer.MAX_VALUE);

    public IntOption sampleFrequencyOption = new IntOption("sampleFrequency",
            'f',
            "How many instances between samples of the learning performance.",
            10, 0, Integer.MAX_VALUE);

    /*public IntOption memCheckFrequencyOption = new IntOption(
    "memCheckFrequency", 'q',
    "How many instances between memory bound checks.", 100000, 0,
    Integer.MAX_VALUE);*/
    public FileOption dumpFileOption = new FileOption("dumpFile", 'd',
            "File to append intermediate csv results to.", null, "csv", true);

    /*public FileOption outputPredictionFileOption = new FileOption("outputPredictionFile", 'o',
    "File to append output predictions to.", null, "pred", true);*/
    @Override
    public Class<?> getTaskResultType() {
        return LearningCurve.class;
    }

    /**
     * Runs the prequential (test-then-train) evaluation loop.
     *
     * @param monitor task monitor used for progress reporting and aborts
     * @param repository object repository (unused)
     * @return the resulting {@link LearningCurve}, or {@code null} if aborted
     * @throws RuntimeException if the immediate-result dump file cannot be opened
     */
    @Override
    protected Object doMainTask(TaskMonitor monitor, ObjectRepository repository) {
        ChangeDetectorLearner learner = (ChangeDetectorLearner) getPreparedClassOption(this.learnerOption);
        ConceptDriftGenerator stream = (ConceptDriftGenerator) getPreparedClassOption(this.streamOption);
        this.setEventsList(stream.getEventsList());
        ClassificationPerformanceEvaluator evaluator = (ClassificationPerformanceEvaluator) getPreparedClassOption(this.evaluatorOption);
        LearningCurve learningCurve = new LearningCurve(
                "learning evaluation instances");
        learner.setModelContext(stream.getHeader());
        int maxInstances = this.instanceLimitOption.getValue();
        long instancesProcessed = 0;
        int maxSeconds = this.timeLimitOption.getValue();
        int secondsElapsed = 0;
        monitor.setCurrentActivity("Evaluating learner...", -1.0);

        File dumpFile = this.dumpFileOption.getFile();
        PrintStream immediateResultStream = null;
        if (dumpFile != null) {
            try {
                if (dumpFile.exists()) {
                    immediateResultStream = new PrintStream(
                            new FileOutputStream(dumpFile, true), true);
                } else {
                    immediateResultStream = new PrintStream(
                            new FileOutputStream(dumpFile), true);
                }
            } catch (Exception ex) {
                throw new RuntimeException(
                        "Unable to open immediate result file: " + dumpFile, ex);
            }
        }
        //File for output predictions
        /*  File outputPredictionFile = this.outputPredictionFileOption.getFile();
        PrintStream outputPredictionResultStream = null;
        if (outputPredictionFile != null) {
        try {
        if (outputPredictionFile.exists()) {
        outputPredictionResultStream = new PrintStream(
        new FileOutputStream(outputPredictionFile, true), true);
        } else {
        outputPredictionResultStream = new PrintStream(
        new FileOutputStream(outputPredictionFile), true);
        }
        } catch (Exception ex) {
        throw new RuntimeException(
        "Unable to open prediction result file: " + outputPredictionFile, ex);
        }
        }*/
        boolean firstDump = true;
        boolean preciseCPUTiming = TimingUtils.enablePreciseTiming();
        long evaluateStartTime = TimingUtils.getNanoCPUTimeOfCurrentThread();
        long lastEvaluateStartTime = evaluateStartTime;
        double RAMHours = 0.0;
        // BUGFIX: the dump stream was leaked when the task aborted via the
        // early "return null" inside the loop; try/finally guarantees closure
        // on every exit path.
        try {
            while (stream.hasMoreInstances()
                    && ((maxInstances < 0) || (instancesProcessed < maxInstances))
                    && ((maxSeconds < 0) || (secondsElapsed < maxSeconds))) {
                Instance trainInst = stream.nextInstance();
                Instance testInst = (Instance) trainInst.copy();
                // retained for the commented-out prediction dump below
                int trueClass = (int) trainInst.classValue();
                //testInst.setClassMissing();
                double[] prediction = learner.getVotesForInstance(testInst);
                if (prediction[0] == 1) {
                    //Change detected
                    this.getEventsList().add(new ClusterEvent(this, instancesProcessed, "Detected Change", "Drift"));
                }

                // Output prediction
                /* if (outputPredictionFile != null) {
                outputPredictionResultStream.println(Utils.maxIndex(prediction) + "," + trueClass);
                }*/

                //evaluator.addClassificationAttempt(trueClass, prediction, testInst.weight());
                evaluator.addResult(testInst, prediction);
                learner.trainOnInstance(trainInst);
                instancesProcessed++;
                if (instancesProcessed % this.sampleFrequencyOption.getValue() == 0
                        || stream.hasMoreInstances() == false) {
                    long evaluateTime = TimingUtils.getNanoCPUTimeOfCurrentThread();
                    double time = TimingUtils.nanoTimeToSeconds(evaluateTime - evaluateStartTime);
                    double timeIncrement = TimingUtils.nanoTimeToSeconds(evaluateTime - lastEvaluateStartTime);
                    double RAMHoursIncrement = learner.measureByteSize() / (1024.0 * 1024.0 * 1024.0); //GBs
                    RAMHoursIncrement *= (timeIncrement / 3600.0); //Hours
                    RAMHours += RAMHoursIncrement;
                    lastEvaluateStartTime = evaluateTime;
                    learningCurve.insertEntry(new LearningEvaluation(
                            new Measurement[]{
                                new Measurement(
                                "learning evaluation instances",
                                instancesProcessed),
                                new Measurement(
                                "evaluation time ("
                                + (preciseCPUTiming ? "cpu " : "") + "seconds)",
                                time),
                                new Measurement(
                                "model cost (RAM-Hours)",
                                RAMHours)
                            },
                            evaluator, learner));

                    if (immediateResultStream != null) {
                        if (firstDump) {
                            immediateResultStream.println(learningCurve.headerToString());
                            firstDump = false;
                        }
                        immediateResultStream.println(learningCurve.entryToString(learningCurve.numEntries() - 1));
                        immediateResultStream.flush();
                    }
                }
                if (instancesProcessed % INSTANCES_BETWEEN_MONITOR_UPDATES == 0) {
                    if (monitor.taskShouldAbort()) {
                        return null;
                    }
                    long estimatedRemainingInstances = stream.estimatedRemainingInstances();
                    if (maxInstances > 0) {
                        long maxRemaining = maxInstances - instancesProcessed;
                        if ((estimatedRemainingInstances < 0)
                                || (maxRemaining < estimatedRemainingInstances)) {
                            estimatedRemainingInstances = maxRemaining;
                        }
                    }
                    monitor.setCurrentActivityFractionComplete(estimatedRemainingInstances < 0 ? -1.0
                            : (double) instancesProcessed / (double) (instancesProcessed + estimatedRemainingInstances));
                    if (monitor.resultPreviewRequested()) {
                        monitor.setLatestResultPreview(learningCurve.copy());
                    }
                    secondsElapsed = (int) TimingUtils.nanoTimeToSeconds(TimingUtils.getNanoCPUTimeOfCurrentThread()
                            - evaluateStartTime);
                }
            }
        } finally {
            if (immediateResultStream != null) {
                immediateResultStream.close();
            }
        }
        /*  if (outputPredictionResultStream != null) {
        outputPredictionResultStream.close();
        }*/
        return learningCurve;
    }
}
Java
/*
 *    EvaluateModelRegression.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.tasks;

import java.io.File;
import java.io.FileOutputStream;
import java.io.PrintStream;

import moa.classifiers.Classifier;
import moa.classifiers.Regressor;
import moa.core.ObjectRepository;
import moa.evaluation.ClassificationPerformanceEvaluator;
import moa.evaluation.LearningEvaluation;
import moa.evaluation.RegressionPerformanceEvaluator;
import moa.options.ClassOption;
import moa.options.FileOption;
import moa.options.IntOption;
import moa.streams.InstanceStream;
import weka.core.Instance;
import weka.core.Utils;

/**
 * Task for evaluating a static model on a stream.
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public class EvaluateModelRegression extends RegressionMainTask {

    @Override
    public String getPurposeString() {
        return "Evaluates a static model on a stream.";
    }

    private static final long serialVersionUID = 1L;

    public ClassOption modelOption = new ClassOption("model", 'm',
            "Classifier to evaluate.", Regressor.class, "LearnModelRegression");

    public ClassOption streamOption = new ClassOption("stream", 's',
            "Stream to evaluate on.", InstanceStream.class,
            "generators.RandomTreeGenerator");

    public ClassOption evaluatorOption = new ClassOption("evaluator", 'e',
            "Classification performance evaluation method.",
            RegressionPerformanceEvaluator.class,
            "BasicRegressionPerformanceEvaluator");

    public IntOption maxInstancesOption = new IntOption("maxInstances", 'i',
            "Maximum number of instances to test.", 1000000, 0,
            Integer.MAX_VALUE);

    public FileOption outputPredictionFileOption = new FileOption("outputPredictionFile", 'o',
            "File to append output predictions to.", null, "pred", true);

    public EvaluateModelRegression() {
    }

    /**
     * Convenience constructor that pre-populates the task options.
     *
     * @param model the prepared model to evaluate
     * @param stream the stream to evaluate on
     * @param evaluator the performance evaluator to use
     * @param maxInstances maximum number of instances to test
     */
    public EvaluateModelRegression(Classifier model, InstanceStream stream,
            ClassificationPerformanceEvaluator evaluator, int maxInstances) {
        this.modelOption.setCurrentObject(model);
        this.streamOption.setCurrentObject(stream);
        this.evaluatorOption.setCurrentObject(evaluator);
        this.maxInstancesOption.setValue(maxInstances);
    }

    @Override
    public Class<?> getTaskResultType() {
        return LearningEvaluation.class;
    }

    /**
     * Feeds stream instances to the static model, collecting predictions and
     * optionally appending them to the prediction dump file.
     *
     * @param monitor task monitor used for progress reporting and aborts
     * @param repository object repository (unused)
     * @return the final {@link LearningEvaluation}, or {@code null} if aborted
     * @throws RuntimeException if the prediction dump file cannot be opened
     */
    @Override
    public Object doMainTask(TaskMonitor monitor, ObjectRepository repository) {
        Classifier model = (Classifier) getPreparedClassOption(this.modelOption);
        InstanceStream stream = (InstanceStream) getPreparedClassOption(this.streamOption);
        ClassificationPerformanceEvaluator evaluator = (ClassificationPerformanceEvaluator) getPreparedClassOption(this.evaluatorOption);
        int maxInstances = this.maxInstancesOption.getValue();
        long instancesProcessed = 0;
        monitor.setCurrentActivity("Evaluating model...", -1.0);

        //File for output predictions
        File outputPredictionFile = this.outputPredictionFileOption.getFile();
        PrintStream outputPredictionResultStream = null;
        if (outputPredictionFile != null) {
            try {
                if (outputPredictionFile.exists()) {
                    outputPredictionResultStream = new PrintStream(
                            new FileOutputStream(outputPredictionFile, true), true);
                } else {
                    outputPredictionResultStream = new PrintStream(
                            new FileOutputStream(outputPredictionFile), true);
                }
            } catch (Exception ex) {
                throw new RuntimeException(
                        "Unable to open prediction result file: " + outputPredictionFile, ex);
            }
        }
        // BUGFIX: the prediction stream was leaked when the task aborted via
        // the early "return null" inside the loop; try/finally guarantees
        // closure on every exit path.
        try {
            while (stream.hasMoreInstances()
                    && ((maxInstances < 0) || (instancesProcessed < maxInstances))) {
                Instance testInst = (Instance) stream.nextInstance().copy();
                double trueClass = testInst.classValue();
                //testInst.setClassMissing();
                double[] prediction = model.getVotesForInstance(testInst);
                //evaluator.addClassificationAttempt(trueClass, prediction, testInst
                //		.weight());
                if (outputPredictionFile != null) {
                    outputPredictionResultStream.println(prediction[0] + "," + trueClass);
                }
                evaluator.addResult(testInst, prediction);
                instancesProcessed++;
                if (instancesProcessed % INSTANCES_BETWEEN_MONITOR_UPDATES == 0) {
                    if (monitor.taskShouldAbort()) {
                        return null;
                    }
                    long estimatedRemainingInstances = stream.estimatedRemainingInstances();
                    if (maxInstances > 0) {
                        long maxRemaining = maxInstances - instancesProcessed;
                        if ((estimatedRemainingInstances < 0)
                                || (maxRemaining < estimatedRemainingInstances)) {
                            estimatedRemainingInstances = maxRemaining;
                        }
                    }
                    monitor.setCurrentActivityFractionComplete(estimatedRemainingInstances < 0 ? -1.0
                            : (double) instancesProcessed / (double) (instancesProcessed + estimatedRemainingInstances));
                    if (monitor.resultPreviewRequested()) {
                        monitor.setLatestResultPreview(new LearningEvaluation(
                                evaluator, model));
                    }
                }
            }
        } finally {
            if (outputPredictionResultStream != null) {
                outputPredictionResultStream.close();
            }
        }
        return new LearningEvaluation(evaluator, model);
    }
}
Java
/*
 *    TaskThread.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.tasks;

import java.util.concurrent.CopyOnWriteArraySet;

import moa.core.ObjectRepository;
import moa.core.TimingUtils;

/**
 * Task Thread. Runs a single {@link Task} on a dedicated thread, tracking its
 * lifecycle state, CPU time, result previews and completion listeners.
 * Pause/resume/cancel are cooperative: they post requests on the task monitor
 * and the running task honours them at its next check.
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public class TaskThread extends Thread {

    // Lifecycle states; CANCELLING is transient until the task observes the
    // cancel request, after which the thread ends in CANCELLED.
    public static enum Status {

        NOT_STARTED, RUNNING, PAUSED, CANCELLING, CANCELLED, COMPLETED, FAILED
    }

    // The task executed by this thread.
    protected Task runningTask;

    // Current lifecycle state; volatile so UI threads see updates promptly.
    protected volatile Status currentStatus;

    // Monitor shared with the task: carries progress, pause/cancel requests
    // and result previews.
    protected TaskMonitor taskMonitor;

    // Repository handed to the task; may be null (see single-arg constructor).
    protected ObjectRepository repository;

    // Result of the task, or a FailedTaskReport if it threw.
    protected Object finalResult;

    // Per-thread CPU timestamps (nanoseconds) bracketing task execution.
    protected long taskStartTime;

    protected long taskEndTime;

    // CPU-seconds elapsed when the latest preview was requested.
    protected double latestPreviewGrabTime = 0.0;

    // Copy-on-write so listeners can be added/removed while a completion
    // notification is iterating the set.
    CopyOnWriteArraySet<TaskCompletionListener> completionListeners = new CopyOnWriteArraySet<TaskCompletionListener>();

    /**
     * Creates a thread for the given task with no object repository.
     *
     * @param toRun the task to execute
     */
    public TaskThread(Task toRun) {
        this(toRun, null);
    }

    /**
     * Creates a thread for the given task.
     *
     * @param toRun the task to execute
     * @param repository repository passed to the task; may be null
     */
    public TaskThread(Task toRun, ObjectRepository repository) {
        this.runningTask = toRun;
        this.repository = repository;
        this.currentStatus = Status.NOT_STARTED;
        this.taskMonitor = new StandardTaskMonitor();
        this.taskMonitor.setCurrentActivityDescription("Running task " + toRun);
    }

    /**
     * Executes the task, recording CPU time and final status. Any throwable
     * from the task is captured as a FailedTaskReport rather than propagated.
     */
    @Override
    public void run() {
        TimingUtils.enablePreciseTiming();
        this.taskStartTime = TimingUtils.getNanoCPUTimeOfThread(getId());
        try {
            this.currentStatus = Status.RUNNING;
            this.finalResult = this.runningTask.doTask(this.taskMonitor,
                    this.repository);
            // a task that returned after a cancel request ends as CANCELLED
            this.currentStatus = this.taskMonitor.isCancelled() ? Status.CANCELLED
                    : Status.COMPLETED;
        } catch (Throwable ex) {
            this.currentStatus = Status.FAILED;
            this.finalResult = new FailedTaskReport(ex);
        }
        this.taskEndTime = TimingUtils.getNanoCPUTimeOfThread(getId());
        fireTaskCompleted();
        this.taskMonitor.setLatestResultPreview(null); // free preview memory
    }

    /**
     * Requests a pause; only effective while the task is RUNNING.
     */
    public synchronized void pauseTask() {
        if (this.currentStatus == Status.RUNNING) {
            this.taskMonitor.requestPause();
            this.currentStatus = Status.PAUSED;
        }
    }

    /**
     * Requests a resume; only effective while the task is PAUSED.
     */
    public synchronized void resumeTask() {
        if (this.currentStatus == Status.PAUSED) {
            this.taskMonitor.requestResume();
            this.currentStatus = Status.RUNNING;
        }
    }

    /**
     * Requests cancellation of a RUNNING or PAUSED task. The status becomes
     * CANCELLING until the task observes the request and returns.
     */
    public synchronized void cancelTask() {
        if ((this.currentStatus == Status.RUNNING)
                || (this.currentStatus == Status.PAUSED)) {
            this.taskMonitor.requestCancel();
            this.currentStatus = Status.CANCELLING;
        }
    }

    /**
     * Gets the CPU seconds consumed by this thread so far: zero before start,
     * a live reading while running, and the final measurement once complete.
     *
     * @return CPU seconds elapsed, never negative
     */
    public double getCPUSecondsElapsed() {
        double secondsElapsed = 0.0;
        if (this.currentStatus == Status.NOT_STARTED) {
            secondsElapsed = 0.0;
        } else if (isComplete()) {
            secondsElapsed = TimingUtils.nanoTimeToSeconds(this.taskEndTime
                    - this.taskStartTime);
        } else {
            secondsElapsed = TimingUtils.nanoTimeToSeconds(TimingUtils.getNanoCPUTimeOfThread(getId())
                    - this.taskStartTime);
        }
        return secondsElapsed > 0.0 ? secondsElapsed : 0.0;
    }

    /**
     * Gets the task being run by this thread.
     *
     * @return the running task
     */
    public Task getTask() {
        return this.runningTask;
    }

    /**
     * Gets a human-readable form of the current status.
     *
     * @return the current status as a lowercase string
     */
    public String getCurrentStatusString() {
        switch (this.currentStatus) {
            case NOT_STARTED:
                return "not started";
            case RUNNING:
                return "running";
            case PAUSED:
                return "paused";
            case CANCELLING:
                return "cancelling";
            case CANCELLED:
                return "cancelled";
            case COMPLETED:
                return "completed";
            case FAILED:
                return "failed";
        }
        return "unknown";
    }

    /**
     * Gets the description of the task's current activity, or an empty string
     * when the task has not started or has finished.
     *
     * @return the current activity description
     */
    public String getCurrentActivityString() {
        return (isComplete() || (this.currentStatus == Status.NOT_STARTED)) ? ""
                : this.taskMonitor.getCurrentActivityDescription();
    }

    /**
     * Gets the fraction of the current activity completed: 0 before start,
     * the monitor's live estimate while active, 1 once finished.
     *
     * @return completion fraction in [0, 1], or the monitor's value
     */
    public double getCurrentActivityFracComplete() {
        switch (this.currentStatus) {
            case NOT_STARTED:
                return 0.0;
            case RUNNING:
            case PAUSED:
            case CANCELLING:
                return this.taskMonitor.getCurrentActivityFractionComplete();
            case CANCELLED:
            case COMPLETED:
            case FAILED:
                return 1.0;
        }
        return 0.0;
    }

    /**
     * Checks whether the task has reached a terminal state
     * (CANCELLED, COMPLETED or FAILED).
     *
     * @return true if the task will do no more work
     */
    public boolean isComplete() {
        return ((this.currentStatus == Status.CANCELLED)
                || (this.currentStatus == Status.COMPLETED)
                || (this.currentStatus == Status.FAILED));
    }

    /**
     * Gets the task's result, or the FailedTaskReport if it failed.
     *
     * @return the final result; null until the task finishes
     */
    public Object getFinalResult() {
        return this.finalResult;
    }

    /**
     * Registers a listener to be notified when the task completes.
     *
     * @param tcl the listener to add
     */
    public void addTaskCompletionListener(TaskCompletionListener tcl) {
        this.completionListeners.add(tcl);
    }

    /**
     * Removes a previously registered completion listener.
     *
     * @param tcl the listener to remove
     */
    public void removeTaskCompletionListener(TaskCompletionListener tcl) {
        this.completionListeners.remove(tcl);
    }

    // Notifies all registered listeners; runs on this task thread.
    protected void fireTaskCompleted() {
        for (TaskCompletionListener listener : this.completionListeners) {
            listener.taskCompleted(this);
        }
    }

    /**
     * Asks the running task for a result preview and records when it was
     * requested (in CPU seconds).
     *
     * @param previewer callback invoked when the preview is ready
     */
    public void getPreview(ResultPreviewListener previewer) {
        this.taskMonitor.requestResultPreview(previewer);
        this.latestPreviewGrabTime = getCPUSecondsElapsed();
    }

    /**
     * Gets the most recent result preview, if any.
     *
     * @return the latest preview object; may be null
     */
    public Object getLatestResultPreview() {
        return this.taskMonitor.getLatestResultPreview();
    }

    /**
     * Gets the CPU-seconds timestamp of the latest preview request.
     *
     * @return CPU seconds elapsed when the preview was grabbed
     */
    public double getLatestPreviewGrabTimeSeconds() {
        return this.latestPreviewGrabTime;
    }
}
Java
/* * EvaluateOnlineRecommender.java * Copyright (C) 2012 Universitat Politecnica de Catalunya * @author Alex Catarineu (a.catarineu@gmail.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.tasks; import moa.core.Measurement; import moa.core.ObjectRepository; import moa.evaluation.LearningCurve; import moa.evaluation.LearningEvaluation; import moa.options.ClassOption; import moa.options.IntOption; import moa.recommender.dataset.Dataset; import moa.recommender.predictor.RatingPredictor; import moa.recommender.rc.data.RecommenderData; /** * Test for evaluating a recommender by training and periodically testing * on samples from a rating dataset. When finished, it will show the learning * curve of the recommender rating predictor. 
* * <p>Parameters:</p> * <ul> * <li> d: dataset - the dataset to be used to train/test the rating predictor.</li> * <li> f: sample frequency - the frequency in which a rating from the dataset will be used to test the model </li> * </ul> * * @author Alex Catarineu (a.catarineu@gmail.com) * @version $Revision: 7 $ */ public class EvaluateOnlineRecommender extends MainTask { @Override public String getPurposeString() { return "Evaluates a online reccommender system."; } private static final long serialVersionUID = 1L; public ClassOption datasetOption = new ClassOption("dataset", 'd', "Dataset to evaluate.", Dataset.class, "moa.recommender.dataset.impl.MovielensDataset"); public ClassOption ratingPredictorOption = new ClassOption("ratingPredictor", 's', "Rating Predictor to evaluate on.", RatingPredictor.class, "moa.recommender.predictor.BRISMFPredictor"); public IntOption sampleFrequencyOption = new IntOption("sampleFrequency", 'f', "How many instances between samples of the learning performance.", 100, 0, Integer.MAX_VALUE); public EvaluateOnlineRecommender() { } @Override public Class<?> getTaskResultType() { return LearningCurve.class; } @Override public Object doMainTask(TaskMonitor monitor, ObjectRepository repository) { Dataset d = (Dataset) getPreparedClassOption(this.datasetOption); RatingPredictor rp = (RatingPredictor)getPreparedClassOption(this.ratingPredictorOption); LearningCurve learningCurve = new LearningCurve("n"); RecommenderData data = rp.getData(); data.clear(); data.disableUpdates(false); long start = System.currentTimeMillis(); long evalTime = 0; double sum = 0; int n = 0; //ArrayList<TestMetric> metrics = new ArrayList<TestMetric>(); int sampleFrequency = this.sampleFrequencyOption.getValue(); int count = 0; while (d.next()) ++count; d.reset(); while (d.next()) { Integer user = d.curUserID(); Integer item = d.curItemID(); Double rating = d.curRating(); long startPredTime = System.currentTimeMillis(); double pred = rp.predictRating(user, item); 
sum += Math.pow(pred - rating, 2); evalTime += System.currentTimeMillis() - startPredTime; data.setRating(user, item, rating); //System.out.println(data.countRatingsItem(item) + " " + data.countRatingsUser(user)); //if (n++%100 == 99) metrics.add(new TestMetric("RMSE (" + n +")", Math.sqrt(sum/(double)n))); n++; if (n%sampleFrequency == sampleFrequency-1) { if (monitor.taskShouldAbort()) { return null; } monitor.setCurrentActivityFractionComplete((double)n/(double)count); learningCurve.insertEntry(new LearningEvaluation( new Measurement[]{ new Measurement( "n", n), new Measurement( "RMSE", Math.sqrt(sum/(double)n)), new Measurement( "trainingTime", (int)((System.currentTimeMillis() - start - evalTime)/1000)), new Measurement( "evalTime", (int)(evalTime/1000)) } )); if (monitor.resultPreviewRequested()) { monitor.setLatestResultPreview(learningCurve.headerToString() + "\n" + learningCurve.entryToString(learningCurve.numEntries() - 1)); } } } //System.out.println(n + " " + Math.sqrt(sum/(double)n)); //metrics.add(new TestMetric("RMSE (" + n +")", Math.sqrt(sum/(double)n))); // long trainingTime = System.currentTimeMillis() - start - evalTime; //return new TestStatistics((int)(trainingTime/1000), // (int)(evalTime/1000), // metrics.toArray(new TestMetric[metrics.size()])); return learningCurve; } }
Java
/* * ResultPreviewListener.java * Copyright (C) 2007 University of Waikato, Hamilton, New Zealand * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.tasks; /** * Interface implemented by classes that preview results * on the Graphical User Interface * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision: 7 $ */ public interface ResultPreviewListener { /** * This method is used to receive a signal from * <code>TaskMonitor</code> that the lastest preview has * changed. This method is implemented in <code>PreviewPanel</code> * to change the results that are shown in its panel. * */ public void latestPreviewChanged(); }
Java
/*
 *    EvaluateModel.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.tasks;

import java.io.File;
import java.io.FileOutputStream;
import java.io.PrintStream;

import moa.classifiers.Classifier;
import moa.core.ObjectRepository;
import moa.evaluation.ClassificationPerformanceEvaluator;
import moa.evaluation.LearningEvaluation;
import moa.options.ClassOption;
import moa.options.FileOption;
import moa.options.IntOption;
import moa.streams.InstanceStream;
import weka.core.Instance;
import weka.core.Utils;

/**
 * Task for evaluating a static model on a stream.
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public class EvaluateModel extends MainTask {

    @Override
    public String getPurposeString() {
        return "Evaluates a static model on a stream.";
    }

    private static final long serialVersionUID = 1L;

    /** The already-trained classifier to evaluate (not trained further here). */
    public ClassOption modelOption = new ClassOption("model", 'm',
            "Classifier to evaluate.", Classifier.class, "LearnModel");

    /** Stream providing the test instances. */
    public ClassOption streamOption = new ClassOption("stream", 's',
            "Stream to evaluate on.", InstanceStream.class,
            "generators.RandomTreeGenerator");

    /** Evaluation method used to accumulate performance statistics. */
    public ClassOption evaluatorOption = new ClassOption("evaluator", 'e',
            "Classification performance evaluation method.",
            ClassificationPerformanceEvaluator.class,
            "BasicClassificationPerformanceEvaluator");

    public IntOption maxInstancesOption = new IntOption("maxInstances", 'i',
            "Maximum number of instances to test.", 1000000, 0,
            Integer.MAX_VALUE);

    /** Optional file to which "predictedClass,trueClass" lines are appended. */
    public FileOption outputPredictionFileOption = new FileOption("outputPredictionFile", 'o',
            "File to append output predictions to.", null, "pred", true);

    public EvaluateModel() {
    }

    public EvaluateModel(Classifier model, InstanceStream stream,
            ClassificationPerformanceEvaluator evaluator, int maxInstances) {
        this.modelOption.setCurrentObject(model);
        this.streamOption.setCurrentObject(stream);
        this.evaluatorOption.setCurrentObject(evaluator);
        this.maxInstancesOption.setValue(maxInstances);
    }

    @Override
    public Class<?> getTaskResultType() {
        return LearningEvaluation.class;
    }

    /**
     * Feeds stream instances to the model, collecting results in the
     * evaluator, until the stream ends or maxInstances is reached.
     *
     * <p>Fix: the prediction output stream is now closed in a finally block,
     * so it is no longer leaked when the task is aborted (early
     * {@code return null}) or when an exception escapes the loop.</p>
     *
     * @param monitor task monitor for abort/progress/preview signalling
     * @param repository object repository (unused here)
     * @return a {@link LearningEvaluation}, or null if aborted
     */
    @Override
    public Object doMainTask(TaskMonitor monitor, ObjectRepository repository) {
        Classifier model = (Classifier) getPreparedClassOption(this.modelOption);
        InstanceStream stream = (InstanceStream) getPreparedClassOption(this.streamOption);
        ClassificationPerformanceEvaluator evaluator =
                (ClassificationPerformanceEvaluator) getPreparedClassOption(this.evaluatorOption);
        int maxInstances = this.maxInstancesOption.getValue();
        long instancesProcessed = 0;
        monitor.setCurrentActivity("Evaluating model...", -1.0);
        // File for output predictions (optional).
        File outputPredictionFile = this.outputPredictionFileOption.getFile();
        PrintStream outputPredictionResultStream = null;
        if (outputPredictionFile != null) {
            try {
                if (outputPredictionFile.exists()) {
                    // Append to an existing prediction file.
                    outputPredictionResultStream = new PrintStream(
                            new FileOutputStream(outputPredictionFile, true), true);
                } else {
                    outputPredictionResultStream = new PrintStream(
                            new FileOutputStream(outputPredictionFile), true);
                }
            } catch (Exception ex) {
                throw new RuntimeException(
                        "Unable to open prediction result file: " + outputPredictionFile,
                        ex);
            }
        }
        try {
            while (stream.hasMoreInstances()
                    && ((maxInstances < 0) || (instancesProcessed < maxInstances))) {
                Instance testInst = (Instance) stream.nextInstance().copy();
                int trueClass = (int) testInst.classValue();
                // The class value is left on the instance; the evaluator reads
                // it from testInst itself when scoring the prediction.
                double[] prediction = model.getVotesForInstance(testInst);
                if (outputPredictionResultStream != null) {
                    outputPredictionResultStream.println(Utils.maxIndex(prediction) + "," + trueClass);
                }
                evaluator.addResult(testInst, prediction);
                instancesProcessed++;
                if (instancesProcessed % INSTANCES_BETWEEN_MONITOR_UPDATES == 0) {
                    if (monitor.taskShouldAbort()) {
                        return null;
                    }
                    long estimatedRemainingInstances = stream.estimatedRemainingInstances();
                    if (maxInstances > 0) {
                        long maxRemaining = maxInstances - instancesProcessed;
                        if ((estimatedRemainingInstances < 0)
                                || (maxRemaining < estimatedRemainingInstances)) {
                            estimatedRemainingInstances = maxRemaining;
                        }
                    }
                    monitor.setCurrentActivityFractionComplete(
                            estimatedRemainingInstances < 0 ? -1.0
                            : (double) instancesProcessed
                              / (double) (instancesProcessed + estimatedRemainingInstances));
                    if (monitor.resultPreviewRequested()) {
                        monitor.setLatestResultPreview(new LearningEvaluation(evaluator, model));
                    }
                }
            }
            return new LearningEvaluation(evaluator, model);
        } finally {
            // Always release the prediction file, even on abort or failure.
            if (outputPredictionResultStream != null) {
                outputPredictionResultStream.close();
            }
        }
    }
}
Java
/*
 *    EvaluatePeriodicHeldOutTest.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *    @author Ammar Shaker (shaker@mathematik.uni-marburg.de)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.tasks;

import java.io.File;
import java.io.FileOutputStream;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.List;

import moa.classifiers.Classifier;
import moa.core.Measurement;
import moa.core.ObjectRepository;
import moa.core.StringUtils;
import moa.core.TimingUtils;
import moa.evaluation.ClassificationPerformanceEvaluator;
import moa.evaluation.LearningCurve;
import moa.evaluation.LearningEvaluation;
import moa.options.ClassOption;
import moa.options.FileOption;
import moa.options.FlagOption;
import moa.options.IntOption;
import moa.streams.CachedInstancesStream;
import moa.streams.InstanceStream;
import weka.core.Instance;
import weka.core.Instances;

/**
 * Task for evaluating a classifier on a stream by periodically testing on a heldout set.
 *
 * <p>The stream is consumed in two phases that alternate: train on
 * {@code sampleFrequency} instances, then score the learner on a heldout
 * test set of {@code testSize} instances. The test set is either cached
 * up-front from the head of the stream ({@code -c}) or drawn fresh from the
 * same stream on each round.</p>
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public class EvaluatePeriodicHeldOutTest extends MainTask {

    @Override
    public String getPurposeString() {
        return "Evaluates a classifier on a stream by periodically testing on a heldout set.";
    }

    private static final long serialVersionUID = 1L;

    /** Classifier to train incrementally. */
    public ClassOption learnerOption = new ClassOption("learner", 'l',
            "Classifier to train.", Classifier.class, "trees.HoeffdingTree");

    /** Stream supplying both training and (unless cached) test instances. */
    public ClassOption streamOption = new ClassOption("stream", 's',
            "Stream to learn from.", InstanceStream.class,
            "generators.RandomTreeGenerator");

    public ClassOption evaluatorOption = new ClassOption("evaluator", 'e',
            "Classification performance evaluation method.",
            ClassificationPerformanceEvaluator.class,
            "BasicClassificationPerformanceEvaluator");

    /** Size of the heldout test set scored on every round. */
    public IntOption testSizeOption = new IntOption("testSize", 'n',
            "Number of testing examples.", 1000000, 0, Integer.MAX_VALUE);

    /** Training budget in instances; values < 1 mean unlimited. */
    public IntOption trainSizeOption = new IntOption("trainSize", 'i',
            "Number of training examples, <1 = unlimited.", 0, 0,
            Integer.MAX_VALUE);

    /** Training budget in CPU seconds; evaluation stops when exceeded. */
    public IntOption trainTimeOption = new IntOption("trainTime", 't',
            "Number of training seconds.", 10 * 60 * 60, 0, Integer.MAX_VALUE);

    /** Number of training instances between heldout evaluations. */
    public IntOption sampleFrequencyOption = new IntOption(
            "sampleFrequency", 'f',
            "Number of training examples between samples of learning performance.",
            100000, 0, Integer.MAX_VALUE);

    /** Optional CSV file that receives each learning-curve entry as it is made. */
    public FileOption dumpFileOption = new FileOption("dumpFile", 'd',
            "File to append intermediate csv results to.", null, "csv", true);

    /** If set, the first testSize stream instances are cached and reused as the test set. */
    public FlagOption cacheTestOption = new FlagOption("cacheTest", 'c',
            "Cache test instances in memory.");

    /**
     * Main train/test loop. Returns the learning curve, or null if the task
     * was aborted via the monitor.
     */
    @Override
    protected Object doMainTask(TaskMonitor monitor, ObjectRepository repository) {
        Classifier learner = (Classifier) getPreparedClassOption(this.learnerOption);
        InstanceStream stream = (InstanceStream) getPreparedClassOption(this.streamOption);
        ClassificationPerformanceEvaluator evaluator = (ClassificationPerformanceEvaluator) getPreparedClassOption(this.evaluatorOption);
        learner.setModelContext(stream.getHeader());
        long instancesProcessed = 0;
        LearningCurve learningCurve = new LearningCurve("evaluation instances");
        // Optional CSV dump: append if the file already exists.
        File dumpFile = this.dumpFileOption.getFile();
        PrintStream immediateResultStream = null;
        if (dumpFile != null) {
            try {
                if (dumpFile.exists()) {
                    immediateResultStream = new PrintStream(
                            new FileOutputStream(dumpFile, true), true);
                } else {
                    immediateResultStream = new PrintStream(
                            new FileOutputStream(dumpFile), true);
                }
            } catch (Exception ex) {
                throw new RuntimeException(
                        "Unable to open immediate result file: " + dumpFile, ex);
            }
        }
        boolean firstDump = true;
        InstanceStream testStream = null;
        int testSize = this.testSizeOption.getValue();
        if (this.cacheTestOption.isSet()) {
            // Cache the first testSize instances as a fixed, reusable test set.
            monitor.setCurrentActivity("Caching test examples...", -1.0);
            Instances testInstances = new Instances(stream.getHeader(),
                    this.testSizeOption.getValue());
            while (testInstances.numInstances() < testSize) {
                testInstances.add(stream.nextInstance());
                if (testInstances.numInstances()
                        % INSTANCES_BETWEEN_MONITOR_UPDATES == 0) {
                    if (monitor.taskShouldAbort()) {
                        return null;
                    }
                    monitor.setCurrentActivityFractionComplete(
                            (double) testInstances.numInstances()
                            / (double) (this.testSizeOption.getValue()));
                }
            }
            testStream = new CachedInstancesStream(testInstances);
        } else {
            // Without caching, test instances are drawn from the same stream
            // as training instances (testStream aliases stream).
            //testStream = (InstanceStream) stream.copy();
            testStream = stream;
            /*monitor.setCurrentActivity("Skipping test examples...", -1.0);
            for (int i = 0; i < testSize; i++) {
                stream.nextInstance();
            }*/
        }
        instancesProcessed = 0;
        TimingUtils.enablePreciseTiming();
        double totalTrainTime = 0.0;
        while ((this.trainSizeOption.getValue() < 1
                || instancesProcessed < this.trainSizeOption.getValue())
                && stream.hasMoreInstances() == true) {
            // --- Training phase: train on the next sampleFrequency instances.
            monitor.setCurrentActivityDescription("Training...");
            long instancesTarget = instancesProcessed
                    + this.sampleFrequencyOption.getValue();
            long trainStartTime = TimingUtils.getNanoCPUTimeOfCurrentThread();
            while (instancesProcessed < instancesTarget
                    && stream.hasMoreInstances() == true) {
                learner.trainOnInstance(stream.nextInstance());
                instancesProcessed++;
                if (instancesProcessed % INSTANCES_BETWEEN_MONITOR_UPDATES == 0) {
                    if (monitor.taskShouldAbort()) {
                        return null;
                    }
                    // NOTE(review): when trainSize is 0 (the default,
                    // meaning unlimited) this divides by zero and reports an
                    // infinite fraction — confirm and guard before relying on
                    // the progress value.
                    monitor.setCurrentActivityFractionComplete(
                            (double) (instancesProcessed)
                            / (double) (this.trainSizeOption.getValue()));
                }
            }
            double lastTrainTime = TimingUtils.nanoTimeToSeconds(
                    TimingUtils.getNanoCPUTimeOfCurrentThread() - trainStartTime);
            totalTrainTime += lastTrainTime;
            // Stop once the CPU-time training budget is exhausted.
            if (totalTrainTime > this.trainTimeOption.getValue()) {
                break;
            }
            // --- Testing phase: score the current model on the heldout set.
            if (this.cacheTestOption.isSet()) {
                testStream.restart();
            }
            evaluator.reset();
            long testInstancesProcessed = 0;
            monitor.setCurrentActivityDescription("Testing (after "
                    + StringUtils.doubleToString(
                    ((double) (instancesProcessed)
                    / (double) (this.trainSizeOption.getValue()) * 100.0), 2)
                    + "% training)...");
            long testStartTime = TimingUtils.getNanoCPUTimeOfCurrentThread();
            int instCount = 0 ;
            for (instCount = 0; instCount < testSize; instCount++) {
                // NOTE(review): this checks the *training* stream even though
                // instances are read from testStream below. With -c set the
                // two are different objects, so an exhausted source stream
                // ends testing early despite cached instances remaining —
                // looks like it should be testStream.hasMoreInstances();
                // confirm against upstream before changing.
                if (stream.hasMoreInstances() == false) {
                    break;
                }
                Instance testInst = (Instance) testStream.nextInstance().copy();
                // Hide the label during prediction, then restore it so the
                // evaluator can score the result.
                double trueClass = testInst.classValue();
                testInst.setClassMissing();
                double[] prediction = learner.getVotesForInstance(testInst);
                testInst.setClassValue(trueClass);
                evaluator.addResult(testInst, prediction);
                testInstancesProcessed++;
                if (testInstancesProcessed % INSTANCES_BETWEEN_MONITOR_UPDATES == 0) {
                    if (monitor.taskShouldAbort()) {
                        return null;
                    }
                    monitor.setCurrentActivityFractionComplete(
                            (double) testInstancesProcessed
                            / (double) (testSize));
                }
            }
            // An incomplete test round means the stream ran dry: stop.
            if ( instCount != testSize) {
                break;
            }
            double testTime = TimingUtils.nanoTimeToSeconds(
                    TimingUtils.getNanoCPUTimeOfCurrentThread() - testStartTime);
            // Assemble the learning-curve entry: timing, evaluator and model
            // measurements.
            List<Measurement> measurements = new ArrayList<Measurement>();
            measurements.add(new Measurement("evaluation instances",
                    instancesProcessed));
            measurements.add(new Measurement("total train time", totalTrainTime));
            measurements.add(new Measurement("total train speed",
                    instancesProcessed / totalTrainTime));
            measurements.add(new Measurement("last train time", lastTrainTime));
            measurements.add(new Measurement("last train speed",
                    this.sampleFrequencyOption.getValue() / lastTrainTime));
            measurements.add(new Measurement("test time", testTime));
            measurements.add(new Measurement("test speed",
                    this.testSizeOption.getValue() / testTime));
            Measurement[] performanceMeasurements = evaluator.getPerformanceMeasurements();
            for (Measurement measurement : performanceMeasurements) {
                measurements.add(measurement);
            }
            Measurement[] modelMeasurements = learner.getModelMeasurements();
            for (Measurement measurement : modelMeasurements) {
                measurements.add(measurement);
            }
            learningCurve.insertEntry(new LearningEvaluation(
                    measurements.toArray(new Measurement[measurements.size()])));
            if (immediateResultStream != null) {
                if (firstDump) {
                    immediateResultStream.println(learningCurve.headerToString());
                    firstDump = false;
                }
                immediateResultStream.println(learningCurve.entryToString(
                        learningCurve.numEntries() - 1));
                immediateResultStream.flush();
            }
            if (monitor.resultPreviewRequested()) {
                monitor.setLatestResultPreview(learningCurve.copy());
            }
            // (A long block of commented-out tree-growth early-exit
            // heuristics for HoeffdingTree/OzaBoost/OzaBag was removed here;
            // see version history if it needs to be revived.)
        }
        if (immediateResultStream != null) {
            immediateResultStream.close();
        }
        return learningCurve;
    }

    @Override
    public Class<?> getTaskResultType() {
        return LearningCurve.class;
    }
}
Java
/* * FailedTaskReport.java * Copyright (C) 2007 University of Waikato, Hamilton, New Zealand * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.tasks; import java.io.PrintWriter; import java.io.StringWriter; import moa.AbstractMOAObject; import moa.core.StringUtils; /** * Class for reporting a failed task. * <code>TaskThread</code> returns this class as final result object when a task fails. * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision: 7 $ */ public class FailedTaskReport extends AbstractMOAObject { private static final long serialVersionUID = 1L; protected Throwable failureReason; public FailedTaskReport(Throwable failureReason) { this.failureReason = failureReason; } public Throwable getFailureReason() { return this.failureReason; } @Override public void getDescription(StringBuilder sb, int indent) { sb.append("Failure reason: "); sb.append(this.failureReason.getMessage()); StringUtils.appendNewlineIndented(sb, indent, "*** STACK TRACE ***"); StringWriter stackTraceWriter = new StringWriter(); this.failureReason.printStackTrace(new PrintWriter(stackTraceWriter)); sb.append(stackTraceWriter.toString()); } }
Java
/* * MeasureStreamSpeed.java * Copyright (C) 2007 University of Waikato, Hamilton, New Zealand * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.tasks; import moa.core.Measurement; import moa.core.ObjectRepository; import moa.core.TimingUtils; import moa.evaluation.LearningEvaluation; import moa.options.ClassOption; import moa.options.IntOption; import moa.streams.InstanceStream; /** * Task for measuring the speed of the stream. 
* * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision: 7 $ */ public class MeasureStreamSpeed extends MainTask { @Override public String getPurposeString() { return "Measures the speed of a stream."; } private static final long serialVersionUID = 1L; public ClassOption streamOption = new ClassOption("stream", 's', "Stream to measure.", InstanceStream.class, "generators.RandomTreeGenerator"); public IntOption generateSizeOption = new IntOption("generateSize", 'g', "Number of examples.", 10000000, 0, Integer.MAX_VALUE); @Override protected Object doMainTask(TaskMonitor monitor, ObjectRepository repository) { TimingUtils.enablePreciseTiming(); int numInstances = 0; InstanceStream stream = (InstanceStream) getPreparedClassOption(this.streamOption); long genStartTime = TimingUtils.getNanoCPUTimeOfCurrentThread(); while (numInstances < this.generateSizeOption.getValue()) { stream.nextInstance(); numInstances++; } double genTime = TimingUtils.nanoTimeToSeconds(TimingUtils.getNanoCPUTimeOfCurrentThread() - genStartTime); return new LearningEvaluation( new Measurement[]{ new Measurement("Number of instances generated", numInstances), new Measurement("Time elapsed", genTime), new Measurement("Instances per second", numInstances / genTime)}); } @Override public Class<?> getTaskResultType() { return LearningEvaluation.class; } }
Java
/** * EvaluateClustering.java * * @author Albert Bifet (abifet@cs.waikato.ac.nz) * @editor Yunsu Kim * * Last edited: 2013/06/02 */ package moa.tasks; import moa.clusterers.AbstractClusterer; import moa.core.ObjectRepository; import moa.evaluation.LearningCurve; import moa.gui.BatchCmd; import moa.options.ClassOption; import moa.options.FileOption; import moa.options.IntOption; import moa.streams.clustering.ClusteringStream; /** * Task for evaluating a clusterer on a stream. * * @author Albert Bifet (abifet at cs dot waikato dot ac dot nz) * @version $Revision: 7 $ */ public class EvaluateClustering extends MainTask { @Override public String getPurposeString() { return "Evaluates a clusterer on a stream."; } private static final long serialVersionUID = 1L; public ClassOption learnerOption = new ClassOption("learner", 'l', "Clusterer to train.", AbstractClusterer.class, "clustream.Clustream"); public ClassOption streamOption = new ClassOption("stream", 's', "Stream to learn from.", ClusteringStream.class, "RandomRBFGeneratorEvents"); public IntOption instanceLimitOption = new IntOption("instanceLimit", 'i', "Maximum number of instances to test/train on (-1 = no limit).", 100000, -1, Integer.MAX_VALUE); public IntOption measureCollectionTypeOption = new IntOption( "measureCollectionType", 'm', "Type of measure collection", 0, 0, Integer.MAX_VALUE); /*public ClassOption evaluatorOption = new ClassOption("evaluator", 'e', "Performance evaluation method.", LearningPerformanceEvaluator.class, "BasicClusteringPerformanceEvaluator");*/ /*public IntOption timeLimitOption = new IntOption("timeLimit", 't', "Maximum number of seconds to test/train for (-1 = no limit).", -1, -1, Integer.MAX_VALUE); public IntOption sampleFrequencyOption = new IntOption("sampleFrequency", 'f', "How many instances between samples of the learning performance.", 100000, 0, Integer.MAX_VALUE); public IntOption maxMemoryOption = new IntOption("maxMemory", 'b', "Maximum size of model (in bytes). 
-1 = no limit.", -1, -1, Integer.MAX_VALUE); public IntOption memCheckFrequencyOption = new IntOption( "memCheckFrequency", 'q', "How many instances between memory bound checks.", 100000, 0, Integer.MAX_VALUE);*/ public FileOption dumpFileOption = new FileOption("dumpFile", 'd', "File to append intermediate csv reslts to.", "dumpClustering.csv", "csv", true); @Override public Class<?> getTaskResultType() { return LearningCurve.class; } @Override protected Object doMainTask(TaskMonitor monitor, ObjectRepository repository) { BatchCmd.runBatch((ClusteringStream) getPreparedClassOption(this.streamOption), (AbstractClusterer) getPreparedClassOption(this.learnerOption), (int) measureCollectionTypeOption.getValue(), (int) this.instanceLimitOption.getValue(), (String) dumpFileOption.getValue()); LearningCurve learningCurve = new LearningCurve("EvaluateClustering does not support custom output file (> [filename]).\n" + "Check out the dump file to see the results (if you haven't specified, dumpClustering.csv by default)."); //System.out.println(learner.toString()); return learningCurve; } }
Java
/* * LearnModelRegression.java * Copyright (C) 2007 University of Waikato, Hamilton, New Zealand * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.tasks; import moa.classifiers.Classifier; import moa.classifiers.Regressor; import moa.core.ObjectRepository; import moa.options.ClassOption; import moa.options.IntOption; import moa.streams.InstanceStream; /** * Task for learning a model without any evaluation. 
* * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision: 7 $ */ public class LearnModelRegression extends RegressionMainTask { @Override public String getPurposeString() { return "Learns a model from a stream."; } private static final long serialVersionUID = 1L; public ClassOption learnerOption = new ClassOption("learner", 'l', "Classifier to train.", Regressor.class, "trees.FIMTDD"); public ClassOption streamOption = new ClassOption("stream", 's', "Stream to learn from.", InstanceStream.class, "generators.RandomTreeGenerator"); public IntOption maxInstancesOption = new IntOption("maxInstances", 'm', "Maximum number of instances to train on per pass over the data.", 10000000, 0, Integer.MAX_VALUE); public IntOption numPassesOption = new IntOption("numPasses", 'p', "The number of passes to do over the data.", 1, 1, Integer.MAX_VALUE); public IntOption memCheckFrequencyOption = new IntOption( "memCheckFrequency", 'q', "How many instances between memory bound checks.", 100000, 0, Integer.MAX_VALUE); public LearnModelRegression() { } public LearnModelRegression(Classifier learner, InstanceStream stream, int maxInstances, int numPasses) { this.learnerOption.setCurrentObject(learner); this.streamOption.setCurrentObject(stream); this.maxInstancesOption.setValue(maxInstances); this.numPassesOption.setValue(numPasses); } @Override public Class<?> getTaskResultType() { return Regressor.class; } @Override public Object doMainTask(TaskMonitor monitor, ObjectRepository repository) { Classifier learner = (Classifier) getPreparedClassOption(this.learnerOption); InstanceStream stream = (InstanceStream) getPreparedClassOption(this.streamOption); learner.setModelContext(stream.getHeader()); int numPasses = this.numPassesOption.getValue(); int maxInstances = this.maxInstancesOption.getValue(); for (int pass = 0; pass < numPasses; pass++) { long instancesProcessed = 0; monitor.setCurrentActivity("Training learner" + (numPasses > 1 ? 
(" (pass " + (pass + 1) + "/" + numPasses + ")") : "") + "...", -1.0); if (pass > 0) { stream.restart(); } while (stream.hasMoreInstances() && ((maxInstances < 0) || (instancesProcessed < maxInstances))) { learner.trainOnInstance(stream.nextInstance()); instancesProcessed++; if (instancesProcessed % INSTANCES_BETWEEN_MONITOR_UPDATES == 0) { if (monitor.taskShouldAbort()) { return null; } long estimatedRemainingInstances = stream.estimatedRemainingInstances(); if (maxInstances > 0) { long maxRemaining = maxInstances - instancesProcessed; if ((estimatedRemainingInstances < 0) || (maxRemaining < estimatedRemainingInstances)) { estimatedRemainingInstances = maxRemaining; } } monitor.setCurrentActivityFractionComplete(estimatedRemainingInstances < 0 ? -1.0 : (double) instancesProcessed / (double) (instancesProcessed + estimatedRemainingInstances)); if (monitor.resultPreviewRequested()) { monitor.setLatestResultPreview(learner.copy()); } } } } learner.setModelContext(stream.getHeader()); return learner; } }
Java
/*
 *    TaskCompletionListener.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.tasks;

/**
 * Interface representing a listener for the completion of a task running in a
 * TaskThread. The TaskThread notifies all of its listeners when its task
 * finishes running.
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public interface TaskCompletionListener {

    /**
     * The method to perform when the task finishes.
     *
     * @param task the TaskThread that this listener is listening to
     */
    public void taskCompleted(TaskThread task);
}
Java
/* * RunTasks.java * Copyright (C) 2011 University of Waikato, Hamilton, New Zealand * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @author Albert Bifet (abifet at cs dot waikato dot ac dot nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.tasks; import moa.MOAObject; import moa.core.ObjectRepository; import moa.options.ClassOption; import moa.options.FloatOption; import moa.options.StringOption; /** * Task for running several experiments modifying values of parameters. 
* * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @author Albert Bifet (abifet at cs dot waikato dot ac dot nz) * @version $Revision: 7 $ */ public class RunStreamTasks extends ConceptDriftMainTask { @Override public String getPurposeString() { return "Runs several experiments modifying values of parameters."; } private static final long serialVersionUID = 1L; public ClassOption taskOption = new ClassOption("task", 't', "Task to do.", Task.class, "EvaluatePrequential -l active.ActiveClassifier -i 1000000 -d temp.txt"); public StringOption streamParameterOption = new StringOption("streamParameter", 'p', "Stream parameter to vary.", "b"); public FloatOption firstValueOption = new FloatOption("firstValue", 'f', "First value", 0.0); public FloatOption lastValueOption = new FloatOption("lastValue", 'l', "Last value", 1.0); public FloatOption incrementValueOption = new FloatOption("incrementValue", 'i', "Increment value", 0.1); @Override public Class<?> getTaskResultType() { return this.task.getTaskResultType(); } protected Task task; @Override protected Object doMainTask(TaskMonitor monitor, ObjectRepository repository) { Object result = null; Task taskBase = (Task) getPreparedClassOption(this.taskOption); String commandString = this.taskOption.getValueAsCLIString(); //for each possible value of the parameter for (int valueParameter = (int) this.firstValueOption.getValue(); valueParameter <= this.lastValueOption.getValue(); valueParameter += (int) this.incrementValueOption.getValue()) { //Add parameter this.task = (Task) ((MOAObject) taskBase).copy(); if (this.task instanceof EvaluateConceptDrift) { String stream = ((EvaluateConceptDrift) this.task).streamOption.getValueAsCLIString(); ((EvaluateConceptDrift) this.task).streamOption.setValueViaCLIString(stream + " -" + streamParameterOption.getValue() + " " + valueParameter); } //Run task result = this.task.doTask(monitor, repository); //System.out.println(((AbstractOptionHandler) 
this.task).getCLICreationString(Task.class)); } return result; } }
Java
/* * RunTasks.java * Copyright (C) 2011 University of Waikato, Hamilton, New Zealand * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @author Albert Bifet (abifet at cs dot waikato dot ac dot nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.tasks; import moa.core.ObjectRepository; import moa.options.ClassOption; import moa.options.FloatOption; import moa.options.StringOption; /** * Task for running several experiments modifying values of parameters. 
* * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @author Albert Bifet (abifet at cs dot waikato dot ac dot nz) * @version $Revision: 7 $ */ public class RunTasks extends MainTask { @Override public String getPurposeString() { return "Runs several experiments modifying values of parameters."; } private static final long serialVersionUID = 1L; public ClassOption taskOption = new ClassOption("task", 't', "Task to do.", Task.class, "EvaluatePrequential -l active.ActiveClassifier -i 1000000 -d temp.txt"); public StringOption classifierParameterOption = new StringOption("classifierParameter", 'p', "Classifier parameter to vary.", "b"); public FloatOption firstValueOption = new FloatOption("firstValue", 'f', "First value", 0.0); public FloatOption lastValueOption = new FloatOption("lastValue", 'l', "Last value", 1.0); public FloatOption incrementValueOption = new FloatOption("incrementValue", 'i', "Increment value", 0.1); @Override public Class<?> getTaskResultType() { return this.task.getTaskResultType(); } protected Task task; @Override protected Object doMainTask(TaskMonitor monitor, ObjectRepository repository) { Object result = null; String commandString = this.taskOption.getValueAsCLIString(); //for each possible value of the parameter for (double valueParameter = this.firstValueOption.getValue(); valueParameter <= this.lastValueOption.getValue(); valueParameter += this.incrementValueOption.getValue()) { //Add parameter this.task = (Task) getPreparedClassOption(this.taskOption); if (this.task instanceof EvaluatePrequential) { String classifier = ((EvaluatePrequential) this.task).learnerOption.getValueAsCLIString(); ((EvaluatePrequential) this.task).learnerOption.setValueViaCLIString(classifier + " -" + classifierParameterOption.getValue() + " " + valueParameter); } if (this.task instanceof EvaluateInterleavedTestThenTrain) { String classifier = ((EvaluateInterleavedTestThenTrain) this.task).learnerOption.getValueAsCLIString(); 
((EvaluateInterleavedTestThenTrain) this.task).learnerOption.setValueViaCLIString(classifier + " -" + classifierParameterOption.getValue() + " " + valueParameter); } //Run task result = this.task.doTask(monitor, repository); //System.out.println(((AbstractOptionHandler) this.task).getCLICreationString(Task.class)); } return result; } }
Java
/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package moa.tasks;

import java.util.ArrayList;

import moa.streams.InstanceStream;
import moa.streams.clustering.ClusterEvent;

/**
 * Base class for main tasks dealing with concept-drift streams. It holds a
 * list of ClusterEvent objects that subclasses can set and callers read back.
 *
 * NOTE(review): the InstanceStream import is unused in this class.
 *
 * @author albert
 */
public abstract class ConceptDriftMainTask extends MainTask {

    // Events recorded during the task; null until setEventsList is called.
    protected ArrayList<ClusterEvent> events;

    /** Stores the event list for later retrieval via getEventsList. */
    protected void setEventsList(ArrayList<ClusterEvent> events) {
        this.events = events;
    }

    /** Returns the stored event list (may be null if never set). */
    public ArrayList<ClusterEvent> getEventsList() {
        return this.events;
    }
}
Java
/*
 *    EvaluateInterleavedChunks.java
 *    Copyright (C) 2010 Poznan University of Technology, Poznan, Poland
 *    @author Dariusz Brzezinski (dariusz.brzezinski@cs.put.poznan.pl)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.tasks;

import java.io.File;
import java.io.FileOutputStream;
import java.io.PrintStream;

import moa.classifiers.Classifier;
import moa.core.Measurement;
import moa.core.ObjectRepository;
import moa.core.TimingUtils;
import moa.evaluation.ClassificationPerformanceEvaluator;
import moa.evaluation.LearningCurve;
import moa.evaluation.LearningEvaluation;
import moa.options.ClassOption;
import moa.options.FileOption;
import moa.options.IntOption;
import moa.streams.InstanceStream;
import weka.core.Instance;
import weka.core.Instances;

/**
 * Task that evaluates a classifier on a stream in chunks: each chunk is first
 * used for testing the current model (except the very first chunk, for which
 * there is no model yet) and then for training it.
 */
public class EvaluateInterleavedChunks extends MainTask {

    @Override
    public String getPurposeString() {
        return "Evaluates a classifier on a stream by testing then training with chunks of data in sequence.";
    }

    private static final long serialVersionUID = 1L;

    /**
     * Allows to select the trained classifier.
     */
    public ClassOption learnerOption = new ClassOption("learner", 'l',
            "Classifier to train.", Classifier.class, "bayes.NaiveBayes");

    /**
     * Allows to select the stream the classifier will learn.
     */
    public ClassOption streamOption = new ClassOption("stream", 's',
            "Stream to learn from.", InstanceStream.class,
            "generators.RandomTreeGenerator");

    /**
     * Allows to select the classifier performance evaluation method.
     */
    public ClassOption evaluatorOption = new ClassOption("evaluator", 'e',
            "Classification performance evaluation method.",
            ClassificationPerformanceEvaluator.class,
            "BasicClassificationPerformanceEvaluator");

    /**
     * Allows to define the maximum number of instances to test/train on
     * (-1 = no limit).
     */
    public IntOption instanceLimitOption = new IntOption("instanceLimit", 'i',
            "Maximum number of instances to test/train on  (-1 = no limit).",
            100000000, -1, Integer.MAX_VALUE);

    /**
     * Allows to define the training/testing chunk size.
     */
    public IntOption chunkSizeOption = new IntOption("chunkSize", 'c',
            "Number of instances in a data chunk.",
            1000, 1, Integer.MAX_VALUE);

    /**
     * Allows to define the maximum number of seconds to test/train for
     * (-1 = no limit).
     */
    public IntOption timeLimitOption = new IntOption("timeLimit", 't',
            "Maximum number of seconds to test/train for (-1 = no limit).", -1,
            -1, Integer.MAX_VALUE);

    /**
     * Defines how often classifier parameters will be calculated.
     */
    public IntOption sampleFrequencyOption = new IntOption("sampleFrequency",
            'f',
            "How many instances between samples of the learning performance.",
            100000, 0, Integer.MAX_VALUE);

    /**
     * Allows to define the memory limit for the created model.
     * NOTE(review): declared but never read in doMainTask below.
     */
    public IntOption maxMemoryOption = new IntOption("maxMemory", 'b',
            "Maximum size of model (in bytes). -1 = no limit.", -1, -1,
            Integer.MAX_VALUE);

    /**
     * Allows to define the frequency of memory checks.
     * NOTE(review): declared but never read in doMainTask below.
     */
    public IntOption memCheckFrequencyOption = new IntOption(
            "memCheckFrequency", 'q',
            "How many instances between memory bound checks.", 100000, 0,
            Integer.MAX_VALUE);

    /**
     * Allows to define the output file name and location.
     */
    public FileOption dumpFileOption = new FileOption("dumpFile", 'd',
            "File to append intermediate csv reslts to.", null, "csv", true);

    /**
     * Defines the task's result type.
     */
    public Class<?> getTaskResultType() {
        return LearningCurve.class;
    }

    /**
     * Runs the chunked test-then-train evaluation and returns the resulting
     * learning curve. Intermediate results are optionally appended to the
     * dump file as CSV.
     *
     * @return the learning curve, or null if the task was aborted
     */
    @Override
    protected Object doMainTask(TaskMonitor monitor, ObjectRepository repository) {
        Classifier learner = (Classifier) getPreparedClassOption(this.learnerOption);
        InstanceStream stream = (InstanceStream) getPreparedClassOption(this.streamOption);
        ClassificationPerformanceEvaluator evaluator = (ClassificationPerformanceEvaluator) getPreparedClassOption(this.evaluatorOption);
        learner.setModelContext(stream.getHeader());
        int maxInstances = this.instanceLimitOption.getValue();
        int chunkSize = this.chunkSizeOption.getValue();
        long instancesProcessed = 0;
        int maxSeconds = this.timeLimitOption.getValue();
        int secondsElapsed = 0;
        monitor.setCurrentActivity("Evaluating learner...", -1.0);
        LearningCurve learningCurve = new LearningCurve(
                "learning evaluation instances");
        // Open the intermediate CSV dump file, appending if it already exists.
        File dumpFile = this.dumpFileOption.getFile();
        PrintStream immediateResultStream = null;
        if (dumpFile != null) {
            try {
                if (dumpFile.exists()) {
                    immediateResultStream = new PrintStream(
                            new FileOutputStream(dumpFile, true), true);
                } else {
                    immediateResultStream = new PrintStream(
                            new FileOutputStream(dumpFile), true);
                }
            } catch (Exception ex) {
                throw new RuntimeException(
                        "Unable to open immediate result file: " + dumpFile, ex);
            }
        }
        boolean firstDump = true;
        boolean firstChunk = true;
        boolean preciseCPUTiming = TimingUtils.enablePreciseTiming();
        long evaluateStartTime = TimingUtils.getNanoCPUTimeOfCurrentThread();
        long sampleTestTime = 0, sampleTrainTime = 0;
        double RAMHours = 0.0;
        while (stream.hasMoreInstances()
                && ((maxInstances < 0) || (instancesProcessed < maxInstances))
                && ((maxSeconds < 0) || (secondsElapsed < maxSeconds))) {
            // Collect the next chunk (may be short if the stream runs out).
            Instances chunkInstances = new Instances(stream.getHeader(), chunkSize);
            while (stream.hasMoreInstances() && chunkInstances.numInstances() < chunkSize) {
                chunkInstances.add(stream.nextInstance());
                if (chunkInstances.numInstances() % INSTANCES_BETWEEN_MONITOR_UPDATES == 0) {
                    if (monitor.taskShouldAbort()) {
                        return null;
                    }
                    long estimatedRemainingInstances = stream.estimatedRemainingInstances();
                    if (maxInstances > 0) {
                        long maxRemaining = maxInstances - instancesProcessed;
                        if ((estimatedRemainingInstances < 0)
                                || (maxRemaining < estimatedRemainingInstances)) {
                            estimatedRemainingInstances = maxRemaining;
                        }
                    }
                    monitor.setCurrentActivityFractionComplete((double) instancesProcessed
                            / (double) (instancesProcessed + estimatedRemainingInstances));
                }
            }
            ////Testing
            // Test the current model on the chunk, except for the very first
            // chunk: no training has happened yet, so there is nothing to test.
            long testStartTime = TimingUtils.getNanoCPUTimeOfCurrentThread();
            if (!firstChunk) {
                for (int i = 0; i < chunkInstances.numInstances(); i++) {
                    Instance testInst = (Instance) chunkInstances.instance(i).copy();
                    //testInst.setClassMissing();
                    double[] prediction = learner.getVotesForInstance(testInst);
                    evaluator.addResult(testInst, prediction);
                }
            } else {
                firstChunk = false;
            }
            sampleTestTime += TimingUtils.getNanoCPUTimeOfCurrentThread() - testStartTime;
            ////Training
            long trainStartTime = TimingUtils.getNanoCPUTimeOfCurrentThread();
            for (int i = 0; i < chunkInstances.numInstances(); i++) {
                learner.trainOnInstance(chunkInstances.instance(i));
                instancesProcessed++;
            }
            sampleTrainTime += TimingUtils.getNanoCPUTimeOfCurrentThread() - trainStartTime;
            ////Result output
            // NOTE(review): this only fires when a chunk boundary lands exactly
            // on a multiple of sampleFrequency, so with a chunk size that does
            // not divide the sample frequency no entries are ever emitted.
            if (instancesProcessed % this.sampleFrequencyOption.getValue() == 0) {
                // RAM-Hours: model size in GB times hours spent in this sample.
                double RAMHoursIncrement = learner.measureByteSize() / (1024.0 * 1024.0 * 1024.0); //GBs
                RAMHoursIncrement *= (TimingUtils.nanoTimeToSeconds(sampleTrainTime + sampleTestTime) / 3600.0); //Hours
                RAMHours += RAMHoursIncrement;
                // Average per-chunk times: total sample time divided by the
                // number of chunks per sample (sampleFrequency / chunkSize).
                double avgTrainTime = TimingUtils.nanoTimeToSeconds(sampleTrainTime)
                        / ((double) this.sampleFrequencyOption.getValue() / chunkInstances.numInstances());
                double avgTestTime = TimingUtils.nanoTimeToSeconds(sampleTestTime)
                        / ((double) this.sampleFrequencyOption.getValue() / chunkInstances.numInstances());
                sampleTestTime = 0;
                sampleTrainTime = 0;
                learningCurve.insertEntry(new LearningEvaluation(
                        new Measurement[]{
                            new Measurement(
                            "learning evaluation instances",
                            instancesProcessed),
                            new Measurement(("evaluation time ("
                            + (preciseCPUTiming ? "cpu " : "") + "seconds)"),
                            TimingUtils.nanoTimeToSeconds(TimingUtils.getNanoCPUTimeOfCurrentThread() - evaluateStartTime)),
                            new Measurement(
                            "average chunk train time",
                            avgTrainTime),
                            new Measurement(
                            "average chunk train speed",
                            chunkInstances.numInstances() / avgTrainTime),
                            new Measurement(
                            "average chunk test time",
                            avgTestTime),
                            new Measurement(
                            "average chunk test speed",
                            chunkInstances.numInstances() / avgTestTime),
                            new Measurement(
                            "model cost (RAM-Hours)",
                            RAMHours)},
                        evaluator, learner));
                if (immediateResultStream != null) {
                    if (firstDump) {
                        immediateResultStream.println(learningCurve
                                .headerToString());
                        firstDump = false;
                    }
                    immediateResultStream.println(learningCurve
                            .entryToString(learningCurve.numEntries() - 1));
                    immediateResultStream.flush();
                }
            }
            ////Memory testing
            // Periodic monitor servicing: abort check, progress fraction,
            // result preview, and elapsed-seconds update for the time limit.
            if (instancesProcessed % INSTANCES_BETWEEN_MONITOR_UPDATES == 0) {
                if (monitor.taskShouldAbort()) {
                    return null;
                }
                long estimatedRemainingInstances = stream
                        .estimatedRemainingInstances();
                if (maxInstances > 0) {
                    long maxRemaining = maxInstances - instancesProcessed;
                    if ((estimatedRemainingInstances < 0)
                            || (maxRemaining < estimatedRemainingInstances)) {
                        estimatedRemainingInstances = maxRemaining;
                    }
                }
                monitor
                        .setCurrentActivityFractionComplete(estimatedRemainingInstances < 0 ? -1.0
                        : (double) instancesProcessed
                        / (double) (instancesProcessed + estimatedRemainingInstances));
                if (monitor.resultPreviewRequested()) {
                    monitor.setLatestResultPreview(learningCurve.copy());
                }
                secondsElapsed = (int) TimingUtils
                        .nanoTimeToSeconds(TimingUtils
                        .getNanoCPUTimeOfCurrentThread()
                        - evaluateStartTime);
            }
        }
        if (immediateResultStream != null) {
            immediateResultStream.close();
        }
        return learningCurve;
    }
}
Java
/*
 *    Task.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.tasks;

import moa.MOAObject;
import moa.core.ObjectRepository;

/**
 * Interface representing a task.
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public interface Task extends MOAObject {

    /**
     * Gets the result type of this task.
     * Tasks can return LearningCurve, LearningEvaluation,
     * Classifier, String, Instances...
     *
     * @return a class object of the result of this task
     */
    public Class<?> getTaskResultType();

    /**
     * This method performs this task,
     * when a TaskMonitor and an ObjectRepository are not needed.
     *
     * @return an object with the result of this task
     */
    public Object doTask();

    /**
     * This method performs this task.
     * <code>AbstractTask</code> implements this method so all
     * its extensions only need to implement <code>doTaskImpl</code>.
     *
     * @param monitor the TaskMonitor to use
     * @param repository the ObjectRepository to use
     * @return an object with the result of this task
     */
    public Object doTask(TaskMonitor monitor, ObjectRepository repository);
}
Java
/*
 *    MainTask.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.tasks;

import java.io.File;
import java.io.IOException;
import java.io.Serializable;

import moa.core.ObjectRepository;
import moa.core.SerializeUtils;
import moa.options.FileOption;

/**
 * Abstract Main Task. All tasks that want to write their result
 * to a file must extend this class.
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public abstract class MainTask extends AbstractTask {

    private static final long serialVersionUID = 1L;

    /** The number of instances between monitor updates. */
    protected static final int INSTANCES_BETWEEN_MONITOR_UPDATES = 10;

    /** File option to save the final result of the task to. */
    public FileOption outputFileOption = new FileOption("taskResultFile", 'O',
            "File to save the final result of the task to.", null, "moa", true);

    /**
     * Runs the concrete task via doMainTask and, if an output file is
     * configured, serializes the result to it.
     *
     * @return the task result, or null if the task was aborted
     */
    @Override
    protected Object doTaskImpl(TaskMonitor monitor, ObjectRepository repository) {
        Object result = doMainTask(monitor, repository);
        if (monitor.taskShouldAbort()) {
            return null;
        }
        File outputFile = this.outputFileOption.getFile();
        if (outputFile != null) {
            // Only Serializable results can be written; anything else is a
            // hard error so the user is not silently left without output.
            if (result instanceof Serializable) {
                monitor.setCurrentActivity("Saving result of task "
                        + getTaskName() + " to file " + outputFile + "...",
                        -1.0);
                try {
                    SerializeUtils.writeToFile(outputFile,
                            (Serializable) result);
                } catch (IOException ioe) {
                    throw new RuntimeException("Failed writing result of task "
                            + getTaskName() + " to file " + outputFile, ioe);
                }
            } else {
                throw new RuntimeException("Result of task " + getTaskName()
                        + " is not serializable, so cannot be written to file "
                        + outputFile);
            }
        }
        return result;
    }

    /**
     * This method performs this task.
     * <code>AbstractTask</code> implements <code>doTask</code>,
     * that uses <code>doTaskImpl</code>.
     * <code>MainTask</code> implements <code>doTaskImpl</code> using
     * <code>doMainTask</code> so its extensions only need to implement
     * <code>doMainTask</code>.
     *
     * @param monitor the TaskMonitor to use
     * @param repository the ObjectRepository to use
     * @return an object with the result of this task
     */
    protected abstract Object doMainTask(TaskMonitor monitor,
            ObjectRepository repository);
}
Java
/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package moa.tasks;

import java.util.ArrayList;

import moa.streams.InstanceStream;
import moa.streams.clustering.ClusterEvent;

/**
 * Base class for regression main tasks. It holds a list of ClusterEvent
 * objects that subclasses can set and callers read back.
 *
 * NOTE(review): the InstanceStream import is unused, and this class duplicates
 * ConceptDriftMainTask verbatim apart from its name.
 *
 * @author albert
 */
public abstract class RegressionMainTask extends MainTask {

    // Events recorded during the task; null until setEventsList is called.
    protected ArrayList<ClusterEvent> events;

    /** Stores the event list for later retrieval via getEventsList. */
    protected void setEventsList(ArrayList<ClusterEvent> events) {
        this.events = events;
    }

    /** Returns the stored event list (may be null if never set). */
    public ArrayList<ClusterEvent> getEventsList() {
        return this.events;
    }
}
Java
/*
 *    CacheShuffledStream.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.tasks;

import java.util.Random;

import weka.core.Instances;

import moa.core.ObjectRepository;
import moa.options.ClassOption;
import moa.options.IntOption;
import moa.streams.CachedInstancesStream;
import moa.streams.InstanceStream;

/**
 * Task for storing and shuffling examples in memory.
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public class CacheShuffledStream extends AbstractTask {

    @Override
    public String getPurposeString() {
        return "Stores and shuffles examples in memory.";
    }

    private static final long serialVersionUID = 1L;

    /** Source stream whose instances are read into the cache (CLI flag -s). */
    public ClassOption streamOption = new ClassOption("stream", 's',
            "Stream to cache and shuffle.", InstanceStream.class,
            "generators.RandomTreeGenerator");

    /** Upper bound on the number of cached instances (CLI flag -m). */
    public IntOption maximumCacheSizeOption = new IntOption("maximumCacheSize",
            'm', "Maximum number of instances to cache.", 1000000, 1,
            Integer.MAX_VALUE);

    /** Seed for the deterministic shuffle of the cache (CLI flag -r). */
    public IntOption shuffleRandomSeedOption = new IntOption(
            "shuffleRandomSeed", 'r',
            "Seed for random shuffling of instances.", 1);

    /**
     * Reads up to the configured number of instances into memory, shuffles
     * them with a seeded Random, and wraps the result as a stream.
     *
     * @return a CachedInstancesStream over the shuffled cache, or null if aborted
     */
    @Override
    protected Object doTaskImpl(TaskMonitor monitor, ObjectRepository repository) {
        InstanceStream stream = (InstanceStream) getPreparedClassOption(this.streamOption);
        Instances buffer = new Instances(stream.getHeader(), 0);
        int cacheLimit = this.maximumCacheSizeOption.getValue();
        monitor.setCurrentActivity("Caching instances...", -1.0);
        while ((buffer.numInstances() < cacheLimit) && stream.hasMoreInstances()) {
            buffer.add(stream.nextInstance());
            // Service the monitor periodically: abort check plus a progress
            // estimate capped by the remaining cache capacity.
            if (buffer.numInstances() % MainTask.INSTANCES_BETWEEN_MONITOR_UPDATES == 0) {
                if (monitor.taskShouldAbort()) {
                    return null;
                }
                long remainingEstimate = stream.estimatedRemainingInstances();
                long remainingCapacity = cacheLimit - buffer.numInstances();
                if ((remainingEstimate < 0) || (remainingCapacity < remainingEstimate)) {
                    remainingEstimate = remainingCapacity;
                }
                double fraction = remainingEstimate < 0 ? -1.0
                        : (double) buffer.numInstances()
                                / (double) (buffer.numInstances() + remainingEstimate);
                monitor.setCurrentActivityFractionComplete(fraction);
            }
        }
        monitor.setCurrentActivity("Shuffling instances...", -1.0);
        buffer.randomize(new Random(this.shuffleRandomSeedOption.getValue()));
        return new CachedInstancesStream(buffer);
    }

    public Class<?> getTaskResultType() {
        return CachedInstancesStream.class;
    }
}
Java
/*
 *    AbstractTask.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.tasks;

import moa.core.ObjectRepository;
import moa.options.AbstractOptionHandler;

/**
 * Abstract Task. All runnable tasks in MOA extend this class.
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public abstract class AbstractTask extends AbstractOptionHandler implements Task {

    /**
     * Gets the name of this task, i.e. its simple class name.
     *
     * @return the name of this task
     */
    public String getTaskName() {
        return this.getClass().getSimpleName();
    }

    @Override
    public Object doTask() {
        // Convenience entry point: run with a monitor that ignores progress
        // and without an object repository.
        return doTask(new NullMonitor(), null);
    }

    @Override
    public Object doTask(TaskMonitor monitor, ObjectRepository repository) {
        monitor.setCurrentActivity("Preparing options to " + getTaskName()
                + "...", -1.0);
        // Materialise any class options before the task body runs.
        prepareClassOptions(monitor, repository);
        if (monitor.taskShouldAbort()) {
            return null;
        }
        monitor.setCurrentActivity("Doing task " + getTaskName() + "...", -1.0);
        Object result = doTaskImpl(monitor, repository);
        monitor.setCurrentActivity("Task " + getTaskName() + " complete.", 1.0);
        // Drop references to prepared option objects so they can be collected.
        this.classOptionNamesToPreparedObjects = null; // clean up refs
        return result;
    }

    /**
     * This method performs this task.
     * <code>AbstractTask</code> implements <code>doTask</code> so all
     * its extensions only need to implement <code>doTaskImpl</code>.
     *
     * @param monitor the TaskMonitor to use
     * @param repository the ObjectRepository to use
     * @return an object with the result of this task
     */
    protected abstract Object doTaskImpl(TaskMonitor monitor,
            ObjectRepository repository);

    @Override
    protected void prepareForUseImpl(TaskMonitor monitor,
            ObjectRepository repository) {
        // tasks prepare themselves upon running
    }

    @Override
    public void getDescription(StringBuilder sb, int indent) {
        // TODO Auto-generated method stub
    }
}
Java
/* * Predicates.java * Copyright (C) 2012 University of Porto, Portugal * @author P. Kosina, E. Almeida, J. Gama * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * */ package moa.classifiers.rules; /** * Class that creates and evaluates the predicates * * <p>Learning Decision Rules from Data Streams, IJCAI 2011, J. Gama, P. Kosina </p> * * @author P. Kosina, E. Almeida, J. Gama * @version $Revision: 2 $ * */ import moa.AbstractMOAObject; import weka.core.Instance; //import samoa.instances.Instance; public class Predicates extends AbstractMOAObject{ private static final long serialVersionUID = 1L; private double attributeValue; private double symbol; private double value; public Predicates(double attribVal, double symb,double val){ this.attributeValue = attribVal; this.symbol = symb; this.value = val; } public double getAttributeValue() { return this.attributeValue; } public double getSymbol() { return this.symbol; } public double getValue() { return this.value; } public void setAttributeValue(double attributeValue) { this.attributeValue = attributeValue; } public void setSymbol(double symbol) { this.symbol = symbol; } public void setValue(double value) { this.value = value; } public boolean evaluate(Instance inst) { boolean result = false; double attributeValue = inst.value((int) this.attributeValue); if (this.symbol == 0.0 && attributeValue == this.value) { result = true; } else if (this.symbol == -1.0 && attributeValue <= this.value) { result = true; } else if (this.symbol == 
1.0 && attributeValue > this.value) { result = true; } return result; } @Override public void getDescription(StringBuilder sb, int indent) { // TODO Auto-generated method stub } }
Java
/*
 *    AMRulesRegressor.java
 *    Copyright (C) 2014 University of Porto, Portugal
 *    @author A. Bifet, J. Duarte, J. Gama
 *
 *    Licensed under the Apache License, Version 2.0 (the "License");
 *    you may not use this file except in compliance with the License.
 *    You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing, software
 *    distributed under the License is distributed on an "AS IS" BASIS,
 *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *    See the License for the specific language governing permissions and
 *    limitations under the License.
 *
 */
package moa.classifiers.rules;

/**
 * Adaptive Model Rules (AMRules), the streaming rule learning algorithm for regression problems.
 *
 * @author A. Bifet, J. Duarte, J. Gama (jgama@fep.up.pt)
 * @version $Revision: 2 $
 *
 * This algorithm learns ordered and unordered rule sets from a data stream. Each rule in AMRules uses a
 * Page-Hinkley test to detect changes in the process generating the data and reacts to changes by pruning the rule set.
 * This algorithm also performs detection of anomalies.
 *
 * <p>Learning Adaptive Model Rules from High-Speed Data Streams, ECML 2013, E. Almeida, C. Ferreira, and J. Gama; </p>
 * Project Knowledge Discovery from Data Streams, FCT LIAAD-INESC TEC.
 *
 * <p>Parameters:</p>
 * <ul>
 * <li> Hoeffding Bound parameters</li>
 * <li> -c: split Confidence </li>
 * <li> -t: Tie Threshold </li>
 * <li> -g: GracePeriod, the number of instances a leaf should observe between split attempts </li>
 * <li> Page-Hinckley parameters</li>
 * <li> -H: Drift detection OFF</li>
 * <li> -a: The alpha value to use in the Page Hinckley change detection tests.</li>
 * <li> -l: The threshold value (Lambda) to be used in the Page Hinckley change detection tests.</li>
 * <li> Anomaly Detection parameters</li>
 * <li> -A: Anomaly detection ON</li>
 * <li> -u: Univariate Threshold</li>
 * <li> -m: Multivariate Threshold</li>
 * <li> Method parameters</li>
 * <li> -P: Prediction function to use. Adaptive / Perceptron / Target Mean. Adaptive predefined </li>
 * <li> -O: Learn ordered rules. Unordered rule predefined</li>
 * <li> Perceptron parameters</li>
 * <li> -s: randomSeed</li>
 * <li> -r: learning Ratio </li>
 * <li> -d: learning Ratio Decay</li>
 * <li> Output Verbose Level </li>
 * <li> -v: Verbosity level 1 to 5</li>
 * </ul>
 */
import moa.classifiers.Regressor;
import moa.classifiers.core.attributeclassobservers.AttributeClassObserver;
import moa.classifiers.rules.core.Rule;
import moa.classifiers.rules.core.RuleActiveLearningNode;
import moa.classifiers.rules.core.RuleActiveRegressionNode;
import moa.classifiers.rules.core.Rule.Builder;
import moa.classifiers.rules.core.voting.ErrorWeightedVote;
import moa.classifiers.rules.core.voting.InverseErrorWeightedVote;
import moa.classifiers.rules.functions.Perceptron;
import moa.core.StringUtils;
import moa.options.ClassOption;
import moa.options.FlagOption;
import moa.options.FloatOption;
import moa.options.MultiChoiceOption;

public class AMRulesRegressor extends AbstractAMRules implements Regressor{

    private static final long serialVersionUID = 5988040868275521928L;

    //============================= SET OPTIONS ==============================//

    // -d: when set, the perceptron learning ratio stays constant instead of decaying.
    public FlagOption constantLearningRatioDecayOption = new FlagOption(
            "learningRatio_Decay_set_constant", 'd',
            "Learning Ratio Decay in Perceptron set to be constant. (The next parameter).");

    // -s: base learning ratio for the perceptrons in the leaves.
    public FloatOption learningRatioOption = new FloatOption(
            "learningRatio", 's',
            "Constante Learning Ratio to use for training the Perceptrons in the leaves.", 0.025);

    // -P: prediction strategy — index 0 Adaptive, 1 Perceptron, 2 Target Mean.
    public MultiChoiceOption predictionFunctionOption = new MultiChoiceOption(
            "predictionFunctionOption", 'P', "The prediction function to use.",
            new String[]{ "Adaptative","Perceptron", "Target Mean"},
            new String[]{ "Adaptative","Perceptron", "Target Mean"}, 0);

    // -V: voting strategy used to combine the predictions of covering rules.
    public ClassOption votingTypeOption = new ClassOption("votingType",
            'V', "Voting Type.",
            ErrorWeightedVote.class,
            "InverseErrorWeightedVote");

    //============================= END SET OPTIONS ==============================//

    //============================== Classes ====================================//

    /**
     * Creates a new rule, optionally seeding its learning node from an existing
     * node's perceptron and/or from pre-computed sufficient statistics.
     *
     * @param ID         identifier for the new rule
     * @param node       learning node to copy the perceptron from (may be null)
     * @param statistics sufficient statistics {count, sum, sumSquares} used to
     *                   initialise the target mean (may be null)
     * @return the newly built rule
     */
    protected Rule newRule(int ID, RuleActiveLearningNode node, double[] statistics) {
        Rule r=newRule(ID);
        if (node!=null)
        {
            // Clone the source node's perceptron so the new rule starts from its weights.
            if(((RuleActiveRegressionNode)node).getPerceptron()!=null)
            {
                ((RuleActiveRegressionNode)r.getLearningNode()).setPerceptron(new Perceptron(((RuleActiveRegressionNode)node).getPerceptron()));
                ((RuleActiveRegressionNode)r.getLearningNode()).getPerceptron().setLearningRatio(this.learningRatioOption.getValue());
            }
            if (statistics==null)
            {
                // Seed the target mean from the source node's observed mean.
                // NOTE(review): unlike the branch below, getTargetMean() is not
                // null-checked here — may NPE when the prediction function is
                // Perceptron-only; verify against RuleActiveRegressionNode.
                // Also note the count is reset to 1 here but to statistics[0]
                // in the branch below — confirm this asymmetry is intended.
                double mean;
                if(node.getNodeStatistics().getValue(0)>0){
                    mean=node.getNodeStatistics().getValue(1)/node.getNodeStatistics().getValue(0);
                    ((RuleActiveRegressionNode)r.getLearningNode()).getTargetMean().reset(mean, 1);
                }
            }
        }
        // Explicit statistics take precedence for initialising the target mean.
        if (statistics!=null && ((RuleActiveRegressionNode)r.getLearningNode()).getTargetMean()!=null)
        {
            double mean;
            if(statistics[0]>0){
                mean=statistics[1]/statistics[0];
                ((RuleActiveRegressionNode)r.getLearningNode()).getTargetMean().reset(mean, (long)statistics[0]);
            }
        }
        return r;
    }

    /** Factory for the regression learning node built from a rule builder. */
    public RuleActiveLearningNode newRuleActiveLearningNode(Builder builder) {
        return new RuleActiveRegressionNode(builder);
    }

    /** Factory for the regression learning node seeded with initial observations. */
    public RuleActiveLearningNode newRuleActiveLearningNode(double[] initialClassObservations) {
        return new RuleActiveRegressionNode(initialClassObservations);
    }

    /** Appends the chosen prediction function to the model description shown in the GUI. */
    @Override
    public void getModelDescription(StringBuilder out, int indent) {
        super.getModelDescription(out, indent);
        StringUtils.appendIndented(out, indent, "The prediction function used: "+this.predictionFunctionOption.getChosenLabel());
        StringUtils.appendNewline(out);
    }

    /**
     * This method initializes and resets the algorithm: clears the statistics,
     * restarts rule numbering and creates a fresh default rule.
     */
    @Override
    public void resetLearningImpl() {
        this.statistics= new double[]{0.0,0,0};
        this.ruleNumberID=0;
        this.defaultRule = newRule(++this.ruleNumberID);
    }

    /**
     * Builds a bare rule configured from the Page-Hinckley / prediction options,
     * and wires the builder back to its owning rule.
     */
    private Rule newRule(int ID) {
        Rule r=new Rule.Builder().
                threshold(this.pageHinckleyThresholdOption.getValue()).
                alpha(this.pageHinckleyAlphaOption.getValue()).
                changeDetection(this.DriftDetectionOption.isSet()).
                predictionFunction(this.predictionFunctionOption.getChosenIndex()).
                statistics(new double[3]).
                id(ID).
                amRules(this).build();
        r.getBuilder().setOwner(r);
        return r;
    }

    @Override
    public boolean isRandomizable() {
        return true;
    }

    /** Creates a fresh vote combiner by copying the pre-materialized voting-type option. */
    @Override
    public ErrorWeightedVote newErrorWeightedVote() {
        return (ErrorWeightedVote)((ErrorWeightedVote) votingTypeOption.getPreMaterializedObject()).copy();
    }
}
Java
/*
 *    AbstractAMRules.java
 *    Copyright (C) 2014 University of Porto, Portugal
 *    @author A. Bifet, J. Duarte, J. Gama
 *
 *    Licensed under the Apache License, Version 2.0 (the "License");
 *    you may not use this file except in compliance with the License.
 *    You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing, software
 *    distributed under the License is distributed on an "AS IS" BASIS,
 *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *    See the License for the specific language governing permissions and
 *    limitations under the License.
 *
 */
package moa.classifiers.rules;

/**
 * Adaptive Model Rules (AMRules), the streaming rule learning algorithm for regression problems.
 *
 * @author A. Bifet, J. Duarte, J. Gama (jgama@fep.up.pt)
 * @version $Revision: 2 $
 *
 * This algorithm learns ordered and unordered rule sets from a data stream. Each rule in AMRules uses a
 * Page-Hinkley test to detect changes in the process generating the data and reacts to changes by pruning the rule set.
 * This algorithm also performs detection of anomalies.
 *
 **/
import java.util.Arrays;
import java.util.Iterator;

import weka.core.Instance;
import moa.classifiers.AbstractClassifier;
import moa.classifiers.rules.core.Rule;
import moa.classifiers.rules.core.RuleActiveLearningNode;
import moa.classifiers.rules.core.RuleSet;
import moa.classifiers.rules.core.Rule.Builder;
import moa.classifiers.rules.core.attributeclassobservers.FIMTDDNumericAttributeClassLimitObserver;
import moa.classifiers.rules.core.voting.ErrorWeightedVote;
import moa.core.DoubleVector;
import moa.core.Measurement;
import moa.core.StringUtils;
import moa.options.ClassOption;
import moa.options.FlagOption;
import moa.options.FloatOption;
import moa.options.IntOption;

public abstract class AbstractAMRules extends AbstractClassifier {

    private static final long serialVersionUID = 1L;

    // The current set of learned rules (the default rule is kept separately).
    protected RuleSet ruleSet = new RuleSet();

    // Rule used for instances that no learned rule covers; expanding it spawns new rules.
    protected Rule defaultRule;

    // Monotonically increasing identifier assigned to newly created rules.
    protected int ruleNumberID;

    // Global sufficient statistics; reset by subclasses in resetLearningImpl().
    protected double[] statistics;

    // sqrt(2*pi), shared by subclasses (e.g. for Gaussian density computations).
    public static final double NORMAL_CONSTANT = Math.sqrt(2 * Math.PI);

    // -c: Hoeffding bound split confidence (delta).
    public FloatOption splitConfidenceOption = new FloatOption(
            "splitConfidence",
            'c',
            "Hoeffding Bound Parameter. The allowable error in split decision, values closer to 0 will take longer to decide.",
            0.0000001, 0.0, 1.0);

    // -t: tie-breaking threshold for the Hoeffding bound test.
    public FloatOption tieThresholdOption = new FloatOption("tieThreshold",
            't', "Hoeffding Bound Parameter. Threshold below which a split will be forced to break ties.",
            0.05, 0.0, 1.0);

    // -g: number of instances a leaf observes between split attempts.
    public IntOption gracePeriodOption = new IntOption("gracePeriod",
            'g', "Hoeffding Bound Parameter. The number of instances a leaf should observe between split attempts.",
            200, 1, Integer.MAX_VALUE);

    // -H: flag that DISABLES drift detection (note the inverted CLI name).
    public FlagOption DriftDetectionOption = new FlagOption("DoNotDetectChanges", 'H',
            "Drift Detection. Page-Hinkley.");

    // -a: Page-Hinckley alpha.
    public FloatOption pageHinckleyAlphaOption = new FloatOption(
            "pageHinckleyAlpha",
            'a',
            "The alpha value to use in the Page Hinckley change detection tests.",
            0.005, 0.0, 1.0);

    // -l: Page-Hinckley lambda threshold.
    public IntOption pageHinckleyThresholdOption = new IntOption(
            "pageHinckleyThreshold",
            'l',
            "The threshold value (Lambda) to be used in the Page Hinckley change detection tests.",
            35, 0, Integer.MAX_VALUE);

    // -A: flag that DISABLES anomaly detection.
    public FlagOption noAnomalyDetectionOption = new FlagOption("noAnomalyDetection", 'A',
            "Disable anomaly Detection.");

    // -m: multivariate anomaly probability threshold.
    // NOTE(review): the CLI name contains a trailing "dd" typo; it is a runtime
    // string that external scripts may rely on, so it is left unchanged.
    public FloatOption multivariateAnomalyProbabilityThresholdOption = new FloatOption(
            "multivariateAnomalyProbabilityThresholdd",
            'm',
            "Multivariate anomaly threshold value.",
            0.99, 0.0, 1.0);

    // -u: univariate anomaly probability threshold.
    public FloatOption univariateAnomalyprobabilityThresholdOption = new FloatOption(
            "univariateAnomalyprobabilityThreshold",
            'u',
            "Univariate anomaly threshold value.",
            0.10, 0.0, 1.0);

    // -n: minimum number of instances a rule must see before anomaly checks apply.
    public IntOption anomalyNumInstThresholdOption = new IntOption(
            "anomalyThreshold",
            'n',
            "The threshold value of anomalies to be used in the anomaly detection.",
            30, 0, Integer.MAX_VALUE);

    // -U: when set, every covering rule votes (unordered); otherwise only the first.
    public FlagOption unorderedRulesOption = new FlagOption("setUnorderedRulesOn", 'U',
            "unorderedRules.");

    // -v: console verbosity level used by debug().
    public IntOption VerbosityOption = new IntOption(
            "verbosity",
            'v',
            "Output Verbosity Control Level. 1 (Less) to 5 (More)",
            1, 1, 5);

    // -z: numeric attribute observer class used by the learning nodes.
    public ClassOption numericObserverOption = new ClassOption("numericObserver",
            'z', "Numeric observer.",
            FIMTDDNumericAttributeClassLimitObserver.class,
            "FIMTDDNumericAttributeClassLimitObserver");

    public AbstractAMRules() {
        super();
    }

    /**
     * description of the Methods used.
     * isRandomizable
     * resetLearningImpl
     * newRule // to build an object with the parameters.
     * trainOnInstanceImpl
     * isAnomaly
     * getVotesForInstance
     * getModelMeasurementsImpl
     * getModelDescription // to printout to MOA GUI
     * debug // use debug('string') to printout to console
     */
    @Override
    public abstract boolean isRandomizable();

    /**
     * Rule.Builder() to build an object with the parameters.
     * If you have an algorithm with many parameters, especially if some of them are optional,
     * it can be beneficial to define an object that represents all of the parameters.
     * @return a new rule; learning node and statistics can be null
     */
    abstract protected Rule newRule(int ID, RuleActiveLearningNode learningNode, double [] statistics); //Learning node and statistics can be null

    /**
     * AMRules Algorithm.
     * Method for updating (training) the AMRules model using a new instance
     */
    private int numChangesDetected;   //Just for statistics
    private int numAnomaliesDetected; //Just for statistics
    private int numInstances;         //Just for statistics

    @Override
    public void trainOnInstanceImpl(Instance instance) {
        /**
         * AMRules Algorithm
         *
         //For each rule in the rule set
            //If rule covers the instance
                //if the instance is not an anomaly
                    //Update Change Detection Tests
                        //Compute prediction error
                        //Call PHTest
                        //If change is detected then
                            //Remove rule
                        //Else
                            //Update sufficient statistics of rule
                            //If number of examples in rule  > Nmin
                                //Expand rule
                //If ordered set then
                    //break
         //If none of the rule covers the instance
            //Update sufficient statistics of default rule
            //If number of examples in default rule is multiple of Nmin
                //Expand default rule and add it to the set of rules
                //Reset the default rule
         */
        ++numInstances;
        debug("Train",3);
        debug("Nº instance "+numInstances + " - " + instance.toString(),3);
        boolean rulesCoveringInstance = false;
        Iterator<Rule> ruleIterator= this.ruleSet.iterator();
        while (ruleIterator.hasNext()) {
            Rule rule = ruleIterator.next();
            if (rule.isCovering(instance) == true) {
                rulesCoveringInstance = true;
                if (isAnomaly(instance, rule) == false) {
                    //Update Change Detection Tests
                    double error = rule.computeError(instance); //Use adaptive mode error
                    boolean changeDetected = rule.getLearningNode().updateChangeDetection(error);
                    if (changeDetected == true) {
                        // Drift: drop the rule via the iterator (safe removal mid-iteration).
                        debug("I) Drift Detected. Exa. : " + this.numInstances + " (" + rule.getInstancesSeen() +") Remove Rule: " +rule.getRuleNumberID(),1);
                        ruleIterator.remove();
                        this.numChangesDetected++;  //Just for statistics
                    } else {
                        rule.updateStatistics(instance);
                        // Attempt expansion once per grace period.
                        if (rule.getInstancesSeen()  % this.gracePeriodOption.getValue() == 0.0) {
                            if (rule.tryToExpand(this.splitConfidenceOption.getValue(), this.tieThresholdOption.getValue()) ) {
                                rule.split();
                                debug("Rule Expanded:",2);
                                debug(rule.printRule(),2);
                            }
                        }
                    }
                    // Ordered rules: only the first covering rule is trained.
                    if (!this.unorderedRulesOption.isSet())
                        break;
                } else {
                    debug("Anomaly Detected: " + this.numInstances + " Rule: " +rule.getRuleNumberID() ,1);
                    this.numAnomaliesDetected++; //Just for statistics
                }
            }
        }
        if (rulesCoveringInstance == false){
            // No rule covered the instance: train the default rule instead.
            defaultRule.updateStatistics(instance);
            if (defaultRule.getInstancesSeen() % this.gracePeriodOption.getValue() == 0.0) {
                debug("Nr. examples "+defaultRule.getInstancesSeen(), 4);
                if (defaultRule.tryToExpand(this.splitConfidenceOption.getValue(), this.tieThresholdOption.getValue()) == true) {
                    // The other branch of the split seeds the replacement default rule.
                    Rule newDefaultRule=newRule(defaultRule.getRuleNumberID(),defaultRule.getLearningNode(),defaultRule.getLearningNode().getStatisticsOtherBranchSplit()); //other branch
                    // The expanded default rule is promoted into the rule set.
                    defaultRule.split();
                    defaultRule.setRuleNumberID(++ruleNumberID);
                    this.ruleSet.add(this.defaultRule);
                    debug("Default rule expanded! New Rule:",2);
                    debug(defaultRule.printRule(),2);
                    debug("New default rule:", 3);
                    debug(newDefaultRule.printRule(),3);
                    defaultRule=newDefaultRule;
                }
            }
        }
    }

    /**
     * Method to verify if the instance is an anomaly.
     * Anomaly checks only apply after the rule has seen enough instances
     * (anomalyNumInstThresholdOption) and when detection is not disabled.
     * @param instance the instance to check
     * @param rule the covering rule whose statistics are used
     * @return true when the instance is considered an anomaly for this rule
     */
    private boolean isAnomaly(Instance instance, Rule rule) {
        //AMRUles is equipped with anomaly detection. If on, compute the anomaly value.
        boolean isAnomaly = false;
        if (this.noAnomalyDetectionOption.isSet() == false){
            if (rule.getInstancesSeen() >= this.anomalyNumInstThresholdOption.getValue()) {
                isAnomaly = rule.isAnomaly(instance,
                        this.univariateAnomalyprobabilityThresholdOption.getValue(),
                        this.multivariateAnomalyProbabilityThresholdOption.getValue(),
                        this.anomalyNumInstThresholdOption.getValue());
            }
        }
        return isAnomaly;
    }

    /**
     * getVotesForInstance extension of the instance method getVotesForInstance
     * in moa.classifier.java
     * returns the prediction of the instance.
     * Called in EvaluateModelRegression
     */
    @Override
    public double[] getVotesForInstance(Instance instance) {
        ErrorWeightedVote errorWeightedVote=newErrorWeightedVote();
        //DoubleVector combinedVote = new DoubleVector();
        debug("Test",3);
        int numberOfRulesCovering = 0;

        VerboseToConsole(instance); // Verbose to console Dataset name.
        for (Rule rule: ruleSet) {
            if (rule.isCovering(instance) == true){
                numberOfRulesCovering++;
                //DoubleVector vote = new DoubleVector(rule.getPrediction(instance));
                double [] vote=rule.getPrediction(instance);
                double error= rule.getCurrentError();
                debug("Rule No"+ rule.getRuleNumberID() + " Vote: " + Arrays.toString(vote) + " Error: " + error + " Y: " + instance.classValue(),3); //predictionValueForThisRule);
                errorWeightedVote.addVote(vote,error);
                //combinedVote.addValues(vote);
                if (!this.unorderedRulesOption.isSet()) { // Ordered Rules Option.
                    break; // Only one rule cover the instance.
                }
            }
        }

        if (numberOfRulesCovering == 0) {
            // Fall back to the default rule when no learned rule covers the instance.
            //combinedVote = new DoubleVector(defaultRule.getPrediction(instance));
            double [] vote=defaultRule.getPrediction(instance);
            double error= defaultRule.getCurrentError();
            errorWeightedVote.addVote(vote,error);
            debug("Default Rule Vote " + Arrays.toString(vote) + " Error " + error + " Y: " + instance.classValue(),3);
        }
        double[] weightedVote=errorWeightedVote.computeWeightedVote();
        double weightedError=errorWeightedVote.getWeightedError();
        debug("Weighted Rule - Vote: " + Arrays.toString(weightedVote) + " Weighted Error: " + weightedError + " Y:" + instance.classValue(),3);
        return weightedVote;
    }

    /**
     * print GUI evaluate model
     */
    @Override
    protected Measurement[] getModelMeasurementsImpl() {
        return new Measurement[]{
                new Measurement("anomaly detections", this.numAnomaliesDetected),
                new Measurement("change detections", this.numChangesDetected),
                new Measurement("rules (number)", this.ruleSet.size()+1)}; // +1 for the default rule
    }

    /**
     * print GUI learn model
     */
    @Override
    public void getModelDescription(StringBuilder out, int indent) {
        indent=0;
        if(!this.unorderedRulesOption.isSet()){
            StringUtils.appendIndented(out, indent, "Method Ordered");
            StringUtils.appendNewline(out);
        }else{
            StringUtils.appendIndented(out, indent, "Method Unordered");
            StringUtils.appendNewline(out);
        }
        // The -H flag INVERTS the meaning: set means change detection is off.
        if(this.DriftDetectionOption.isSet()){
            StringUtils.appendIndented(out, indent, "Change Detection OFF");
            StringUtils.appendNewline(out);
        }else{
            StringUtils.appendIndented(out, indent, "Change Detection ON");
            StringUtils.appendNewline(out);
        }
        if(this.noAnomalyDetectionOption.isSet()){
            StringUtils.appendIndented(out, indent, "Anomaly Detection OFF");
            StringUtils.appendNewline(out);
        }else{
            StringUtils.appendIndented(out, indent, "Anomaly Detection ON");
            StringUtils.appendNewline(out);
        }
        StringUtils.appendIndented(out, indent, "Number of Rules: " + (this.ruleSet.size()+1));
        StringUtils.appendNewline(out);
    }

    /**
     * Print to console when the configured verbosity is at least the given level.
     * @param string the message to print
     * @param level minimum verbosity level required to print
     */
    protected void debug(String string, int level) {
        if (VerbosityOption.getValue()>=level){
            System.out.println(string);
        }
    }

    /** Prints the dataset name and the rule-ordering mode at verbosity >= 5. */
    protected void VerboseToConsole(Instance inst) {
        if(VerbosityOption.getValue()>=5){
            System.out.println();
            System.out.println("I) Dataset: "+inst.dataset().relationName());
            if(!this.unorderedRulesOption.isSet()){
                System.out.println("I) Method Ordered");
            }else{
                System.out.println("I) Method Unordered");
            }
        }
    }

    /** Prints every rule in the set plus the default rule at verbosity >= 2. */
    public void PrintRuleSet() {
        debug("Rule in RuleSet:",2);
        for (Rule rule: ruleSet) {
            debug(rule.printRule(),2);
        }
        debug("Default rule :",2);
        debug(this.defaultRule.printRule(),2);
    }

    abstract public RuleActiveLearningNode newRuleActiveLearningNode(Builder builder);

    abstract public RuleActiveLearningNode newRuleActiveLearningNode(double[] initialClassObservations);

    /** Instance-level wrapper around the static index-mapping helper. */
    public int getModelAttIndexToInstanceAttIndex(int index, Instance inst){
        return modelAttIndexToInstanceAttIndex(index, inst);
    }

    @Override
    public void resetLearningImpl() {
        // TODO Auto-generated method stub
    }

    /**
     * Gets the index of the attribute in the instance,
     * given the index of the attribute in the learner.
     *
     * @param index the index of the attribute in the learner
     * @param inst the instance
     * @return the index in the instance
     */
    public static int modelAttIndexToInstanceAttIndex(int index, Instance inst) {
        // Skips over the class attribute.
        // NOTE(review): uses <= where sibling MOA code typically uses < — for a
        // trailing class index both behave the same on in-range model indices;
        // confirm for datasets whose class attribute is not last.
        return index<= inst.classIndex() ? index : index + 1;
    }

    abstract public ErrorWeightedVote newErrorWeightedVote();
}
Java
/* * RuleActiveRegressionNode.java * Copyright (C) 2014 University of Porto, Portugal * @author A. Bifet, J. Duarte, J. Gama * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * */ package moa.classifiers.rules.core; import java.util.Arrays; import java.util.LinkedList; import java.util.List; import moa.classifiers.core.AttributeSplitSuggestion; import moa.classifiers.core.attributeclassobservers.AttributeClassObserver; import moa.classifiers.core.attributeclassobservers.FIMTDDNumericAttributeClassObserver; import moa.classifiers.core.splitcriteria.SDRSplitCriterion; import moa.classifiers.core.splitcriteria.SplitCriterion; import moa.classifiers.rules.AMRulesRegressor; import moa.classifiers.rules.AbstractAMRules; import moa.classifiers.rules.core.splitcriteria.SDRSplitCriterionAMRules; import moa.classifiers.rules.functions.Perceptron; import moa.classifiers.rules.functions.TargetMean; import moa.core.DoubleVector; import weka.core.Instance; /** * A modified ActiveLearningNode that uses a Perceptron as the leaf node model, * and ensures that the class values sent to the attribute observers are not * truncated to ints if regression is being performed */ public class RuleActiveRegressionNode extends RuleActiveLearningNode{ /** * */ private static final long serialVersionUID = 1607453624545272049L; protected Perceptron perceptron; public Perceptron getPerceptron() { return perceptron; } public void setPerceptron(Perceptron perceptron) { this.perceptron = perceptron; } public 
TargetMean getTargetMean() { return targetMean; } public void setTargetMean(TargetMean targetMean) { this.targetMean = targetMean; } protected TargetMean targetMean; public RuleActiveRegressionNode(double[] initialClassObservations) { super(initialClassObservations); } public RuleActiveRegressionNode() { this(new double[0]); } public RuleActiveRegressionNode(Rule.Builder builder) { super(builder); this.perceptron = new Perceptron(); this.perceptron.prepareForUse(); this.perceptron.learningRatioOption = ((AMRulesRegressor)this.amRules).learningRatioOption; this.perceptron.constantLearningRatioDecayOption = ((AMRulesRegressor)this.amRules).constantLearningRatioDecayOption; if(this.predictionFunction!=1) { this.targetMean = new TargetMean(); if (builder.statistics[0]>0) this.targetMean.reset(builder.statistics[1]/builder.statistics[0],(long)builder.statistics[0]); } this.predictionFunction = builder.predictionFunction; if (builder.statistics!=null) this.nodeStatistics=new DoubleVector(builder.statistics); } /* (non-Javadoc) * @see moa.classifiers.rules.RuleActiveLearningNodeInterface#updateStatistics(weka.core.Instance) */ public void updateStatistics(Instance instance) { super.updateStatistics(instance); this.perceptron.trainOnInstance(instance); if (this.predictionFunction != 1) { //Train target mean if prediction function is not Perceptron this.targetMean.trainOnInstance(instance); } } public double[] getPrediction(Instance instance, int predictionMode) { double[] ret = new double[1]; if (predictionMode == 1) ret=this.perceptron.getVotesForInstance(instance); else ret=this.targetMean.getVotesForInstance(instance); return ret; } public double getNormalizedPrediction(Instance instance) { double res; double [] aux; switch (this.predictionFunction) { //perceptron - 1 case 1: res=this.perceptron.normalizedPrediction(instance); ; break; //target mean - 2 case 2: aux=this.targetMean.getVotesForInstance(null); res=normalize(aux[0]); break; //adaptive - 0 case 0: int 
predictionMode = this.getLearnerToUse(instance, 0); if(predictionMode == 1) { res=this.perceptron.normalizedPrediction(instance); } else{ aux=this.targetMean.getVotesForInstance(instance); res = normalize(aux[0]); } break; default: throw new UnsupportedOperationException("Prediction mode not in range."); } return res; } /* (non-Javadoc) * @see moa.classifiers.rules.RuleActiveLearningNodeInterface#getLearnerToUse(weka.core.Instance, int) */ public int getLearnerToUse(Instance instance, int predMode) { int predictionMode = predMode; if (predictionMode == 0) { double perceptronError= this.perceptron.getCurrentError(); double meanTargetError =this.targetMean.getCurrentError(); debug("\n Check P:" + perceptronError + " M:" + meanTargetError,5); debug("Rule" + this.owner.ruleNumberID + " P:" + this.perceptron.getVotesForInstance(instance)[0] + " (" + perceptronError + ")" + " M:" + this.targetMean.getVotesForInstance(instance)[0]+ " (" + meanTargetError + ")",3) ; //Commented by JD debug("Observed Value: " + instance.classValue(),5); if (perceptronError < meanTargetError) { predictionMode = 1; //PERCEPTRON } else { predictionMode = 2; //TARGET MEAN } } return predictionMode; } private double normalize(double value) { double meanY = this.nodeStatistics.getValue(1)/this.nodeStatistics.getValue(0); double sdY = computeSD(this.nodeStatistics.getValue(2), this.nodeStatistics.getValue(1), (long)this.nodeStatistics.getValue(0)); double normalizedY = 0.0; if (sdY > 0.0000001) { normalizedY = (value - meanY) / (sdY); } return normalizedY; } public double computeSD(double squaredVal, double val, long size) { if (size > 1) { return Math.sqrt((squaredVal - ((val * val) / size)) / (size - 1.0)); } return 0.0; } public double computeError(Instance instance) { double normalizedPrediction = getNormalizedPrediction(instance); double normalizedClassValue = normalize(instance.classValue()); return Math.abs(normalizedClassValue - normalizedPrediction); } public boolean isAnomaly(Instance 
instance, double uniVariateAnomalyProbabilityThreshold, double multiVariateAnomalyProbabilityThreshold, int numberOfInstanceesForAnomaly) { //AMRUles is equipped with anomaly detection. If on, compute the anomaly value. long perceptronIntancesSeen=this.perceptron.getInstancesSeen(); if ( perceptronIntancesSeen>= numberOfInstanceesForAnomaly) { double atribSum = 0.0; double atribSquredSum = 0.0; double D = 0.0; double N = 0.0; double anomaly = 0.0; for (int x = 0; x < instance.numAttributes() - 1; x++) { // Perceptron is initialized each rule. // this is a local anomaly. int instAttIndex = AMRulesRegressor.modelAttIndexToInstanceAttIndex(x, instance); atribSum = this.perceptron.perceptronattributeStatistics.getValue(x); atribSquredSum = this.perceptron.squaredperceptronattributeStatistics.getValue(x); double mean = atribSum / perceptronIntancesSeen; double sd = computeSD(atribSquredSum, atribSum, perceptronIntancesSeen); double probability = computeProbability(mean, sd, instance.value(instAttIndex)); if (probability > 0.0) { D = D + Math.abs(Math.log(probability)); if (probability < uniVariateAnomalyProbabilityThreshold) {//0.10 N = N + Math.abs(Math.log(probability)); } } else { debug("Anomaly with probability 0 in atribute : " + x, 4); } } anomaly = 0.0; if (D != 0.0) { anomaly = N / D; } if (anomaly >= multiVariateAnomalyProbabilityThreshold) { debuganomaly(instance, uniVariateAnomalyProbabilityThreshold, multiVariateAnomalyProbabilityThreshold, anomaly); return true; } } return false; } protected void debuganomaly(Instance instance, double uni, double multi, double probability) { double atribSum = 0.0; double atribSquredSum = 0.0; for (int x = 0; x < instance.numAttributes() - 1; x++) { int instAttIndex = AMRulesRegressor.modelAttIndexToInstanceAttIndex(x, instance); atribSum = perceptron.perceptronattributeStatistics.getValue(x); atribSquredSum = perceptron.squaredperceptronattributeStatistics.getValue(x); double mean = atribSum / perceptron.getInstancesSeen(); 
double sd = computeSD(atribSquredSum, atribSum, perceptron.getInstancesSeen());
            // Verbose (level 5) dump of the per-attribute anomaly statistics.
            debug("Attribute : " + x, 5);
            debug("Value : " + instance.value(instAttIndex), 5);
            debug("Mean : " + mean, 5);
            debug("SD : " + sd, 5);
            debug("Probability : " + probability, 5);
            debug("Univariate : " + uni, 5);
            debug("Multivariate : " + multi, 5);
            debug("Anomaly in rule :" + this.owner.ruleNumberID, 5);
        }
    }

    /**
     * Re-initialises this node from an older learning node: the perceptron and
     * target-mean models are copied (with their error estimates reset) while
     * this node's own statistics are cleared.
     *
     * @param oldLearningNode the node whose models are carried over; assumed to
     *                        be a RuleActiveRegressionNode (unchecked cast)
     */
    public void initialize(RuleActiveLearningNode oldLearningNode) {
        if (((RuleActiveRegressionNode) oldLearningNode).perceptron != null) {
            this.perceptron = new Perceptron(((RuleActiveRegressionNode) oldLearningNode).perceptron);
            this.perceptron.resetError();
            this.perceptron.setLearningRatio(((AMRulesRegressor) this.amRules).learningRatioOption.getValue());
        }
        if (((RuleActiveRegressionNode) oldLearningNode).targetMean != null) {
            this.targetMean = new TargetMean(((RuleActiveRegressionNode) oldLearningNode).targetMean);
            this.targetMean.resetError();
        }
        // Reset statistics: [0] instance count, [1] sum of y, [2] sum of y^2.
        this.nodeStatistics.setValue(0, 0);
        this.nodeStatistics.setValue(1, 0);
        this.nodeStatistics.setValue(2, 0);
    }

    /**
     * Fallback prediction: the target mean when available, otherwise 0.
     *
     * @return a single-element vote vector
     */
    public double[] getSimplePrediction() {
        if (this.targetMean != null) {
            return this.targetMean.getVotesForInstance(null);
        }
        return new double[]{0};
    }

    /**
     * Decides whether this rule should be expanded with a new split, using the
     * Hoeffding bound to compare the two best SDR-ranked split suggestions.
     * On expansion, the split statistics fields (splitIndex,
     * statisticsNewRuleActiveLearningNode, statisticsBranchSplit,
     * statisticsOtherBranchSplit) are updated as a side effect.
     *
     * @param splitConfidence Hoeffding bound test parameter (1 - confidence)
     * @param tieThreshold    Hoeffding bound tie-breaking threshold
     * @return true if the rule should be expanded
     */
    public boolean tryToExpand(double splitConfidence, double tieThreshold) {
        SplitCriterion splitCriterion = new SDRSplitCriterionAMRules();
        // Using this criterion, find the best split per attribute and rank the results.
        AttributeSplitSuggestion[] bestSplitSuggestions = this.getBestSplitSuggestions(splitCriterion);
        Arrays.sort(bestSplitSuggestions);
        // Determines whether any of the splits should be performed.
        boolean shouldSplit = false;
        if (bestSplitSuggestions.length < 2) {
            // BUG FIX: previously an empty suggestion array indexed [-1] and threw
            // ArrayIndexOutOfBoundsException; bail out early when no suggestion exists.
            if (bestSplitSuggestions.length == 0) {
                return false;
            }
            // Only one split was returned: use it if it has positive merit.
            shouldSplit = bestSplitSuggestions[0].merit > 0;
            bestSuggestion = bestSplitSuggestions[bestSplitSuggestions.length - 1];
        } else {
            // Determine the Hoeffding bound value, used to decide how many instances
            // are needed to be reasonably confident that the test chosen from this
            // sample matches the one that infinite examples would choose.
            double hoeffdingBound = computeHoeffdingBound(1, splitConfidence, getWeightSeen());
            debug("Hoeffding bound " + hoeffdingBound, 4);
            // The two top-ranked splitting suggestions.
            bestSuggestion = bestSplitSuggestions[bestSplitSuggestions.length - 1];
            AttributeSplitSuggestion secondBestSuggestion = bestSplitSuggestions[bestSplitSuggestions.length - 2];
            debug("Merits: " + secondBestSuggestion.merit + " " + bestSuggestion.merit, 4);
            // Split if the upper bound of the SDR ratio (second-best / best) is below 1
            // with confidence 1-delta, or if the Hoeffding bound has shrunk below the
            // tie threshold (the competing attributes are then considered equally good
            // and the one with the higher SDR wins).
            if (bestSuggestion.merit > 0) {
                if ((((secondBestSuggestion.merit / bestSuggestion.merit) + hoeffdingBound) < 1)
                        || (hoeffdingBound < tieThreshold)) {
                    debug("Expanded ", 5);
                    shouldSplit = true;
                }
            }
        }
        if (shouldSplit == true) {
            AttributeSplitSuggestion splitDecision = bestSplitSuggestions[bestSplitSuggestions.length - 1];
            // Pick the branch with the smallest weighted SD as the new rule's branch.
            double minValue = Double.MAX_VALUE;
            double[] branchMerits = SDRSplitCriterionAMRules.computeBranchSplitMerits(bestSuggestion.resultingClassDistributions);
            for (int i = 0; i < bestSuggestion.numSplits(); i++) {
                double value = branchMerits[i];
                if (value < minValue) {
                    minValue = value;
                    splitIndex = i;
                    statisticsNewRuleActiveLearningNode = bestSuggestion.resultingClassDistributionFromSplit(i);
                }
            }
            statisticsBranchSplit = splitDecision.resultingClassDistributionFromSplit(splitIndex);
            statisticsOtherBranchSplit = bestSuggestion.resultingClassDistributionFromSplit(splitIndex == 0 ? 1 : 0);
        }
        return shouldSplit;
    }

    /**
     * Updates this node's statistics ([0] count, [1] sum y, [2] sum y^2) and
     * feeds each numeric attribute value to its observer. Nominal attributes
     * are ignored at this stage.
     *
     * @param inst the incoming (regression) instance
     */
    public void learnFromInstance(Instance inst) {
        nodeStatistics.addToValue(0, 1);
        nodeStatistics.addToValue(1, inst.classValue());
        nodeStatistics.addToValue(2, inst.classValue() * inst.classValue());

        for (int i = 0; i < inst.numAttributes() - 1; i++) {
            int instAttIndex = AbstractAMRules.modelAttIndexToInstanceAttIndex(i, inst);
            AttributeClassObserver obs = this.attributeObservers.get(i);
            if (obs == null) {
                // Lazily create an observer; nominal attributes are skipped.
                if (inst.attribute(instAttIndex).isNumeric()) {
                    obs = newNumericClassObserver();
                    this.attributeObservers.set(i, obs);
                }
            }
            if (obs != null) {
                ((FIMTDDNumericAttributeClassObserver) obs).observeAttributeClass(inst.value(instAttIndex), inst.classValue(), inst.weight());
            }
        }
    }

    /**
     * Collects the best split suggestion of every (numeric) attribute observer.
     * The node statistics act as the pre-split distribution.
     *
     * @param criterion the split criterion used to evaluate candidate splits
     * @return one suggestion per attribute that produced one (possibly empty)
     */
    public AttributeSplitSuggestion[] getBestSplitSuggestions(SplitCriterion criterion) {
        List<AttributeSplitSuggestion> bestSuggestions = new LinkedList<AttributeSplitSuggestion>();
        double[] nodeSplitDist = this.nodeStatistics.getArrayCopy();
        for (int i = 0; i < this.attributeObservers.size(); i++) {
            AttributeClassObserver obs = this.attributeObservers.get(i);
            if (obs != null) {
                // Non-numeric attributes are ignored at this stage.
                // (Renamed from `bestSuggestion` to avoid shadowing the field.)
                AttributeSplitSuggestion suggestion = null;
                if (obs instanceof FIMTDDNumericAttributeClassObserver) {
                    suggestion = obs.getBestEvaluatedSplitSuggestion(criterion, nodeSplitDist, i, true);
                }
                if (suggestion != null) {
                    bestSuggestions.add(suggestion);
                }
            }
        }
        return bestSuggestions.toArray(new AttributeSplitSuggestion[bestSuggestions.size()]);
    }

    /** @return the total weight (instance count) seen by this node */
    @Override
    public double getWeightSeen() {
        if (nodeStatistics != null) {
            return this.nodeStatistics.getValue(0);
        }
        return 0;
    }

    /**
     * Current error estimate: the smaller of the perceptron's and the target
     * mean's errors when both exist, the perceptron's alone otherwise, and
     * Double.MAX_VALUE when no perceptron has been created yet.
     */
    @Override
    public double getCurrentError() {
        double error;
        if (this.perceptron != null) {
            if (targetMean == null) {
                error = perceptron.getCurrentError();
            } else {
                double errorP = perceptron.getCurrentError();
                double errorTM = targetMean.getCurrentError();
                error = (errorP < errorTM) ? errorP : errorTM;
            }
        } else {
            error = Double.MAX_VALUE;
        }
        return error;
    }
}
Java
/*
 *    AbstractErrorWeightedVote.java
 *    Copyright (C) 2014 University of Porto, Portugal
 *    @author A. Bifet, J. Duarte, J. Gama
 *
 *    Licensed under the Apache License, Version 2.0 (the "License");
 *    you may not use this file except in compliance with the License.
 *    You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing, software
 *    distributed under the License is distributed on an "AS IS" BASIS,
 *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *    See the License for the specific language governing permissions and
 *    limitations under the License.
 *
 */
package moa.classifiers.rules.core.voting;

import java.util.ArrayList;
import java.util.List;

import moa.AbstractMOAObject;

/**
 * AbstractErrorWeightedVote class for weighted votes based on estimates of errors.
 * Collects (vote, error) pairs; subclasses define how the weights are derived
 * from the errors in {@link #computeWeightedVote()}.
 *
 * @author João Duarte (jmduarte@inescporto.pt)
 * @version $Revision: 1 $
 */
public abstract class AbstractErrorWeightedVote extends AbstractMOAObject implements ErrorWeightedVote {

    /**
     *
     */
    private static final long serialVersionUID = -7340491298217227675L;

    // Collected votes; votes.get(i) pairs with errors.get(i).
    protected List<double[]> votes;
    // Error estimate associated with each vote.
    protected List<Double> errors;
    // Weights of the last computeWeightedVote() call; null until then.
    protected double[] weights;

    public AbstractErrorWeightedVote() {
        super();
        votes = new ArrayList<double[]>();
        errors = new ArrayList<Double>();
    }

    /**
     * Records a vote together with its error estimate.
     * Note: the vote array is stored by reference, not copied.
     */
    @Override
    public void addVote(double [] vote, double error) {
        votes.add(vote);
        errors.add(error);
    }

    /** Combines the collected votes; implemented by subclasses. */
    @Override
    abstract public double[] computeWeightedVote();

    /**
     * Weighted sum of the recorded errors using the weights computed by the
     * last {@link #computeWeightedVote()} call.
     *
     * @return the weighted error, or -1 when the weights are not available
     *         (computeWeightedVote not yet called, or vote count changed since)
     */
    @Override
    public double getWeightedError() {
        double weightedError=0;
        if (weights!=null && weights.length==errors.size())
        {
            for (int i=0; i<weights.length; ++i)
                weightedError+=errors.get(i)*weights[i];
        }
        else
            weightedError=-1;
        return weightedError;
    }

    /** @return the weights from the last computeWeightedVote() call (may be null). */
    @Override
    public double [] getWeights() {
        return weights;
    }

    /** @return the number of votes added so far. */
    @Override
    public int getNumberVotes() {
        return votes.size();
    }
}
Java
/* * UniformWeightedVote.java * Copyright (C) 2014 University of Porto, Portugal * @author A. Bifet, J. Duarte, J. Gama * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * */ package moa.classifiers.rules.core.voting; /** * UniformWeightedVote class for weighted votes based on estimates of errors. * * @author João Duarte (jmduarte@inescporto.pt) * @version $Revision: 1 $ */ public class UniformWeightedVote extends AbstractErrorWeightedVote { private static final long serialVersionUID = 6359349250620616482L; @Override public double[] computeWeightedVote() { int n=votes.size(); weights=new double[n]; double [] weightedVote=null; if (n>0){ int d=votes.get(0).length; weightedVote=new double[d]; for (int i=0; i<n; i++) { weights[i]=1.0/n; for(int j=0; j<d; j++) weightedVote[j]+=(votes.get(i)[j]*weights[i]); } } return weightedVote; } @Override public void getDescription(StringBuilder sb, int indent) { // TODO Auto-generated method stub } }
Java
/* * UniformWeightedVote.java * Copyright (C) 2014 University of Porto, Portugal * @author A. Bifet, J. Duarte, J. Gama * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * */ package moa.classifiers.rules.core.voting; /** * InverseErrorWeightedVote class for weighted votes based on estimates of errors. * * @author João Duarte (jmduarte@inescporto.pt) * @version $Revision: 1 $ */ public class InverseErrorWeightedVote extends AbstractErrorWeightedVote { /** * */ private static final double EPS = 0.000000001; //just to prevent divide by 0 in 1/X -> 1/(x+EPS) private static final long serialVersionUID = 6359349250620616482L; @Override public double[] computeWeightedVote() { int n=votes.size(); weights=new double[n]; double [] weightedVote=null; if (n>0){ int d=votes.get(0).length; weightedVote=new double[d]; double sumError=0; //weights are 1/(error+eps) for (int i=0; i<n; ++i){ if(errors.get(i)<Double.MAX_VALUE){ weights[i]=1.0/(errors.get(i)+EPS); sumError+=weights[i]; } else weights[i]=0; } if(sumError>0) for (int i=0; i<n; ++i) { //normalize so that weights sum 1 weights[i]/=sumError; //compute weighted vote for(int j=0; j<d; j++) weightedVote[j]+=votes.get(i)[j]*weights[i]; } //Only occurs if all errors=Double.MAX_VALUE else { //compute arithmetic vote for (int i=0; i<n; ++i) { for(int j=0; j<d; j++) weightedVote[j]+=votes.get(i)[j]/n; } } } return weightedVote; } @Override public void getDescription(StringBuilder sb, int indent) { // TODO Auto-generated method stub } }
Java
/*
 *    ErrorWeightedVote.java
 *    Copyright (C) 2014 University of Porto, Portugal
 *    @author A. Bifet, J. Duarte, J. Gama
 *
 *    Licensed under the Apache License, Version 2.0 (the "License");
 *    you may not use this file except in compliance with the License.
 *    You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing, software
 *    distributed under the License is distributed on an "AS IS" BASIS,
 *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *    See the License for the specific language governing permissions and
 *    limitations under the License.
 *
 */
package moa.classifiers.rules.core.voting;

import moa.MOAObject;

/**
 * ErrorWeightedVote interface for weighted votes based on estimates of errors.
 * Implementations collect (vote, error) pairs and combine them into a single
 * prediction whose weights are derived from the errors.
 *
 * @author João Duarte (jmduarte@inescporto.pt)
 * @version $Revision: 1 $
 */
public interface ErrorWeightedVote {

    /**
     * Adds a vote and the corresponding error for the computation of the
     * weighted vote and respective weighted error.
     *
     * @param vote  a vote returned by a classifier
     * @param error the error associated to the vote
     */
    public void addVote(double [] vote, double error);

    /**
     * Computes the weighted vote.
     * Also updates the weights of the votes as a side effect.
     *
     * @return the weighted vote
     */
    public double [] computeWeightedVote();

    /**
     * Returns the weighted error.
     *
     * @pre computeWeightedVote() must have been called
     * @return the weighted error
     */
    public double getWeightedError();

    /**
     * Returns the weights derived by the last weighted-vote computation.
     *
     * @pre computeWeightedVote() must have been called
     * @return the weights
     */
    public double [] getWeights();

    /**
     * The number of votes added so far.
     *
     * @return the number of votes
     */
    public int getNumberVotes();

    /**
     * Creates a copy of the object.
     *
     * @return copy of the object
     */
    public MOAObject copy();
}
Java
/*
 *    RuleActiveLearningNode.java
 *    Copyright (C) 2014 University of Porto, Portugal
 *    @author A. Bifet, J. Duarte, J. Gama
 *
 *    Licensed under the Apache License, Version 2.0 (the "License");
 *    you may not use this file except in compliance with the License.
 *    You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing, software
 *    distributed under the License is distributed on an "AS IS" BASIS,
 *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *    See the License for the specific language governing permissions and
 *    limitations under the License.
 *
 */
package moa.classifiers.rules.core;

import moa.classifiers.core.AttributeSplitSuggestion;
import moa.classifiers.core.attributeclassobservers.AttributeClassObserver;
import moa.classifiers.rules.AbstractAMRules;
import moa.classifiers.rules.driftdetection.PageHinkleyTest;
import moa.classifiers.rules.driftdetection.PageHinkleyFading;
import moa.classifiers.trees.HoeffdingTree;
import moa.classifiers.trees.HoeffdingTree.ActiveLearningNode;
import moa.core.AutoExpandVector;
import moa.core.DoubleVector;
import weka.core.Instance;

/**
 * A modified ActiveLearningNode that uses a Perceptron as the leaf node model,
 * and ensures that the class values sent to the attribute observers are not
 * truncated to ints if regression is being performed.
 */
public abstract class RuleActiveLearningNode extends ActiveLearningNode {

    // Page-Hinkley test for drift detection (field spelled "Hinckley", class "Hinkley").
    // Only created when change detection is enabled -- see the builder constructor.
    protected PageHinkleyTest pageHinckleyTest;

    // Identifier of the prediction strategy to use; interpreted by getLearnerToUse().
    protected int predictionFunction;

    // NOTE: despite the name, TRUE here means change detection is turned OFF --
    // every use in this class tests `changeDetection == false` before updating
    // the Page-Hinkley test.
    protected boolean changeDetection;

    // The rule this learning node belongs to.
    protected Rule owner;

    private static final long serialVersionUID = 9129659494380381126L;

    // The statistics for this node:
    //   [0] number of instances that have reached it
    //   [1] sum of y values
    //   [2] sum of squared y values
    protected DoubleVector nodeStatistics;

    /**
     * Create a new RuleActiveLearningNode
     */
    public RuleActiveLearningNode(double[] initialClassObservations) {
        super(initialClassObservations);
        this.nodeStatistics = new DoubleVector(initialClassObservations);
    }

    /** Creates a node with empty statistics. */
    public RuleActiveLearningNode() {
        this(new double[0]);
    }

    // Back-reference to the AMRules learner that owns this node (options, observers).
    protected AbstractAMRules amRules;

    /**
     * Creates a node configured from a rule builder: statistics, change
     * detection flag (and, when detection is enabled, a PageHinkleyFading
     * test), the owning AMRules learner, prediction function and owner rule.
     */
    public RuleActiveLearningNode(Rule.Builder builder) {
        this(builder.statistics);
        this.changeDetection = builder.changeDetection;
        if (builder.changeDetection == false) {
            this.pageHinckleyTest = new PageHinkleyFading(builder.threshold, builder.alpha);
        }
        this.amRules = builder.amRules;
        this.predictionFunction = builder.predictionFunction;
        this.owner = builder.getOwner();
    }

    /** Trains this node on a single instance; implemented by subclasses. */
    abstract public void learnFromInstance(Instance inst);

    /** HoeffdingTree hook; delegates to the single-argument variant. */
    @Override
    public void learnFromInstance(Instance inst, HoeffdingTree ht) {
        learnFromInstance(inst);
    }

    /**
     * Creates a fresh numeric attribute observer by copying the pre-materialized
     * prototype configured on the owning AMRules learner.
     */
    protected AttributeClassObserver newNumericClassObserver() {
        return (AttributeClassObserver) ((AttributeClassObserver) this.amRules.numericObserverOption.getPreMaterializedObject()).copy();
    }

    /** Alias for learnFromInstance(Instance). */
    public void updateStatistics(Instance instance) {
        learnFromInstance(instance);
    }

    /** @return the per-attribute observers of this node. */
    public AutoExpandVector<AttributeClassObserver> getAttributeObservers() {
        return this.attributeObservers;
    }

    // Prints the message when the learner's verbosity option is at least `level`.
    protected void debug(String string, int level) {
        if (this.amRules.VerbosityOption.getValue() >= level) {
            System.out.println(string);
        }
    }

    /**
     * Predicts the target for an instance using the prediction mode selected
     * by getLearnerToUse() for this node's configured prediction function.
     */
    public double[] getPrediction(Instance instance) {
        int predictionMode = this.getLearnerToUse(instance, this.predictionFunction);
        return getPrediction(instance, predictionMode);
    }

    /** Predicts using an explicit prediction mode; implemented by subclasses. */
    abstract public double[] getPrediction(Instance instance, int predictionMode);

    /** Chooses which internal learner to use for this instance and mode. */
    abstract public int getLearnerToUse(Instance instance, int predictionMode);

    /** Computes the prediction error for one instance; implemented by subclasses. */
    abstract public double computeError(Instance instance);

    /**
     * Feeds an error to the Page-Hinkley test.
     *
     * @return true if a change was detected; always false when change
     *         detection is disabled (changeDetection == true -- see field note)
     */
    public boolean updatePageHinckleyTest(double error) {
        boolean changeDetected = false;
        if (this.changeDetection == false) {
            changeDetected = pageHinckleyTest.update(error);
        }
        return changeDetected;
    }

    /** @return the number of instances seen, derived from the weight seen. */
    public long getInstancesSeen() {
        return (long) this.getWeightSeen();
    }

    /** Anomaly detection hook; implemented by subclasses. */
    abstract public boolean isAnomaly(Instance instance,
            double uniVariateAnomalyProbabilityThreshold,
            double multiVariateAnomalyProbabilityThreshold,
            int numberOfInstanceesForAnomaly);

    /**
     * Attribute probability: one-tailed variant of Chebyshev's inequality,
     * 1 / (1 + k^2) with k = |value - mean| / sd. Returns 0 when sd <= 0.
     */
    public double computeProbability(double mean, double sd, double value) {
        double probability = 0.0;
        if (sd > 0.0) {
            double k = (Math.abs(value - mean) / sd);
            // One tailed variant of Chebyshev's inequality
            probability = 1.0 / (1 + k * k);
        }
        return probability;
    }

    // Best split suggestion found by the last expansion attempt; null until then.
    protected AttributeSplitSuggestion bestSuggestion = null;

    /** @return the index of the branch selected by the last split decision. */
    public int getSplitIndex() {
        return splitIndex;
    }

    public void setSplitIndex(int splitIndex) {
        this.splitIndex = splitIndex;
    }

    // Branch index chosen by the last split decision.
    protected int splitIndex = 0;

    public AttributeSplitSuggestion getBestSuggestion() {
        return bestSuggestion;
    }

    public void setBestSuggestion(AttributeSplitSuggestion bestSuggestion) {
        this.bestSuggestion = bestSuggestion;
    }

    // Statistics of the branch that becomes the expanded rule's new node.
    protected double[] statisticsNewRuleActiveLearningNode = null;

    // Statistics of the branch selected by the split decision.
    protected double[] statisticsBranchSplit = null;

    public double[] getStatisticsBranchSplit() {
        return statisticsBranchSplit;
    }

    public void setStatisticsBranchSplit(double[] statisticsBranchSplit) {
        this.statisticsBranchSplit = statisticsBranchSplit;
    }

    public double[] getStatisticsNewRuleActiveLearningNode() {
        return statisticsNewRuleActiveLearningNode;
    }

    public void setStatisticsNewRuleActiveLearningNode(
            double[] statisticsNewRuleActiveLearningNode) {
        this.statisticsNewRuleActiveLearningNode = statisticsNewRuleActiveLearningNode;
    }

    // Statistics of the branch NOT taken by the split decision.
    protected double[] statisticsOtherBranchSplit;

    public double[] getStatisticsOtherBranchSplit() {
        return statisticsOtherBranchSplit;
    }

    public void setStatisticsOtherBranchSplit(double[] statisticsOtherBranchSplit) {
        this.statisticsOtherBranchSplit = statisticsOtherBranchSplit;
    }

    /** Attempts to expand the rule with a new split; implemented by subclasses. */
    abstract public boolean tryToExpand(double splitConfidence, double tieThreshold);

    /**
     * Hoeffding bound: sqrt(range^2 * ln(1/confidence) / (2n)).
     */
    public static double computeHoeffdingBound(double range, double confidence, double n) {
        return Math.sqrt(((range * range) * Math.log(1.0 / confidence)) / (2.0 * n));
    }

    /** Re-initialises this node from an older learning node; implemented by subclasses. */
    abstract public void initialize(RuleActiveLearningNode oldLearningNode);

    /** Fallback prediction; implemented by subclasses. */
    abstract public double[] getSimplePrediction();

    public DoubleVector getNodeStatistics() {
        return this.nodeStatistics;
    }

    /**
     * Feeds an error to the Page-Hinkley test.
     * NOTE(review): duplicates updatePageHinckleyTest() exactly -- candidates
     * for consolidation.
     */
    public boolean updateChangeDetection(double error) {
        if (changeDetection == false) {
            return pageHinckleyTest.update(error);
        } else
            return false;
    }

    /** Current error estimate of this node's model; implemented by subclasses. */
    abstract public double getCurrentError();
}
Java
/*
 *    Predicate.java
 *    Copyright (C) 2013 University of Porto, Portugal
 *    @author E. Almeida, A. Carvalho, J. Gama
 *
 *    Licensed under the Apache License, Version 2.0 (the "License");
 *    you may not use this file except in compliance with the License.
 *    You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing, software
 *    distributed under the License is distributed on an "AS IS" BASIS,
 *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *    See the License for the specific language governing permissions and
 *    limitations under the License.
 *
 */
package moa.classifiers.rules.core;

import weka.core.Instance;

/**
 * A boolean condition over a single instance, used as a rule antecedent
 * literal in AMRules.
 */
public interface Predicate {

    /**
     * Evaluates this predicate against an instance.
     *
     * @param instance the instance to test
     * @return true if the instance satisfies the predicate
     */
    public boolean evaluate(Instance instance);
}
Java
/*
 *    RuleSet.java
 *    Copyright (C) 2013 University of Porto, Portugal
 *    @author E. Almeida, A. Carvalho, J. Gama
 *
 *    Licensed under the Apache License, Version 2.0 (the "License");
 *    you may not use this file except in compliance with the License.
 *    You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing, software
 *    distributed under the License is distributed on an "AS IS" BASIS,
 *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *    See the License for the specific language governing permissions and
 *    limitations under the License.
 *
 */
package moa.classifiers.rules.core;

import java.util.ArrayList;

/**
 * An ordered collection of {@link Rule} objects maintained by an AMRules
 * learner. A plain typed ArrayList: all list behaviour is inherited unchanged.
 */
public class RuleSet extends ArrayList<Rule> {

    private static final long serialVersionUID = 1L;
}
Java
/*
 *    NumericAttributeBinaryRulePredicate.java
 *    Copyright (C) 2013 University of Porto, Portugal
 *    @author E. Almeida, A. Carvalho, J. Gama
 *
 *    Licensed under the Apache License, Version 2.0 (the "License");
 *    you may not use this file except in compliance with the License.
 *    You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing, software
 *    distributed under the License is distributed on an "AS IS" BASIS,
 *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *    See the License for the specific language governing permissions and
 *    limitations under the License.
 *
 */
package moa.classifiers.rules.core.conditionaltests;

import moa.classifiers.core.conditionaltests.InstanceConditionalBinaryTest;
import moa.classifiers.rules.core.Predicate;
import moa.core.InstancesHeader;
import weka.core.Instance;

/**
 * Numeric binary conditional test for instances to use to split nodes in
 * AMRules. Encodes one of three comparisons of a single numeric attribute
 * against a fixed value: equality, less-or-equal, or greater-than.
 *
 * @version $Revision: 1 $
 */
public class NumericAttributeBinaryRulePredicate extends InstanceConditionalBinaryTest implements Predicate {

    private static final long serialVersionUID = 1L;

    // Model-level index of the attribute being tested.
    protected int attIndex;

    // Constant the attribute value is compared against.
    protected double attValue;

    // Comparison operator: 0 =, 1 <=, 2 >
    protected int operator;

    public NumericAttributeBinaryRulePredicate(int attIndex, double attValue,
            int operator) {
        this.attIndex = attIndex;
        this.attValue = attValue;
        this.operator = operator;
    }

    /**
     * Returns 0 when the instance satisfies the comparison, 1 when it does
     * not, and -1 when the attribute value is missing. The model attribute
     * index is shifted past the class attribute when necessary.
     */
    @Override
    public int branchForInstance(Instance inst) {
        int instAttIndex = this.attIndex < inst.classIndex() ? this.attIndex
                : this.attIndex + 1;
        if (inst.isMissing(instAttIndex)) {
            return -1;
        }
        double v = inst.value(instAttIndex);
        int ret = 0;
        switch (this.operator) {
            case 0:
                ret = (v == this.attValue) ? 0 : 1;
                break;
            case 1:
                ret = (v <= this.attValue) ? 0 : 1;
                break;
            case 2:
                ret = (v > this.attValue) ? 0 : 1;
        }
        return ret;
    }

    /**
     * Human-readable description of a branch.
     * NOTE(review): the comparison symbol is chosen from the BRANCH index
     * (0 -> "=", 1 -> "<=", 2 -> ">") rather than from this.operator as
     * toString() does, and branchForInstance never returns 2 -- this looks
     * inconsistent; confirm intended behaviour before relying on the output.
     */
    @Override
    public String describeConditionForBranch(int branch, InstancesHeader context) {
        if ((branch >= 0) && (branch <= 2)) {
            String compareChar = (branch == 0) ? "=" : (branch == 1) ? "<=" : ">";
            return InstancesHeader.getAttributeNameString(context, this.attIndex)
                    + ' ' + compareChar
                    + InstancesHeader.getNumericValueString(context, this.attIndex,
                    this.attValue);
        }
        throw new IndexOutOfBoundsException();
    }

    /** Intentionally empty: no description is produced for this object. */
    @Override
    public void getDescription(StringBuilder sb, int indent) {
        // TODO Auto-generated method stub
    }

    /** @return the single attribute index this test depends on. */
    @Override
    public int[] getAttsTestDependsOn() {
        return new int[]{this.attIndex};
    }

    /** @return the constant the attribute is compared against. */
    public double getSplitValue() {
        return this.attValue;
    }

    /** A predicate is satisfied exactly when the instance takes branch 0. */
    @Override
    public boolean evaluate(Instance inst) {
        return (branchForInstance(inst) == 0);
    }

    @Override
    public String toString() {
        if ((operator >= 0) && (operator <= 2)) {
            String compareChar = (operator == 0) ? "=" : (operator == 1) ? "<=" : ">";
            //int equalsBranch = this.equalsPassesTest ? 0 : 1;
            return "x" + this.attIndex + ' ' + compareChar + ' ' + this.attValue;
        }
        throw new IndexOutOfBoundsException();
    }

    /** Structural equality: same attribute, value and operator. */
    public boolean isEqual(NumericAttributeBinaryRulePredicate predicate) {
        return (this.attIndex == predicate.attIndex
                && this.attValue == predicate.attValue
                && this.operator == predicate.operator);
    }

    /** True when both predicates test the same attribute with the same operator. */
    public boolean isUsingSameAttribute(
            NumericAttributeBinaryRulePredicate predicate) {
        return (this.attIndex == predicate.attIndex
                && this.operator == predicate.operator);
    }

    /**
     * Tests whether the given predicate's threshold is subsumed by this one:
     * for "<=", the other threshold must be at most this one; for ">", it must
     * be strictly greater.
     */
    public boolean isIncludedInRuleNode(
            NumericAttributeBinaryRulePredicate predicate) {
        boolean ret;
        if (this.operator == 1) { // <=
            ret = (predicate.attValue <= this.attValue);
        } else { // >
            ret = (predicate.attValue > this.attValue);
        }
        return ret;
    }

    /** Copies the threshold value from another predicate. */
    public void setAttributeValue(
            NumericAttributeBinaryRulePredicate ruleSplitNodeTest) {
        this.attValue = ruleSplitNodeTest.attValue;
    }
}
Java
/* * NominalAttributeBinaryRulePredicate.java * Copyright (C) 2013 University of Porto, Portugal * @author E. Almeida, A. Carvalho, J. Gama * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * */ package moa.classifiers.rules.core.conditionaltests; import moa.classifiers.core.conditionaltests.NominalAttributeBinaryTest; import moa.classifiers.rules.core.Predicate; import weka.core.Instance; /** * Nominal binary conditional test for instances to use to split nodes in rules. * * @version $Revision: 7 $ */ public class NominalAttributeBinaryRulePredicate extends NominalAttributeBinaryTest implements Predicate { public NominalAttributeBinaryRulePredicate(int attIndex, int attValue) { super(attIndex, attValue); } private static final long serialVersionUID = 1L; @Override public boolean evaluate(Instance inst) { return (branchForInstance(inst) == 0); } }
Java
/* * SDRSplitCriterionAMRules.java * Copyright (C) 2014 University of Porto, Portugal * @author A. Bifet, J. Duarte, J. Gama * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * */ package moa.classifiers.rules.core.splitcriteria; import moa.classifiers.core.splitcriteria.SDRSplitCriterion; import moa.classifiers.core.splitcriteria.SplitCriterion; public class SDRSplitCriterionAMRules extends SDRSplitCriterion implements SplitCriterion { private static final long serialVersionUID = 1L; @Override public double getMeritOfSplit(double[] preSplitDist, double[][] postSplitDists) { double SDR=0.0; double N = preSplitDist[0]; int count = 0; for(int i = 0; i < postSplitDists.length; i++) { double Ni = postSplitDists[i][0]; if(Ni >=0.05*preSplitDist[0]){ count = count +1; } } if(count == postSplitDists.length){ SDR = computeSD(preSplitDist); for(int i = 0; i < postSplitDists.length; i++) { double Ni = postSplitDists[i][0]; SDR -= (Ni/N)*computeSD(postSplitDists[i]); } } return SDR; } @Override public double getRangeOfMerit(double[] preSplitDist) { return 1; } public static double[] computeBranchSplitMerits(double[][] postSplitDists) { double[] SDR = new double[postSplitDists.length]; double N = 0; for(int i = 0; i < postSplitDists.length; i++) { double Ni = postSplitDists[i][0]; N += Ni; } for(int i = 0; i < postSplitDists.length; i++) { double Ni = postSplitDists[i][0]; SDR[i] = (Ni/N)*computeSD(postSplitDists[i]); } return SDR; } }
Java
/* * FIMTDDNumericAttributeClassLimitObserver.java * Copyright (C) 2014 University of Porto, Portugal * @author A. Bifet, J. Duarte, J. Gama * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * */ package moa.classifiers.rules.core.attributeclassobservers; import moa.classifiers.core.attributeclassobservers.FIMTDDNumericAttributeClassObserver; import moa.options.IntOption; public class FIMTDDNumericAttributeClassLimitObserver extends FIMTDDNumericAttributeClassObserver { /** * */ private static final long serialVersionUID = 1L; protected int maxNodes; public IntOption maxNodesOption = new IntOption("maxNodesOption", 'z', "Maximum number of nodes", 50, 0, Integer.MAX_VALUE); protected int numNodes; @Override public void observeAttributeClass(double attVal, double classVal, double weight) { if (Double.isNaN(attVal)) { //Instance.isMissingValue(attVal) } else { if (this.root == null) { maxNodes=maxNodesOption.getValue(); this.root = new FIMTDDNumericAttributeClassLimitObserver.Node(attVal, classVal, weight); } else { this.root.insertValue(attVal, classVal, weight); } } } protected class Node extends FIMTDDNumericAttributeClassObserver.Node { /** * */ private static final long serialVersionUID = -4484141636424708465L; public Node(double val, double label, double weight) { super(val, label, weight); } protected Node root = null; /** * Insert a new value into the tree, updating both the sum of values and * sum of squared values arrays */ @Override public void insertValue(double val, 
double label, double weight) { // If the new value equals the value stored in a node, update // the left (<=) node information if (val == this.cut_point) { this.leftStatistics.addToValue(0,1); this.leftStatistics.addToValue(1,label); this.leftStatistics.addToValue(2,label*label); } // If the new value is less than the value in a node, update the // left distribution and send the value down to the left child node. // If no left child exists, create one else if (val <= this.cut_point) { this.leftStatistics.addToValue(0,1); this.leftStatistics.addToValue(1,label); this.leftStatistics.addToValue(2,label*label); if (this.left == null) { if(numNodes<maxNodes){ this.left = new Node(val, label, weight); ++numNodes; } } else { this.left.insertValue(val, label, weight); } } // If the new value is greater than the value in a node, update the // right (>) distribution and send the value down to the right child node. // If no right child exists, create one else { // val > cut_point this.rightStatistics.addToValue(0,1); this.rightStatistics.addToValue(1,label); this.rightStatistics.addToValue(2,label*label); if (this.right == null) { if(numNodes<maxNodes){ this.right = new Node(val, label, weight); ++numNodes; } } else { this.right.insertValue(val, label, weight); } } } } }
Java
/*
 *    Rule.java
 *    Copyright (C) 2014 University of Porto, Portugal
 *    @author A. Bifet, J. Duarte, J. Gama
 *
 *    Licensed under the Apache License, Version 2.0 (the "License");
 *    you may not use this file except in compliance with the License.
 *    You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing, software
 *    distributed under the License is distributed on an "AS IS" BASIS,
 *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *    See the License for the specific language governing permissions and
 *    limitations under the License.
 *
 */
package moa.classifiers.rules.core;

/**
 * Class that stores an arrayList of predicates of a rule and the observers
 * (statistics). This class implements a function that evaluates a rule.
 *
 * <p>
 * Learning Decision Rules from Data Streams, IJCAI 2011, J. Gama, P. Kosina
 * </p>
 *
 * @author A. Bifet, J. Duarte, J. Gama
 * @version $Revision: 2 $
 *
 */
import java.io.Serializable;
import java.util.LinkedList;
import java.util.List;

import moa.AbstractMOAObject;
import moa.classifiers.core.conditionaltests.InstanceConditionalTest;
import moa.classifiers.core.conditionaltests.NumericAttributeBinaryTest;
import moa.classifiers.rules.AbstractAMRules;
import moa.classifiers.rules.core.conditionaltests.NumericAttributeBinaryRulePredicate;
import moa.classifiers.rules.nodes.RuleSplitNode;
import moa.core.DoubleVector;
import moa.core.StringUtils;
import moa.options.FlagOption;
import moa.options.FloatOption;
import weka.core.Instance;

public class Rule extends AbstractMOAObject {

    private static final long serialVersionUID = 1L;

    // Ordered list of split nodes (one predicate each); an instance is covered
    // by the rule only when every node's predicate holds.
    protected List<RuleSplitNode> nodeList = new LinkedList<RuleSplitNode>();

    // The active learning node at the end of the rule; it accumulates the
    // statistics used to decide further expansions and to predict.
    protected RuleActiveLearningNode learningNode;

    // Numeric identifier of this rule (assigned from the Builder id).
    protected int ruleNumberID;

    public int getRuleNumberID() {
        return ruleNumberID;
    }

    public void setRuleNumberID(int ruleNumberID) {
        this.ruleNumberID = ruleNumberID;
    }

    // Statistics of the branch not taken by the last split (currently only
    // exposed via statisticsOtherBranchSplit(); assignment is commented out
    // in split()).
    private double[] statisticsOtherBranchSplit;

    // Builder used to create this rule; reused in split() to spawn new
    // learning nodes with updated statistics.
    private Builder builder;

    /**
     * getLearningNode Method This is the way to pass info for other classes.
     * Implements getLearningNode() in class RuleActiveLearningNode
     *
     * @return the rule's current active learning node
     */
    public RuleActiveLearningNode getLearningNode() {
        return learningNode;
    }

    public void setLearningNode(RuleActiveLearningNode learningNode) {
        this.learningNode = learningNode;
    }

    public List<RuleSplitNode> getNodeList() {
        return nodeList;
    }

    // Delegates to the learning node's instance counter.
    public long getInstancesSeen() {
        return this.learningNode.getInstancesSeen();
    }

    public void setNodeList(List<RuleSplitNode> nodeList) {
        this.nodeList = nodeList;
    }

    // Builds a rule from a configured Builder; wires the builder back to this
    // rule as its owner and creates the initial learning node.
    public Rule(Builder builder) {
        builder.setOwner(this);
        this.setBuilder(builder);
        this.amRules = builder.getAMRules();
        this.learningNode = newRuleActiveLearningNode(builder);
        //JD - use builder ID to set ruleNumberID
        this.ruleNumberID = builder.id;
    }

    // Owning AMRules learner; provides the learning-node factory and options.
    protected AbstractAMRules amRules;

    private RuleActiveLearningNode newRuleActiveLearningNode(Builder builder) {
        return amRules.newRuleActiveLearningNode(builder);
    }

    /*private RuleActiveLearningNode newRuleActiveLearningNode(double[] initialClassObservations) {
        return amRules.newRuleActiveLearningNode(initialClassObservations);
    }*/

    /**
     * Returns true when every split-node predicate accepts {@code inst},
     * i.e. the instance falls inside this rule's region.
     */
    public boolean isCovering(Instance inst) {
        boolean isCovering = true;
        for (RuleSplitNode node : nodeList) {
            if (node.evaluate(inst) == false) {
                isCovering = false;
                break;
            }
        }
        return isCovering;
    }

    /**
     * MOA GUI output
     */
    @Override
    public void getDescription(StringBuilder sb, int indent) {
        // Intentionally empty: rules are printed via printRule() instead.
    }

    /**
     * Fluent builder for Rule; also carries the configuration that
     * RuleActiveLearningNode implementations read when they are created.
     */
    public static class Builder implements Serializable {

        private static final long serialVersionUID = 1712887264918475622L;

        protected boolean changeDetection;
        protected boolean usePerceptron;
        protected double threshold;
        protected double alpha;
        protected int predictionFunction;
        protected double[] statistics;
        protected double lastTargetMean;
        private Rule owner;

        //jd
        public FlagOption constantLearningRatioDecayOption;
        public FloatOption learningRatioOption;

        public int id;
        public AbstractAMRules amRules;

        public AbstractAMRules getAMRules() {
            return amRules;
        }

        public Builder() {
        }

        public Builder changeDetection(boolean changeDetection) {
            this.changeDetection = changeDetection;
            return this;
        }

        public Builder threshold(double threshold) {
            this.threshold = threshold;
            return this;
        }

        public Builder alpha(double alpha) {
            this.alpha = alpha;
            return this;
        }

        public Builder predictionFunction(int predictionFunction) {
            this.predictionFunction = predictionFunction;
            return this;
        }

        public Builder statistics(double[] statistics) {
            this.statistics = statistics;
            return this;
        }

        public Builder owner(Rule owner) {
            this.setOwner(owner);
            return this;
        }

        public Builder amRules(AbstractAMRules amRules) {
            this.amRules = amRules;
            return this;
        }

        public Builder id(int id) {
            this.id = id;
            return this;
        }

        public Rule build() {
            return new Rule(this);
        }

        public Rule getOwner() {
            return owner;
        }

        public void setOwner(Rule owner) {
            this.owner = owner;
        }
    }

    // Feeds one training instance into the learning node's statistics.
    public void updateStatistics(Instance instance) {
        this.learningNode.updateStatistics(instance);
    }

    /**
     * Try to Expand method. Asks the learning node whether a split is
     * justified under the Hoeffding bound.
     *
     * @param splitConfidence confidence parameter for the Hoeffding bound
     * @param tieThreshold    tie-breaking threshold
     * @return true when the rule should be expanded (then call split())
     */
    public boolean tryToExpand(double splitConfidence, double tieThreshold) {
        boolean shouldSplit = this.learningNode.tryToExpand(splitConfidence, tieThreshold);
        return shouldSplit;
    }

    //JD: Only call after tryToExpand returning true
    // Turns the learning node's best split suggestion into a new predicate
    // appended to (or updated in) the node list, and replaces the learning
    // node when the rule actually changed.
    public void split() {
        //this.statisticsOtherBranchSplit = this.learningNode.getStatisticsOtherBranchSplit();
        //create a split node,
        int splitIndex = this.learningNode.getSplitIndex();
        InstanceConditionalTest st = this.learningNode.getBestSuggestion().splitTest;
        if (st instanceof NumericAttributeBinaryTest) {
            NumericAttributeBinaryTest splitTest = (NumericAttributeBinaryTest) st;
            // splitIndex + 1 encodes the chosen branch operator for the predicate.
            NumericAttributeBinaryRulePredicate predicate = new NumericAttributeBinaryRulePredicate(
                    splitTest.getAttsTestDependsOn()[0], splitTest.getSplitValue(),
                    splitIndex + 1);
            RuleSplitNode ruleSplitNode = new RuleSplitNode(predicate, this.learningNode.getStatisticsBranchSplit());
            if (this.nodeListAdd(ruleSplitNode) == true) {
                // create a new learning node
                RuleActiveLearningNode newLearningNode = newRuleActiveLearningNode(this.getBuilder().statistics(this.learningNode.getStatisticsNewRuleActiveLearningNode()));
                newLearningNode.initialize(this.learningNode);
                this.learningNode = newLearningNode;
            }
        } else
            throw new UnsupportedOperationException("AMRules (currently) only supports numerical attributes.");
    }

    // Adds the split node unless a predicate on the same attribute already
    // exists; in that case the existing predicate's value may be tightened
    // instead. Returns true when the rule changed (new node or updated value).
    private boolean nodeListAdd(RuleSplitNode ruleSplitNode) {
        //Check that the node is not already in the list
        boolean isIncludedInNodeList = false;
        boolean isUpdated = false;
        for (RuleSplitNode node : nodeList) {
            NumericAttributeBinaryRulePredicate nodeTest = (NumericAttributeBinaryRulePredicate) node.getSplitTest();
            NumericAttributeBinaryRulePredicate ruleSplitNodeTest = (NumericAttributeBinaryRulePredicate) ruleSplitNode.getSplitTest();
            if (nodeTest.isUsingSameAttribute(ruleSplitNodeTest)) {
                isIncludedInNodeList = true;
                if (nodeTest.isIncludedInRuleNode(ruleSplitNodeTest) == true) { //remove this line to keep the most recent attribute value
                    //replace the value
                    nodeTest.setAttributeValue(ruleSplitNodeTest);
                    isUpdated = true; //if is updated (i.e. an expansion happened) a new learning node should be created
                }
            }
        }
        if (isIncludedInNodeList == false) {
            this.nodeList.add(ruleSplitNode);
        }
        return (!isIncludedInNodeList || isUpdated);
    }

    public double[] statisticsOtherBranchSplit() {
        return this.statisticsOtherBranchSplit;
    }

    // Renders the rule (id, instance count, predicates, prediction, and the
    // perceptron model when the learning node is a regression node).
    public String printRule() {
        StringBuilder out = new StringBuilder();
        int indent = 1;
        StringUtils.appendIndented(out, indent, "Rule Nr." + this.ruleNumberID + " Instances seen:" + this.learningNode.getInstancesSeen() + "\n"); // AC
        for (RuleSplitNode node : nodeList) {
            StringUtils.appendIndented(out, indent, node.getSplitTest().toString());
            StringUtils.appendIndented(out, indent, " ");
            StringUtils.appendIndented(out, indent, node.toString());
        }
        DoubleVector pred = new DoubleVector(this.learningNode.getSimplePrediction());
        StringUtils.appendIndented(out, 0, " --> y: " + pred.toString());
        StringUtils.appendNewline(out);

        if (this.learningNode instanceof RuleActiveRegressionNode) {
            if (((RuleActiveRegressionNode) this.learningNode).perceptron != null) {
                ((RuleActiveRegressionNode) this.learningNode).perceptron.getModelDescription(out, 0);
                StringUtils.appendNewline(out);
            }
        }
        return (out.toString());
    }

    // Prints debug output when the owning learner's verbosity is high enough.
    protected void debug(String string, int level) {
        if (this.amRules.VerbosityOption.getValue() >= level) {
            System.out.println(string);
        }
    }

    // Delegates anomaly detection to the learning node.
    public boolean isAnomaly(Instance instance,
            double uniVariateAnomalyProbabilityThreshold,
            double multiVariateAnomalyProbabilityThreshold,
            int numberOfInstanceesForAnomaly) {
        return this.learningNode.isAnomaly(instance, uniVariateAnomalyProbabilityThreshold,
                multiVariateAnomalyProbabilityThreshold, numberOfInstanceesForAnomaly);
    }

    public double computeError(Instance instance) {
        return this.learningNode.computeError(instance);
    }

    // Feeds one error into the learning node's Page-Hinkley change detector.
    public boolean updatePageHinckleyTest(double error) {
        return this.learningNode.updatePageHinckleyTest(error);
    }

    public double[] getPrediction(Instance instance, int mode) {
        return this.learningNode.getPrediction(instance, mode);
    }

    public double[] getPrediction(Instance instance) {
        return this.learningNode.getPrediction(instance);
    }

    public Builder getBuilder() {
        return builder;
    }

    public void setBuilder(Builder builder) {
        this.builder = builder;
    }

    public double getCurrentError() {
        return this.learningNode.getCurrentError();
    }
}
Java
/* * RuleSplitNode.java * Copyright (C) 2013 University of Porto, Portugal * @author E. Almeida, A. Carvalho, J. Gama * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * */ package moa.classifiers.rules.nodes; import moa.classifiers.core.conditionaltests.InstanceConditionalTest; import moa.classifiers.rules.core.Predicate; import moa.classifiers.trees.HoeffdingTree.SplitNode; import weka.core.Instance; /** * A modified SplitNode method implementing the extra information */ public class RuleSplitNode extends SplitNode { protected double lastTargetMean; protected int operatorObserver; private static final long serialVersionUID = 1L; public InstanceConditionalTest getSplitTest() { return this.splitTest; } /** * Create a new RuleSplitNode */ public RuleSplitNode(InstanceConditionalTest splitTest, double[] classObservations) { super(splitTest, classObservations); } public boolean evaluate(Instance instance) { Predicate predicate = (Predicate) this.splitTest; return predicate.evaluate(instance); } }
Java
/* * RuleClassifierNBayes.java * Copyright (C) 2012 University of Porto, Portugal * @author P. Kosina, E. Almeida, J. Gama * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * */ package moa.classifiers.rules; import java.util.ArrayList; import java.util.Collections; //import samoa.instances.Instance; import weka.core.Instance; import moa.classifiers.bayes.NaiveBayes; import moa.options.IntOption; /** * This classifier learn ordered and unordered rule set from data stream with naive Bayes learners. * <p> This algorithm also does the detection of anomalies. * * <p>Learning Decision RuleClassifications from Data Streams, IJCAI 2011, J. Gama, P. Kosina </p> * * <p>Parameters:</p> * <ul> * <li> -q: The number of instances a leaf should observe before permitting Naive Bayes.</li> * <li> -p: Minimum value of p </li> * <li> -t: Tie Threshold </li> * <li> -c: Split Confidence </li> * <li> -g: GracePeriod, the number of instances a leaf should observe between split attempts </li> * <li> -o: Prediction function to use. Ex:FirstHit </li> * <li> -r: Learn ordered or unordered rule </li> * </ul> * * @author P. Kosina, E. Almeida, J. 
Gama * @version $Revision: 2 $ */ public class RuleClassifierNBayes extends RuleClassifier { private static final long serialVersionUID = 1L; public IntOption nbThresholdOption = new IntOption( "nbThreshold", 'q', "The number of instances a leaf should observe before permitting Naive Bayes.", 0, 0, Integer.MAX_VALUE); @Override public double[] getVotesForInstance(Instance inst) { double[] votes = new double[numClass]; switch (super.predictionFunctionOption.getChosenIndex()) { case 0: votes = firstHitNB(inst); break; case 1: votes = weightedSumNB(inst); break; case 2: votes = weightedMaxNB(inst); break; } return votes; } // The following three functions are used for the prediction protected double[] firstHitNB(Instance inst) { int countFired = 0; boolean fired = false; double[] votes = new double[this.numClass]; for (int j = 0; j < this.ruleSet.size(); j++) { if (this.ruleSet.get(j).ruleEvaluate(inst) == true) { countFired = countFired + 1; if (this.ruleSet.get(j).obserClassDistrib.sumOfValues() >= this.nbThresholdOption.getValue()) { votes = NaiveBayes.doNaiveBayesPredictionLog(inst, this.ruleSet.get(j).obserClassDistrib, this.ruleSet.get(j).observers, this.ruleSet.get(j).observersGauss); votes = exponential(votes); votes = normalize(votes); } else { for (int z = 0; z < this.numClass; z++) { votes[z] = this.ruleSet.get(j).obserClassDistrib.getValue(z) / this.ruleSet.get(j).obserClassDistrib.sumOfValues(); } } break; } } if (countFired > 0) { fired = true; } else { fired = false; } if (fired == false) { if (super.getWeightSeen() >= this.nbThresholdOption.getValue()) { votes = NaiveBayes.doNaiveBayesPredictionLog(inst, this.observedClassDistribution, this.attributeObservers, this.attributeObserversGauss); votes = exponential(votes); votes = normalize(votes); } else { votes = super.oberversDistribProb(inst, this.observedClassDistribution); } } return votes; } protected double[] weightedMaxNB(Instance inst) { int countFired = 0; int count = 0; boolean fired = false; 
double highest = 0.0; double[] votes = new double[this.numClass]; ArrayList<Double> ruleSetVotes = new ArrayList<Double>(); ArrayList<ArrayList<Double>> majorityProb = new ArrayList<ArrayList<Double>>(); for (int j = 0; j < this.ruleSet.size(); j++) { ArrayList<Double> ruleClassDistribProb=new ArrayList<Double>(); if(this.ruleSet.get(j).ruleEvaluate(inst) == true) { countFired = countFired + 1; if (this.ruleSet.get(j).obserClassDistrib.sumOfValues() >= this.nbThresholdOption.getValue()) { votes = NaiveBayes.doNaiveBayesPredictionLog(inst, this.ruleSet.get(j).obserClassDistrib, this.ruleSet.get(j).observers, this.ruleSet.get(j).observersGauss); votes = exponential(votes); votes = normalize(votes); } else { count = count + 1; for (int z = 0; z < this.numClass; z++){ ruleSetVotes.add(this.ruleSet.get(j).obserClassDistrib.getValue(z) / this.ruleSet.get(j).obserClassDistrib.sumOfValues()); ruleClassDistribProb.add(this.ruleSet.get(j).obserClassDistrib.getValue(z) / this.ruleSet.get(j).obserClassDistrib.sumOfValues()); } majorityProb.add(ruleClassDistribProb); } } } if (count > 0) { Collections.sort(ruleSetVotes); highest = ruleSetVotes.get(ruleSetVotes.size() - 1); for (int t = 0; t < majorityProb.size(); t++) { for (int m = 0; m < majorityProb.get(t).size(); m++) { if(majorityProb.get(t).get(m) == highest){ for(int h = 0; h < majorityProb.get(t).size(); h++){ votes[h]=majorityProb.get(t).get(h); } break; } } } } if (countFired > 0) { fired=true; } else { fired=false; } if (fired == false) { if(super.getWeightSeen() >= this.nbThresholdOption.getValue()) { votes = NaiveBayes.doNaiveBayesPredictionLog(inst, this.observedClassDistribution, this.attributeObservers, this.attributeObserversGauss); votes = exponential(votes); votes = normalize(votes); } else { votes = super.oberversDistribProb(inst, this.observedClassDistribution); } } return votes; } protected double[] weightedSumNB(Instance inst) { int countFired = 0; int count = 0; boolean fired = false; double[] votes = 
new double[this.numClass]; ArrayList<Double> weightSum = new ArrayList<Double>(); ArrayList<ArrayList<Double>> majorityProb = new ArrayList<ArrayList<Double>>(); for ( int j = 0; j < this.ruleSet.size(); j++) { ArrayList<Double> ruleClassDistribProb=new ArrayList<Double>(); if (this.ruleSet.get(j).ruleEvaluate(inst) == true) { countFired = countFired + 1; if (this.ruleSet.get(j).obserClassDistrib.sumOfValues() >= this.nbThresholdOption.getValue()) { votes = NaiveBayes.doNaiveBayesPredictionLog(inst, this.ruleSet.get(j).obserClassDistrib, ruleSet.get(j).observers, this.ruleSet.get(j).observersGauss); votes = exponential(votes); votes = normalize(votes); } else { count=count+1; for (int z = 0; z < this.numClass; z++) { ruleClassDistribProb.add(this.ruleSet.get(j).obserClassDistrib.getValue(z) / this.ruleSet.get(j).obserClassDistrib.sumOfValues()); } majorityProb.add(ruleClassDistribProb); } } } if(count > 0) { for (int m = 0; m < majorityProb.get(0).size(); m++) { double sum = 0.0; for (int t = 0; t < majorityProb.size(); t++){ sum = sum + majorityProb.get(t).get(m); } weightSum.add(sum); } for (int h = 0; h < weightSum.size(); h++) { votes[h] = weightSum.get(h) / majorityProb.size(); } } if(countFired>0){ fired = true; } else { fired=false; } if (fired == false) { if (super.getWeightSeen() >= this.nbThresholdOption.getValue()) { votes = NaiveBayes.doNaiveBayesPredictionLog(inst, this.observedClassDistribution, this.attributeObservers, this.attributeObserversGauss); votes = exponential(votes); votes = normalize(votes); } else { votes = super.oberversDistribProb(inst, this.observedClassDistribution); } } return votes; } protected double[] normalize(double[] votes) { double sum=0; for (int i = 0; i < votes.length; i++) { sum = sum + votes[i]; } for (int j = 0; j < votes.length; j++) { votes[j] = votes[j] / sum; } return votes; } protected double[] exponential(double[] votes) { for (int i = 0; i < votes.length; i++) { votes[i] = Math.exp(votes[i]); } return votes; } }
Java
/* * RuleClassification.java * Copyright (C) 2012 University of Porto, Portugal * @author P. Kosina, E. Almeida, J. Gama * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * */ package moa.classifiers.rules; /** * Class that stores an arrayList of predicates of a rule and the observers (statistics). * This class implements a function that evaluates a rule. * * <p>Learning Decision Rules from Data Streams, IJCAI 2011, J. Gama, P. Kosina </p> * * @author P. Kosina, E. Almeida, J. Gama * @version $Revision: 2 $ * * */ import java.util.ArrayList; import moa.AbstractMOAObject; import moa.classifiers.core.attributeclassobservers.*; import moa.core.AutoExpandVector; import moa.core.DoubleVector; import weka.core.Instance; //import samoa.instances.Instance; //import moa.core.Utils; public class RuleClassification extends AbstractMOAObject{ private static final long serialVersionUID = 1L; protected ArrayList<Predicates> predicateSet = new ArrayList<Predicates>(); protected AutoExpandVector<AttributeClassObserver> observers = new AutoExpandVector<AttributeClassObserver>(); //Statistics. protected AutoExpandVector<AttributeClassObserver> observersGauss = new AutoExpandVector<AttributeClassObserver>(); //Statistics. 
protected ArrayList<ArrayList<Double>> attributeStatisticsSupervised = new ArrayList<ArrayList<Double>>(); protected ArrayList<ArrayList<Double>> squaredAttributeStatisticsSupervised = new ArrayList<ArrayList<Double>>(); protected double[] weightAttribute; // The Perception weights. protected DoubleVector attributeStatistics = new DoubleVector(); // Statistics used for error calculations. protected DoubleVector attributesProbability = new DoubleVector(); // Probalility of each attribute. protected DoubleVector squaredAttributeStatistics = new DoubleVector(); protected DoubleVector obserClassDistrib = new DoubleVector(); protected DoubleVector attributeMissingValues = new DoubleVector(); // for each attribute counts the number of missing values. protected int instancesSeen = 0; // The number of instances contributing to this model. protected int instancesSeenTest = 0; // The number of instances test seen by the rule. protected boolean reset=true; // If the model should be reset or not. // Statistics used for normalize actualClass and predictedClass. protected double actualClassStatistics = 0.0; protected double squaredActualClassStatistics = 0.0; protected double PHmT = 0; //The cumulative sum of the errors. protected double PHMT = Double.MAX_VALUE; // The minimum error value seen so far. protected double XiSum = 0; //Absolute error. protected double ValorTargetRule=0; // Target value of the rule. 
public RuleClassification(RuleClassification x) { for (int i = 0; i < x.predicateSet.size(); i++) { Predicates pred = new Predicates(x.predicateSet.get(i).getAttributeValue(), x.predicateSet.get(i).getSymbol(), x.predicateSet.get(i).getValue()); this.predicateSet.add(pred); } } public RuleClassification() { } public boolean ruleEvaluate(Instance inst) { int countTrue = 0; boolean ruleEvalu = false; for (int i = 0; i < predicateSet.size(); i++) { if (predicateSet.get(i).evaluate(inst) == true) { countTrue = countTrue + 1; } } if (countTrue == predicateSet.size()) { ruleEvalu = true; } else { ruleEvalu = false; } return ruleEvalu; } @Override public void getDescription(StringBuilder sb, int indent) { // TODO Auto-generated method stub } }
Java
/* * SDRSplitCriterionAMRules.java * Copyright (C) 2014 University of Porto, Portugal * @author A. Bifet, J. Duarte, J. Gama * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * */ package moa.classifiers.rules.driftdetection; public class PageHinkleyFading extends PageHinkleyTest { /** * */ private static final long serialVersionUID = 7110953184708812339L; private double fadingFactor=0.99; public PageHinkleyFading(double threshold, double alpha) { super(threshold, alpha); } protected double instancesSeen; @Override public void reset() { super.reset(); this.instancesSeen=0; } @Override public boolean update(double error) { this.instancesSeen=1+fadingFactor*this.instancesSeen; double absolutError = Math.abs(error); this.sumAbsolutError = fadingFactor*this.sumAbsolutError + absolutError; if (this.instancesSeen > 30) { double mT = absolutError - (this.sumAbsolutError / this.instancesSeen) - this.alpha; this.cumulativeSum = this.cumulativeSum + mT; // Update the cumulative mT sum if (this.cumulativeSum < this.minimumValue) { // Update the minimum mT value if the new mT is smaller than the current minimum this.minimumValue = this.cumulativeSum; } return (((this.cumulativeSum - this.minimumValue) > this.threshold)); } return false; } }
Java
/*
 *    PageHinkleyTest.java
 *    Copyright (C) 2014 University of Porto, Portugal
 *    @author A. Bifet, J. Duarte, J. Gama
 *
 *    Licensed under the Apache License, Version 2.0 (the "License");
 *    you may not use this file except in compliance with the License.
 *    You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing, software
 *    distributed under the License is distributed on an "AS IS" BASIS,
 *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *    See the License for the specific language governing permissions and
 *    limitations under the License.
 *
 */
package moa.classifiers.rules.driftdetection;

import java.io.Serializable;

/**
 * Page-Hinkley sequential test for drift detection on a stream of prediction
 * errors. Drift is signalled when the cumulative deviation of the absolute
 * error from its running mean rises more than {@code threshold} above its
 * historical minimum. (Header previously mislabeled this file as
 * SDRSplitCriterionAMRules.java.)
 */
public class PageHinkleyTest implements Serializable {

    private static final long serialVersionUID = 1L;

    /** Cumulative sum of the mT deviations. */
    protected double cumulativeSum;

    /** Minimum cumulative sum observed so far. */
    protected double minimumValue;

    /** Running sum of absolute errors. */
    protected double sumAbsolutError;

    /** Number of updates since the last reset. */
    protected long phinstancesSeen;

    /** Detection threshold (lambda): larger values mean fewer false alarms. */
    protected double threshold;

    /** Magnitude of change tolerated before deviations accumulate. */
    protected double alpha;

    public PageHinkleyTest(double threshold, double alpha) {
        this.threshold = threshold;
        this.alpha = alpha;
        // NOTE(review): reset() is overridable and called from the constructor,
        // so a subclass's reset() runs before its own field initializers; the
        // existing subclass (PageHinkleyFading) only zeroes a default-zero
        // field there, so behavior is unchanged.
        this.reset();
    }

    public double getCumulativeSum() {
        return cumulativeSum;
    }

    public double getMinimumValue() {
        return minimumValue;
    }

    /** Clears all accumulated statistics. */
    public void reset() {
        this.cumulativeSum = 0.0;
        this.minimumValue = Double.MAX_VALUE;
        this.sumAbsolutError = 0.0;
        this.phinstancesSeen = 0;
    }

    /**
     * Computes the Page-Hinkley test for one more error observation.
     * The first 30 observations are a warm-up during which no drift is
     * signalled.
     *
     * @param error the latest prediction error
     * @return true when drift is detected
     */
    public boolean update(double error) {
        this.phinstancesSeen++;
        double absolutError = Math.abs(error);
        this.sumAbsolutError = this.sumAbsolutError + absolutError;
        if (this.phinstancesSeen > 30) {
            double mT = absolutError - (this.sumAbsolutError / this.phinstancesSeen) - this.alpha;
            this.cumulativeSum = this.cumulativeSum + mT; // update the cumulative mT sum
            if (this.cumulativeSum < this.minimumValue) { // track the minimum mT sum
                this.minimumValue = this.cumulativeSum;
            }
            return ((this.cumulativeSum - this.minimumValue) > this.threshold);
        }
        return false;
    }
}
Java
/* * FadingTargetMean.java * Copyright (C) 2014 University of Porto, Portugal * @author J. Duarte, A. Bifet, J. Gama * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * */ package moa.classifiers.rules.functions; import moa.options.FloatOption; import weka.core.Instance; public class FadingTargetMean extends TargetMean { /** * */ private static final long serialVersionUID = -1383391769242905972L; public FloatOption fadingFactorOption = new FloatOption( "fadingFactor", 'f', "Fading factor for the FadingTargetMean accumulated error", 0.99, 0, 1); private double nD; private double fadingFactor; @Override public void trainOnInstanceImpl(Instance inst) { updateAccumulatedError(inst); nD=1+fadingFactor*nD; sum=inst.classValue()+fadingFactor*sum; } @Override public void resetLearningImpl() { super.resetLearningImpl(); this.fadingFactor=fadingFactorOption.getValue(); } @Override public double[] getVotesForInstance(Instance inst) { double[] currentMean=new double[1]; if (nD>0) currentMean[0]=sum/nD; else currentMean[0]=0; return currentMean; } }
Java
/*
 *    TargetMean.java
 *    Copyright (C) 2014 University of Porto, Portugal
 *    @author J. Duarte, A. Bifet, J. Gama
 *
 *    Licensed under the Apache License, Version 2.0 (the "License");
 *    you may not use this file except in compliance with the License.
 *    You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing, software
 *    distributed under the License is distributed on an "AS IS" BASIS,
 *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *    See the License for the specific language governing permissions and
 *    limitations under the License.
 *
 */
package moa.classifiers.rules.functions;

/**
 * TargetMean - Returns the mean of the target variable of the training instances
 *
 * @author João Duarte
 *
 */
import weka.core.Instance;
import moa.classifiers.AbstractClassifier;
import moa.classifiers.Regressor;
import moa.core.Measurement;
import moa.core.StringUtils;
import moa.options.FloatOption;

public class TargetMean extends AbstractClassifier implements Regressor {

    // Number of training instances seen since the last reset.
    protected long n;

    // Sum of the target values seen since the last reset.
    protected double sum;

    // Accumulated (faded) absolute prediction error.
    // NOTE(review): resetLearningImpl() initializes this to Double.MAX_VALUE
    // while resetError() sets it to 0 — presumably intentional (a freshly
    // reset model reports maximal error until resetError() is called);
    // confirm against callers.
    protected double errorSum;

    // Faded count of error updates.
    protected double nError;

    // Cached value of fadingErrorFactorOption.
    // NOTE(review): read once in the no-arg constructor, i.e. before any CLI
    // option value could be applied — verify this is the intended behavior.
    private double fadingErrorFactor;

    private static final long serialVersionUID = 7152547322803559115L;

    public FloatOption fadingErrorFactorOption = new FloatOption(
            "fadingErrorFactor", 'e',
            "Fading error factor for the TargetMean accumulated error", 0.99, 0, 1);

    @Override
    public boolean isRandomizable() {
        return false;
    }

    // Predicts the current mean of the target (0 until an instance is seen).
    @Override
    public double[] getVotesForInstance(Instance inst) {
        double[] currentMean = new double[1];
        if (n > 0)
            currentMean[0] = sum / n;
        else
            currentMean[0] = 0;
        return currentMean;
    }

    @Override
    public void resetLearningImpl() {
        sum = 0;
        n = 0;
        errorSum = Double.MAX_VALUE;
        nError = 0;
    }

    @Override
    public void trainOnInstanceImpl(Instance inst) {
        // Error is accumulated against the mean computed BEFORE this instance
        // updates the statistics.
        updateAccumulatedError(inst);
        ++this.n;
        this.sum += inst.classValue();
    }

    // Folds the absolute error of the current mean on `inst` into the faded
    // error accumulators.
    protected void updateAccumulatedError(Instance inst) {
        double mean = 0;
        nError = 1 + fadingErrorFactor * nError;
        if (n > 0)
            mean = sum / n;
        errorSum = Math.abs(inst.classValue() - mean) + fadingErrorFactor * errorSum;
    }

    @Override
    protected Measurement[] getModelMeasurementsImpl() {
        return null;
    }

    @Override
    public void getModelDescription(StringBuilder out, int indent) {
        // NOTE(review): prints NaN when n == 0 — confirm acceptable for output.
        StringUtils.appendIndented(out, indent, "Current Mean: " + this.sum / this.n);
        StringUtils.appendNewline(out);
    }

    /* JD
     * Resets the learner but initializes with a starting point
     * */
    public void reset(double currentMean, long numberOfInstances) {
        this.sum = currentMean * numberOfInstances;
        this.n = numberOfInstances;
        this.resetError();
    }

    /* JD
     * Returns the current faded mean absolute error
     * (Double.MAX_VALUE until at least one error update has been accumulated).
     * */
    public double getCurrentError() {
        if (this.nError > 0)
            return this.errorSum / this.nError;
        else
            return Double.MAX_VALUE;
    }

    // Copy constructor.
    // NOTE(review): fadingErrorFactorOption is shared by reference with `t`,
    // not cloned — confirm copies are not expected to vary it independently.
    public TargetMean(TargetMean t) {
        super();
        this.n = t.n;
        this.sum = t.sum;
        this.errorSum = t.errorSum;
        this.nError = t.nError;
        this.fadingErrorFactor = t.fadingErrorFactor;
        this.fadingErrorFactorOption = t.fadingErrorFactorOption;
    }

    public TargetMean() {
        super();
        fadingErrorFactor = fadingErrorFactorOption.getValue();
    }

    public void resetError() {
        this.errorSum = 0;
        this.nError = 0;
    }
}
Java
/*
 *    Perceptron.java (header previously misnamed "FadingTargetMean.java";
 *    this file defines the online Perceptron regressor)
 *    Copyright (C) 2014 University of Porto, Portugal
 *    @author A. Bifet, J. Duarte, J. Gama
 *
 *    Licensed under the Apache License, Version 2.0 (the "License");
 *    you may not use this file except in compliance with the License.
 *    You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing, software
 *    distributed under the License is distributed on an "AS IS" BASIS,
 *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *    See the License for the specific language governing permissions and
 *    limitations under the License.
 *
 */
package moa.classifiers.rules.functions;

import moa.classifiers.AbstractClassifier;
import moa.classifiers.Regressor;
import moa.core.DoubleVector;
import moa.core.Measurement;
import moa.options.FlagOption;
import moa.options.FloatOption;
import weka.core.Instance;

/**
 * Online perceptron regressor trained by stochastic gradient descent.
 *
 * Inputs and the target are normalized online (z-score using running
 * mean/standard-deviation statistics); predictions are denormalized back to
 * the target scale. A fading (exponentially weighted) absolute error is
 * maintained for model comparison via getCurrentError().
 */
public class Perceptron extends AbstractClassifier implements Regressor {

    // Threshold below which a standard deviation is treated as zero when
    // normalizing attribute and target values (avoids division by ~0).
    private final double SD_THRESHOLD = 0.0000001; //THRESHOLD for normalizing attribute and target values

    private static final long serialVersionUID = 1L;

    public FlagOption constantLearningRatioDecayOption = new FlagOption(
            "learningRatio_Decay_set_constant", 'd',
            "Learning Ratio Decay in Perceptron set to be constant. (The next parameter).");

    public FloatOption learningRatioOption = new FloatOption(
            "learningRatio", 'l',
            "Constante Learning Ratio to use for training the Perceptrons in the leaves.", 0.01);

    public FloatOption learningRateDecayOption = new FloatOption(
            "learningRateDecay", 'm',
            " Learning Rate decay to use for training the Perceptron.", 0.001);

    public FloatOption fadingFactorOption = new FloatOption(
            "fadingFactor", 'e',
            "Fading factor for the Perceptron accumulated error", 0.99, 0, 1);

    // Fading-accumulated error weight (denominator of the fading mean error).
    private double nError;
    // Cached value of fadingFactorOption; assigned on the first training instance.
    protected double fadingFactor;
    // Current learning ratio; decays over time unless the constant-decay flag is set.
    protected double learningRatio;
    // Cached value of learningRateDecayOption.
    protected double learningRateDecay;

    // The perceptron weights; the last entry is the bias term.
    protected double[] weightAttribute;

    // Running per-attribute sums and sums of squares, used to compute
    // mean/standard deviation for input normalization.
    public DoubleVector perceptronattributeStatistics = new DoubleVector();
    public DoubleVector squaredperceptronattributeStatistics = new DoubleVector();

    // The number of instances contributing to this model.
    protected int perceptronInstancesSeen;
    // Count of target values observed (denominator for target statistics).
    protected int perceptronYSeen;

    // Fading-accumulated absolute prediction error (numerator of the fading mean error).
    protected double accumulatedError;

    // If true, the weights are (re)initialized on the next training instance.
    protected boolean initialisePerceptron;

    // Running sum and sum of squares of the target, for target normalization.
    protected double perceptronsumY;
    protected double squaredperceptronsumY;

    public Perceptron() {
        this.initialisePerceptron = true;
    }

    /*
     * Copy constructor: clones options, weights, and learning statistics
     * from another Perceptron.
     */
    public Perceptron(Perceptron p) {
        super();
        this.constantLearningRatioDecayOption = p.constantLearningRatioDecayOption;
        this.learningRatioOption = p.learningRatioOption;
        this.learningRateDecayOption = p.learningRateDecayOption;
        this.fadingFactorOption = p.fadingFactorOption;
        this.nError = p.nError;
        this.fadingFactor = p.fadingFactor;
        this.learningRatio = p.learningRatio;
        this.learningRateDecay = p.learningRateDecay;
        if (p.weightAttribute != null)
            this.weightAttribute = p.weightAttribute.clone();
        this.perceptronattributeStatistics = new DoubleVector(p.perceptronattributeStatistics);
        this.squaredperceptronattributeStatistics = new DoubleVector(p.squaredperceptronattributeStatistics);
        this.perceptronInstancesSeen = p.perceptronInstancesSeen;
        // NOTE(review): accumulatedError is NOT copied here, so a copy starts
        // with a zero error history while keeping nError — confirm intentional.
        this.initialisePerceptron = p.initialisePerceptron;
        this.perceptronsumY = p.perceptronsumY;
        this.squaredperceptronsumY = p.squaredperceptronsumY;
        this.perceptronYSeen = p.perceptronYSeen;
    }

    public void setWeights(double[] w) {
        this.weightAttribute = w;
    }

    public double[] getWeights() {
        return this.weightAttribute;
    }

    public int getInstancesSeen() {
        return perceptronInstancesSeen;
    }

    public void setInstancesSeen(int pInstancesSeen) {
        this.perceptronInstancesSeen = pInstancesSeen;
    }

    /**
     * A method to reset the model: clears statistics and schedules weight
     * re-initialization on the next training instance.
     */
    public void resetLearningImpl() {
        this.initialisePerceptron = true;
        this.reset();
    }

    /** Clears all running statistics (error, counts, attribute/target sums). */
    public void reset() {
        this.nError = 0.0;
        this.accumulatedError = 0.0;
        this.perceptronInstancesSeen = 0;
        this.perceptronattributeStatistics = new DoubleVector();
        this.squaredperceptronattributeStatistics = new DoubleVector();
        this.perceptronsumY = 0.0;
        this.squaredperceptronsumY = 0.0;
        this.perceptronYSeen = 0;
    }

    /** Clears only the fading error statistics, keeping weights and stats. */
    public void resetError() {
        this.nError = 0.0;
        this.accumulatedError = 0.0;
    }

    /**
     * Update the model using the provided instance.
     *
     * Order matters: the prequential error is measured with the CURRENT
     * weights before the instance is used for any update.
     */
    public void trainOnInstanceImpl(Instance inst) {
        // On the very first call fadingFactor is still 0.0 (it is set just
        // below), so this first update reduces to accumulatedError = |0 - y|.
        accumulatedError = Math.abs(this.prediction(inst) - inst.classValue()) + fadingFactor * accumulatedError;
        nError = 1 + fadingFactor * nError;

        // Initialise Perceptron if necessary
        if (this.initialisePerceptron == true) {
            this.fadingFactor = this.fadingFactorOption.getValue();
            this.classifierRandom.setSeed(randomSeedOption.getValue());
            this.initialisePerceptron = false; // not in resetLearningImpl() because it needs Instance!
            // Random weights in [-1, 1); the last slot is the bias term.
            this.weightAttribute = new double[inst.numAttributes()];
            for (int j = 0; j < inst.numAttributes(); j++) {
                weightAttribute[j] = 2 * this.classifierRandom.nextDouble() - 1;
            }
            // Update Learning Rate
            learningRatio = learningRatioOption.getValue();
            this.learningRateDecay = learningRateDecayOption.getValue();
        }

        // Update attribute statistics
        this.perceptronInstancesSeen++;
        this.perceptronYSeen++;

        for (int j = 0; j < inst.numAttributes() - 1; j++) {
            perceptronattributeStatistics.addToValue(j, inst.value(j));
            squaredperceptronattributeStatistics.addToValue(j, inst.value(j) * inst.value(j));
        }
        this.perceptronsumY += inst.classValue();
        this.squaredperceptronsumY += inst.classValue() * inst.classValue();

        // Decay the learning ratio with the number of instances seen, unless
        // the user asked for a constant learning ratio.
        if (constantLearningRatioDecayOption.isSet() == false) {
            learningRatio = learningRatioOption.getValue() / (1 + perceptronInstancesSeen * learningRateDecay);
        }

        //double prediction = this.updateWeights(inst,learningRatio);
        //accumulatedError= Math.abs(prediction-inst.classValue()) + fadingFactor*accumulatedError;

        this.updateWeights(inst, learningRatio);
    }

    /**
     * Output the prediction made by this perceptron on the given instance,
     * denormalized back to the target scale.
     */
    private double prediction(Instance inst) {
        double[] normalizedInstance = normalizedInstance(inst);
        double normalizedPrediction = prediction(normalizedInstance);
        return denormalizedPrediction(normalizedPrediction);
    }

    /** Prediction in the normalized (z-score) target space. */
    public double normalizedPrediction(Instance inst) {
        double[] normalizedInstance = normalizedInstance(inst);
        double normalizedPrediction = prediction(normalizedInstance);
        return normalizedPrediction;
    }

    /**
     * Maps a normalized prediction back to the original target scale using
     * the running target mean/SD.
     */
    private double denormalizedPrediction(double normalizedPrediction) {
        if (this.initialisePerceptron == false) {
            double meanY = perceptronsumY / perceptronYSeen;
            double sdY = computeSD(squaredperceptronsumY, perceptronsumY, perceptronYSeen);
            if (sdY > SD_THRESHOLD)
                return normalizedPrediction * sdY + meanY;
            else
                return normalizedPrediction + meanY;
        } else
            // Perceptron may have been reset; use old weights to predict.
            return normalizedPrediction;
    }

    /**
     * Dot product of weights and a normalized instance, plus the bias term
     * (last weight). Returns 0 while the perceptron is uninitialized.
     */
    public double prediction(double[] instanceValues) {
        double prediction = 0.0;
        if (this.initialisePerceptron == false) {
            for (int j = 0; j < instanceValues.length - 1; j++) {
                prediction += this.weightAttribute[j] * instanceValues[j];
            }
            prediction += this.weightAttribute[instanceValues.length - 1];
        }
        return prediction;
    }

    /**
     * Z-score-normalizes the instance attributes using the running statistics.
     * Attributes with near-zero SD are only mean-centered.
     */
    public double[] normalizedInstance(Instance inst) {
        // Normalize Instance
        double[] normalizedInstance = new double[inst.numAttributes()];
        for (int j = 0; j < inst.numAttributes() - 1; j++) {
            int instAttIndex = modelAttIndexToInstanceAttIndex(j, inst);
            double mean = perceptronattributeStatistics.getValue(j) / perceptronYSeen;
            double sd = computeSD(squaredperceptronattributeStatistics.getValue(j), perceptronattributeStatistics.getValue(j), perceptronYSeen);
            if (sd > SD_THRESHOLD)
                normalizedInstance[j] = (inst.value(instAttIndex) - mean) / sd;
            else
                normalizedInstance[j] = inst.value(instAttIndex) - mean;
        }
        return normalizedInstance;
    }

    /**
     * Sample standard deviation from a sum of squares, a sum, and a count.
     * Returns 0 for fewer than two samples.
     */
    public double computeSD(double squaredVal, double val, int size) {
        if (size > 1) {
            return Math.sqrt((squaredVal - ((val * val) / size)) / (size - 1.0));
        }
        return 0.0;
    }

    /**
     * One SGD step on the normalized instance; numeric-attribute weights and
     * the bias move toward the normalized target. If the L1 norm of the
    * weights exceeds the attribute count, all weights are rescaled by it.
     *
     * @return the (denormalized) prediction made BEFORE the weight update
     */
    public double updateWeights(Instance inst, double learningRatio) {
        // Normalize Instance
        double[] normalizedInstance = normalizedInstance(inst);
        // Compute the Normalized Prediction of Perceptron
        double normalizedPredict = prediction(normalizedInstance);
        double normalizedY = normalizeActualClassValue(inst);
        double sumWeights = 0.0;
        double delta = normalizedY - normalizedPredict;

        for (int j = 0; j < inst.numAttributes() - 1; j++) {
            int instAttIndex = modelAttIndexToInstanceAttIndex(j, inst);
            if (inst.attribute(instAttIndex).isNumeric()) {
                this.weightAttribute[j] += learningRatio * delta * normalizedInstance[j];
                sumWeights += Math.abs(this.weightAttribute[j]);
            }
        }
        this.weightAttribute[inst.numAttributes() - 1] += learningRatio * delta;
        sumWeights += Math.abs(this.weightAttribute[inst.numAttributes() - 1]);
        if (sumWeights > inst.numAttributes()) { // Lasso regression
            for (int j = 0; j < inst.numAttributes() - 1; j++) {
                int instAttIndex = modelAttIndexToInstanceAttIndex(j, inst);
                if (inst.attribute(instAttIndex).isNumeric()) {
                    this.weightAttribute[j] = this.weightAttribute[j] / sumWeights;
                }
            }
            this.weightAttribute[inst.numAttributes() - 1] = this.weightAttribute[inst.numAttributes() - 1] / sumWeights;
        }
        return denormalizedPrediction(normalizedPredict);
    }

    /** Rescales all weights so their absolute values sum to 1. */
    public void normalizeWeights() {
        double sumWeights = 0.0;
        for (int j = 0; j < this.weightAttribute.length; j++) {
            sumWeights += Math.abs(this.weightAttribute[j]);
        }
        for (int j = 0; j < this.weightAttribute.length; j++) {
            this.weightAttribute[j] = this.weightAttribute[j] / sumWeights;
        }
    }

    /**
     * Z-score-normalizes the instance's target value using the running target
     * statistics; mean-centers only when the SD is near zero.
     */
    private double normalizeActualClassValue(Instance inst) {
        double meanY = perceptronsumY / perceptronYSeen;
        double sdY = computeSD(squaredperceptronsumY, perceptronsumY, perceptronYSeen);

        double normalizedY = 0.0;
        if (sdY > SD_THRESHOLD) {
            normalizedY = (inst.classValue() - meanY) / sdY;
        } else {
            normalizedY = inst.classValue() - meanY;
        }
        return normalizedY;
    }

    @Override
    public boolean isRandomizable() {
        return true;
    }

    @Override
    public double[] getVotesForInstance(Instance inst) {
        return new double[]{this.prediction(inst)};
    }

    @Override
    protected Measurement[] getModelMeasurementsImpl() {
        return null;
    }

    /** Prints the learned linear model, e.g. "0.5 X0 +0.25 X1 +0.1". */
    @Override
    public void getModelDescription(StringBuilder out, int indent) {
        if (this.weightAttribute != null) {
            for (int i = 0; i < this.weightAttribute.length - 1; ++i) {
                if (this.weightAttribute[i] >= 0 && i > 0)
                    out.append(" +" + Math.round(this.weightAttribute[i] * 1000) / 1000.0 + " X" + i);
                else
                    out.append(" " + Math.round(this.weightAttribute[i] * 1000) / 1000.0 + " X" + i);
            }
            if (this.weightAttribute[this.weightAttribute.length - 1] >= 0)
                out.append(" +" + Math.round(this.weightAttribute[this.weightAttribute.length - 1] * 1000) / 1000.0);
            else
                out.append(" " + Math.round(this.weightAttribute[this.weightAttribute.length - 1] * 1000) / 1000.0);
        }
    }

    public void setLearningRatio(double learningRatio) {
        this.learningRatio = learningRatio;
    }

    /**
     * @return the fading mean absolute error, or Double.MAX_VALUE when no
     *         error has been accumulated yet
     */
    public double getCurrentError() {
        if (nError > 0)
            return accumulatedError / nError;
        else
            return Double.MAX_VALUE;
    }
}
Java
/* * RuleClassifier.java * Copyright (C) 2012 University of Porto, Portugal * @author P. Kosina, E. Almeida, J. Gama * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * */ package moa.classifiers.rules; import java.math.BigDecimal; import java.util.*; import moa.classifiers.AbstractClassifier; import moa.classifiers.core.attributeclassobservers.*; import moa.classifiers.core.attributeclassobservers.BinaryTreeNumericAttributeClassObserver.Node; import moa.core.AutoExpandVector; import moa.core.DoubleVector; import moa.core.Measurement; import moa.core.StringUtils; import moa.options.FlagOption; import moa.options.FloatOption; import moa.options.IntOption; import moa.options.MultiChoiceOption; //import samoa.instances.Instance; import weka.core.Instance; import weka.core.Utils; //import moa.core.Utils; import java.io.*; /** * This classifier learn ordered and unordered rule set from data stream. * This algorithm also does the detection of anomalies. * * <p>Learning Decision RuleClassifications from Data Streams, IJCAI 2011, J. Gama, P. Kosina </p> * * * <p>Parameters:</p> * <ul> * <li> -p: Minimum value of p </li> * <li> -t: Tie Threshold </li> * <li> -c: Split Confidence </li> * <li> -g: GracePeriod, the number of instances a leaf should observe between split attempts </li> * <li> -o: Prediction function to use. Ex:FirstHit </li> * <li> -r: Learn ordered or unordered rule </li> * </ul> * * @author P. Kosina, E. Almeida, J. 
Gama * @version $Revision: 2 $ */ public class RuleClassifier extends AbstractClassifier{ private static final long serialVersionUID = 1L; @Override public String getPurposeString() { return "Rule Classifier."; } protected Instance instance; protected AutoExpandVector<AttributeClassObserver> attributeObservers; protected AutoExpandVector<AttributeClassObserver> attributeObserversGauss; protected DoubleVector observedClassDistribution; protected DoubleVector saveBestEntropy = new DoubleVector(); // Saves the best value of entropy, cut_Point and symbol. protected DoubleVector saveBestEntropyNominalAttrib = new DoubleVector(); // Saves the best value of entropy and their cut_Point. protected DoubleVector ruleClassIndex = new DoubleVector(); // The index of the class for each rule. protected DoubleVector saveBestGlobalEntropy = new DoubleVector(); protected ArrayList<ArrayList<Double>> saveBestValGlobalEntropy = new ArrayList<ArrayList<Double>>(); // For each attribute contains the best value of entropy and its cutPoint. protected ArrayList<Double> saveTheBest = new ArrayList<Double>(); // Contains the best attribute. 
protected ArrayList<RuleClassification> ruleSet = new ArrayList<RuleClassification>(); // protected DoubleVector ruleClassIndexAnomalis = new DoubleVector(); protected ArrayList<RuleClassification> ruleSetAnomalies = new ArrayList<RuleClassification>(); protected ArrayList<Integer> ruleAnomaliesIndex = new ArrayList<Integer>(); protected ArrayList<ArrayList<Integer>> caseAnomaly = new ArrayList<ArrayList<Integer>>(); protected ArrayList<ArrayList<ArrayList<Double>>> ruleAttribAnomalyStatistics = new ArrayList<ArrayList<ArrayList<Double>>>(); protected ArrayList<RuleClassification> ruleSetAnomaliesSupervised = new ArrayList<RuleClassification>(); protected ArrayList<Integer> ruleAnomaliesIndexSupervised = new ArrayList<Integer>(); protected ArrayList<ArrayList<Integer>> caseAnomalySupervised = new ArrayList<ArrayList<Integer>>(); protected ArrayList<ArrayList<ArrayList<Double>>> ruleAttribAnomalyStatisticsSupervised = new ArrayList<ArrayList<ArrayList<Double>>>(); double minEntropyTemp = Double.MAX_VALUE; double cutPointTemp = 0.0; double minEntropyNominalAttrib = Double.MAX_VALUE; double symbol = 0.0; int numInstance = 0; int numAttributes = 0; int numClass = 0; Node root; Predicates pred; public FloatOption PminOption = new FloatOption("Pmin", 'p', "Percentage of the total number of example seen in the node.", 0.1, 0.0, 1.0); public FloatOption splitConfidenceOption = new FloatOption( "splitConfidence", 'c', "The allowable error in split decision, values closer to 0 will take longer to decide.", 0.000001, 0.0, 1.0); public FloatOption tieThresholdOption = new FloatOption("tieThreshold", 't', "Threshold below which a split will be forced to break ties.", 0.05, 0.0, 1.0); public FloatOption anomalyProbabilityThresholdOption = new FloatOption( "anomalyprobabilityThreshold", 'o', "The threshold value.", 0.99, 0.0, 1.0); public FloatOption probabilityThresholdOption = new FloatOption( "probabilityThreshold", 'k', "The threshold value.", 0.10, 0.0, 1.0); public 
IntOption anomalyNumInstThresholdOption = new IntOption( "anomalyThreshold", 'i', "The threshold value to be used in the anomaly detection.", 15, 0, Integer.MAX_VALUE); public IntOption gracePeriodOption = new IntOption( "gracePeriod",'g', "The number of instances a leaf should observe between split attempts.", 200, 0, Integer.MAX_VALUE); public MultiChoiceOption predictionFunctionOption = new MultiChoiceOption( "predictionFunctionOption", 'z', "The prediction function to use.", new String[]{ "firstHit", "weightedSum", "weightedMax"}, new String[]{ "first Hit", "weighted Sum", "weighted Max"}, 0); public FlagOption orderedRulesOption = new FlagOption("orderedRules", 'r', "orderedRules."); public FlagOption anomalyDetectionOption = new FlagOption("anomalyDetection", 'u', "anomaly Detection."); public FlagOption Supervised = new FlagOption("supervised", 'n', "supervised."); public FlagOption Unsupervised = new FlagOption("unsupervised", 'm', "unsupervised."); @Override public double[] getVotesForInstance(Instance inst) { double[] votes = new double[this.numClass]; switch (this.predictionFunctionOption.getChosenIndex()) { case 0: votes = firstHit(inst); break; case 1: votes = weightedSum(inst); break; case 2: votes = weightedMax(inst); break; } return votes; } @Override protected Measurement[] getModelMeasurementsImpl() { return null; } @Override public void resetLearningImpl() { this.observedClassDistribution = new DoubleVector(); this.attributeObservers = new AutoExpandVector<AttributeClassObserver>(); this.attributeObserversGauss = new AutoExpandVector<AttributeClassObserver>(); } public double getWeightSeen() { return this.observedClassDistribution.sumOfValues(); } @Override public void trainOnInstanceImpl(Instance inst) { int countRuleFiredTrue = 0; boolean ruleFired = false; this.instance = inst; this.numAttributes = instance.numAttributes()-1; this.numClass = instance.numClasses(); this.numInstance = numInstance + 1; int conta1=0; for (int j = 0; j < 
ruleSet.size(); j++) { if (this.ruleSet.get(j).ruleEvaluate(inst) == true) { countRuleFiredTrue = countRuleFiredTrue + 1; double anomaly = 0.0; if(this.Supervised.isSet()){ anomaly = computeAnomalySupervised(this.ruleSet.get(j), j, inst); // compute anomaly (Supervised method) }else if(this.Unsupervised.isSet()){ anomaly = computeAnomalyUnsupervised(this.ruleSet.get(j), j, inst); // compute anomaly (Unsupervised method) } if(anomaly >= this.anomalyProbabilityThresholdOption.getValue()){ conta1 =conta1+1; } // System.out.print(numInstance+";"+anomaly+"\n"); try { File dir = new File("SeaAnomaliesUnsupervised.txt"); FileWriter fileWriter = new FileWriter(dir, true); PrintWriter printWriter = new PrintWriter(fileWriter); printWriter.println(numInstance+";"+anomaly); printWriter.flush(); printWriter.close(); } catch (IOException e) { e.printStackTrace(); } if((this.ruleSet.get(j).instancesSeen <= this.anomalyNumInstThresholdOption.getValue()) || (anomaly < this.anomalyProbabilityThresholdOption.getValue() && this.anomalyDetectionOption.isSet()) ||!this.anomalyDetectionOption.isSet()){ this.ruleSet.get(j).obserClassDistrib.addToValue((int) inst.classValue(), inst.weight()); for (int i = 0; i < inst.numAttributes() - 1; i++) { int instAttIndex = modelAttIndexToInstanceAttIndex(i, inst); if(!inst.isMissing(instAttIndex)){ AttributeClassObserver obs = this.ruleSet.get(j).observers.get(i); // Nominal and binary tree. AttributeClassObserver obsGauss = this.ruleSet.get(j).observersGauss.get(i); // Gaussian. if (obs == null) { obs = inst.attribute(instAttIndex).isNominal() ? newNominalClassObserver() : newNumericClassObserver(); this.ruleSet.get(j).observers.set(i, obs); } if (obsGauss == null) { obsGauss = inst.attribute(instAttIndex).isNumeric() ? 
newNumericClassObserver2():null; this.ruleSet.get(j).observersGauss.set(i, obsGauss); } obs.observeAttributeClass(inst.value(instAttIndex), (int) inst.classValue(), inst.weight()); if (inst.attribute(instAttIndex).isNumeric()) { obsGauss.observeAttributeClass(inst.value(instAttIndex), (int) inst.classValue(), inst.weight()); } } } expandeRule(this.ruleSet.get(j), inst, j); // This function expands the rule } if (this.orderedRulesOption.isSet()) { // Ordered rules break; } } } if (countRuleFiredTrue > 0) { ruleFired = true; }else{ ruleFired = false; } if (ruleFired == false) { //If none of the rules cover the example update sufficient statistics of the default rule this.observedClassDistribution.addToValue((int) inst.classValue(), inst.weight()); for (int i = 0; i < inst.numAttributes() - 1; i++) { int instAttIndex = modelAttIndexToInstanceAttIndex(i, inst); if(!inst.isMissing(instAttIndex)){ AttributeClassObserver obs = this.attributeObservers.get(i); AttributeClassObserver obsGauss = this.attributeObserversGauss.get(i); if (obs == null) { obs = inst.attribute(instAttIndex).isNominal() ? newNominalClassObserver() : newNumericClassObserver(); this.attributeObservers.set(i, obs); } if (obsGauss == null) { obsGauss = inst.attribute(instAttIndex).isNumeric() ? 
newNumericClassObserver2():null; this.attributeObserversGauss.set(i, obsGauss); } obs.observeAttributeClass(inst.value(instAttIndex), (int) inst.classValue(), inst.weight()); if (inst.attribute(instAttIndex).isNumeric()) { obsGauss.observeAttributeClass(inst.value(instAttIndex), (int) inst.classValue(), inst.weight()); } } } createRule(inst); //This function creates a rule } } @Override public void getModelDescription(StringBuilder out, int indent) { if(this.anomalyDetectionOption.isSet()){ if(this.Supervised.isSet()){ this.printAnomaliesSupervised(out, indent); // Get Model Description (Supervised method) }else if(this.Unsupervised.isSet()){ this.printAnomaliesUnsupervised(out, indent); // Get Model Description (Unsupervised method) } }else{ this.getModelDescriptionNoAnomalyDetection(out, indent); // Get Model Description no Anomaly detection } } public void printAnomaliesUnsupervised(StringBuilder out, int indent) { // Get Model Description (Unsupervised method) StringUtils.appendNewline(out); StringUtils.appendIndented(out, indent, "**********************UNSUPERVISED*****************"); StringUtils.appendNewline(out); StringUtils.appendNewline(out); for (int k = 0; k < this.ruleSetAnomalies.size(); k++) { StringUtils.appendIndented(out, indent, "Case: "+this.caseAnomaly.get(k).get(0)+" Anomaly Score: "+this.caseAnomaly.get(k).get(1)+"%"); StringUtils.appendNewline(out); //Ver outra parte default StringUtils.appendIndented(out, indent, "Rule "+this.ruleAnomaliesIndex.get(k)+": "); for (int i = 0; i < this.ruleSetAnomalies.get(k).predicateSet.size(); i++) { if (this.ruleSetAnomalies.get(k).predicateSet.size() == 1) { if (this.ruleSetAnomalies.get(k).predicateSet.get(i).getSymbol() == 0.0) { String nam = this.instance.attribute((int)this.ruleSetAnomalies.get(k).predicateSet.get(i).getAttributeValue()).name(); String val = 
this.instance.attribute((int)this.ruleSetAnomalies.get(k).predicateSet.get(i).getAttributeValue()).value((int)this.ruleSetAnomalies.get(k).predicateSet.get(i).getValue()); StringUtils.appendIndented(out, indent, nam+" = "+val+" --> "+this.instance.classAttribute().value((int)getRuleMajorityClassIndex(this.ruleSetAnomalies.get(k)))); StringUtils.appendNewline(out); } else if (this.ruleSetAnomalies.get(k).predicateSet.get(i).getSymbol() == -1.0){ String nam = this.instance.attribute((int)this.ruleSetAnomalies.get(k).predicateSet.get(i).getAttributeValue()).name(); StringUtils.appendIndented(out, indent, nam+" <= "+this.ruleSetAnomalies.get(k).predicateSet.get(i).getValue()+" --> "+this.instance.classAttribute().value((int)ruleClassIndex.getValue(ruleAnomaliesIndex.get(k)-1))); StringUtils.appendNewline(out); } else { String nam = this.instance.attribute((int)this.ruleSetAnomalies.get(k).predicateSet.get(i).getAttributeValue()).name(); StringUtils.appendIndented(out, indent, nam+" > "+this.ruleSetAnomalies.get(k).predicateSet.get(i).getValue()+" --> "+this.instance.classAttribute().value((int)this.ruleClassIndex.getValue(ruleAnomaliesIndex.get(k)-1))); StringUtils.appendNewline(out); } } else { if (this.ruleSetAnomalies.get(k).predicateSet.get(i).getSymbol() == 0.0) { String nam = this.instance.attribute((int)this.ruleSetAnomalies.get(k).predicateSet.get(i).getAttributeValue()).name(); String val = this.instance.attribute((int)this.ruleSetAnomalies.get(k).predicateSet.get(i).getAttributeValue()).value((int)this.ruleSetAnomalies.get(k).predicateSet.get(i).getValue()); StringUtils.appendIndented(out, indent, nam+" = "+val+" "); } else if (this.ruleSetAnomalies.get(k).predicateSet.get(i).getSymbol()==-1.0){ String nam = this.instance.attribute((int)this.ruleSetAnomalies.get(k).predicateSet.get(i).getAttributeValue()).name(); StringUtils.appendIndented(out, indent, nam+" <= "+this.ruleSetAnomalies.get(k).predicateSet.get(i).getValue()+" "); } else { String nam = 
this.instance.attribute((int)this.ruleSetAnomalies.get(k).predicateSet.get(i).getAttributeValue()).name(); StringUtils.appendIndented(out, indent, nam+" > "+this.ruleSetAnomalies.get(k).predicateSet.get(i).getValue()+" "); } if (i < this.ruleSetAnomalies.get(k).predicateSet.size() - 1) { StringUtils.appendIndented(out, indent, "and "); } else { int count = getCountNominalAttrib(this.ruleSetAnomalies.get(k).predicateSet); if ((this.ruleSetAnomalies.get(k).predicateSet.get(i).getSymbol() == 0.0) || (count != 0)) { StringUtils.appendIndented(out, indent, " --> "+this.instance.classAttribute().value((int)getRuleMajorityClassIndex(this.ruleSetAnomalies.get(k)))); StringUtils.appendNewline(out); } else { StringUtils.appendIndented(out, indent, " --> "+this.instance.classAttribute().value((int)this.ruleClassIndex.getValue(ruleAnomaliesIndex.get(k)-1))); StringUtils.appendNewline(out); } } } } for(int z=0; z < this.ruleAttribAnomalyStatistics.get(k).size(); z++) { if(this.ruleAttribAnomalyStatistics.get(k).get(z).size() == 5){ String s = String.format ("%.3e", this.ruleAttribAnomalyStatistics.get(k).get(z).get(4)); StringUtils.appendIndented(out, indent, instance.attribute(this.ruleAttribAnomalyStatistics.get(k).get(z).get(0).intValue()).name()+"="+round(this.ruleAttribAnomalyStatistics.get(k).get(z).get(1))+" ("+round(this.ruleAttribAnomalyStatistics.get(k).get(z).get(2))+" +- "+round(this.ruleAttribAnomalyStatistics.get(k).get(z).get(3))+") P="+s); StringUtils.appendNewline(out); } else { String s = String.format ("%.3e", this.ruleAttribAnomalyStatistics.get(k).get(z).get(2)); String val = this.instance.attribute(this.ruleAttribAnomalyStatistics.get(k).get(z).get(0).intValue()).value(this.ruleAttribAnomalyStatistics.get(k).get(z).get(1).intValue()); StringUtils.appendIndented(out, indent, instance.attribute(this.ruleAttribAnomalyStatistics.get(k).get(z).get(0).intValue()).name()+"="+ val+" P="+s); StringUtils.appendNewline(out); } } StringUtils.appendNewline(out); } } 
// --- Anomaly report printer (supervised variant) -----------------------------------
// Writes, for each rule stored in ruleSetAnomaliesSupervised: the anomaly case id and
// score from caseAnomalySupervised, the rule's predicate list (nominal '=' shows the
// attribute value label; numeric uses '<=' / '>'), the predicted class label, and the
// per-attribute anomaly statistics ("attr=value (mean +- sd) P=prob").
// Mirrors the unsupervised printer that precedes this method.
// NOTE(review): the long lines below are kept byte-identical; they appear to be
// hard-wrapped from a normally formatted original.
public void printAnomaliesSupervised(StringBuilder out, int indent) { // Get Model Description (Supervised method) StringUtils.appendNewline(out); StringUtils.appendIndented(out, indent, "************************SUPERVISED*******************"); StringUtils.appendNewline(out); StringUtils.appendNewline(out); for (int k = 0; k < this.ruleSetAnomaliesSupervised.size(); k++) { StringUtils.appendIndented(out, indent, "Case: "+this.caseAnomalySupervised.get(k).get(0)+" Anomaly Score: "+this.caseAnomalySupervised.get(k).get(1)+"%"); StringUtils.appendNewline(out); //See other default part StringUtils.appendIndented(out, indent, "Rule "+this.ruleAnomaliesIndexSupervised.get(k)+": "); for (int i = 0; i < this.ruleSetAnomaliesSupervised.get(k).predicateSet.size(); i++) { if (this.ruleSetAnomaliesSupervised.get(k).predicateSet.size() == 1) { if (this.ruleSetAnomaliesSupervised.get(k).predicateSet.get(i).getSymbol() == 0.0) { String nam = this.instance.attribute((int)this.ruleSetAnomaliesSupervised.get(k).predicateSet.get(i).getAttributeValue()).name(); String val = this.instance.attribute((int)this.ruleSetAnomaliesSupervised.get(k).predicateSet.get(i).getAttributeValue()).value((int)this.ruleSetAnomaliesSupervised.get(k).predicateSet.get(i).getValue()); StringUtils.appendIndented(out, indent, nam+" = "+val+" --> "+this.instance.classAttribute().value((int)getRuleMajorityClassIndex(this.ruleSetAnomaliesSupervised.get(k)))); StringUtils.appendNewline(out); } else if (this.ruleSetAnomaliesSupervised.get(k).predicateSet.get(i).getSymbol() == -1.0){ String nam = this.instance.attribute((int)this.ruleSetAnomaliesSupervised.get(k).predicateSet.get(i).getAttributeValue()).name(); StringUtils.appendIndented(out, indent, nam+" <= "+this.ruleSetAnomaliesSupervised.get(k).predicateSet.get(i).getValue()+" --> "+this.instance.classAttribute().value((int)ruleClassIndex.getValue(ruleAnomaliesIndexSupervised.get(k)-1))); StringUtils.appendNewline(out); } else { String nam = 
this.instance.attribute((int)this.ruleSetAnomaliesSupervised.get(k).predicateSet.get(i).getAttributeValue()).name(); StringUtils.appendIndented(out, indent, nam+" > "+this.ruleSetAnomaliesSupervised.get(k).predicateSet.get(i).getValue()+" --> "+this.instance.classAttribute().value((int)this.ruleClassIndex.getValue(ruleAnomaliesIndexSupervised.get(k)-1))); StringUtils.appendNewline(out); } } else { if (this.ruleSetAnomaliesSupervised.get(k).predicateSet.get(i).getSymbol() == 0.0) { String nam = this.instance.attribute((int)this.ruleSetAnomaliesSupervised.get(k).predicateSet.get(i).getAttributeValue()).name(); String val = this.instance.attribute((int)this.ruleSetAnomaliesSupervised.get(k).predicateSet.get(i).getAttributeValue()).value((int)this.ruleSetAnomaliesSupervised.get(k).predicateSet.get(i).getValue()); StringUtils.appendIndented(out, indent, nam+" = "+val+" "); } else if (this.ruleSetAnomaliesSupervised.get(k).predicateSet.get(i).getSymbol()==-1.0){ String nam = this.instance.attribute((int)this.ruleSetAnomaliesSupervised.get(k).predicateSet.get(i).getAttributeValue()).name(); StringUtils.appendIndented(out, indent, nam+" <= "+this.ruleSetAnomaliesSupervised.get(k).predicateSet.get(i).getValue()+" "); } else { String nam = this.instance.attribute((int)this.ruleSetAnomaliesSupervised.get(k).predicateSet.get(i).getAttributeValue()).name(); StringUtils.appendIndented(out, indent, nam+" > "+this.ruleSetAnomaliesSupervised.get(k).predicateSet.get(i).getValue()+" "); } if (i < this.ruleSetAnomaliesSupervised.get(k).predicateSet.size() - 1) { StringUtils.appendIndented(out, indent, "and "); } else { int count = getCountNominalAttrib(this.ruleSetAnomaliesSupervised.get(k).predicateSet); if ((this.ruleSetAnomaliesSupervised.get(k).predicateSet.get(i).getSymbol() == 0.0) || (count != 0)) { StringUtils.appendIndented(out, indent, " --> "+this.instance.classAttribute().value((int)getRuleMajorityClassIndex(this.ruleSetAnomaliesSupervised.get(k)))); 
StringUtils.appendNewline(out); } else { StringUtils.appendIndented(out, indent, " --> "+this.instance.classAttribute().value((int)this.ruleClassIndex.getValue(ruleAnomaliesIndexSupervised.get(k)-1))); StringUtils.appendNewline(out); } } } } for(int z=0; z < this.ruleAttribAnomalyStatisticsSupervised.get(k).size(); z++) { if(this.ruleAttribAnomalyStatisticsSupervised.get(k).get(z).size() == 5){ String s = String.format ("%.3e", this.ruleAttribAnomalyStatisticsSupervised.get(k).get(z).get(4)); StringUtils.appendIndented(out, indent, instance.attribute(this.ruleAttribAnomalyStatisticsSupervised.get(k).get(z).get(0).intValue()).name()+"="+round(this.ruleAttribAnomalyStatisticsSupervised.get(k).get(z).get(1))+" ("+round(this.ruleAttribAnomalyStatisticsSupervised.get(k).get(z).get(2))+" +- "+round(this.ruleAttribAnomalyStatisticsSupervised.get(k).get(z).get(3))+") P="+s); StringUtils.appendNewline(out); } else { String s = String.format ("%.3e", this.ruleAttribAnomalyStatisticsSupervised.get(k).get(z).get(2)); String val = this.instance.attribute(this.ruleAttribAnomalyStatisticsSupervised.get(k).get(z).get(0).intValue()).value(this.ruleAttribAnomalyStatisticsSupervised.get(k).get(z).get(1).intValue()); StringUtils.appendIndented(out, indent, instance.attribute(this.ruleAttribAnomalyStatisticsSupervised.get(k).get(z).get(0).intValue()).name()+"="+ val+" P="+s); StringUtils.appendNewline(out); } } StringUtils.appendNewline(out); } }
// --- Model description without anomaly detection -----------------------------------
// Prints every rule in ruleSet in the same predicate format; the consequent class
// label comes from getRuleMajorityClassIndex(...) when a nominal predicate is present,
// otherwise from ruleClassIndex.getValue(k). (Method continues on the next lines.)
public void getModelDescriptionNoAnomalyDetection(StringBuilder out, int indent) { // Get Model Description no Anomaly detection StringUtils.appendNewline(out); StringUtils.appendIndented(out, indent, "Number of Rule: " + ruleSet.size()); StringUtils.appendNewline(out); StringUtils.appendNewline(out); for (int k = 0; k < this.ruleSet.size(); k++) { StringUtils.appendIndented(out, indent, "Rule "+(k+1)+": "); for (int i = 0; i < this.ruleSet.get(k).predicateSet.size(); i++) { if (this.ruleSet.get(k).predicateSet.size() == 1) { if 
// (continuation of getModelDescriptionNoAnomalyDetection: per-predicate printing)
(this.ruleSet.get(k).predicateSet.get(i).getSymbol() == 0.0) { String nam = this.instance.attribute((int)this.ruleSet.get(k).predicateSet.get(i).getAttributeValue()).name(); String val = this.instance.attribute((int)this.ruleSet.get(k).predicateSet.get(i).getAttributeValue()).value((int)this.ruleSet.get(k).predicateSet.get(i).getValue()); StringUtils.appendIndented(out, indent, nam+" = "+val+" --> "+this.instance.classAttribute().value((int)getRuleMajorityClassIndex(this.ruleSet.get(k)))); StringUtils.appendNewline(out); } else if (this.ruleSet.get(k).predicateSet.get(i).getSymbol() == -1.0){ String nam = this.instance.attribute((int)this.ruleSet.get(k).predicateSet.get(i).getAttributeValue()).name(); StringUtils.appendIndented(out, indent, nam+" <= "+this.ruleSet.get(k).predicateSet.get(i).getValue()+" --> "+this.instance.classAttribute().value((int)ruleClassIndex.getValue(k))); StringUtils.appendNewline(out); } else { String nam = this.instance.attribute((int)this.ruleSet.get(k).predicateSet.get(i).getAttributeValue()).name(); StringUtils.appendIndented(out, indent, nam+" > "+this.ruleSet.get(k).predicateSet.get(i).getValue()+" --> "+this.instance.classAttribute().value((int)this.ruleClassIndex.getValue(k))); StringUtils.appendNewline(out); } } else { if (this.ruleSet.get(k).predicateSet.get(i).getSymbol() == 0.0) { String nam = this.instance.attribute((int)this.ruleSet.get(k).predicateSet.get(i).getAttributeValue()).name(); String val = this.instance.attribute((int)this.ruleSet.get(k).predicateSet.get(i).getAttributeValue()).value((int)this.ruleSet.get(k).predicateSet.get(i).getValue()); StringUtils.appendIndented(out, indent, nam+" = "+val+" "); } else if (this.ruleSet.get(k).predicateSet.get(i).getSymbol()==-1.0){ String nam = this.instance.attribute((int)this.ruleSet.get(k).predicateSet.get(i).getAttributeValue()).name(); StringUtils.appendIndented(out, indent, nam+" <= "+this.ruleSet.get(k).predicateSet.get(i).getValue()+" "); } else { String nam = 
this.instance.attribute((int)this.ruleSet.get(k).predicateSet.get(i).getAttributeValue()).name(); StringUtils.appendIndented(out, indent, nam+" > "+this.ruleSet.get(k).predicateSet.get(i).getValue()+" "); } if (i < this.ruleSet.get(k).predicateSet.size() - 1) { StringUtils.appendIndented(out, indent, "and "); } else { int count = getCountNominalAttrib(this.ruleSet.get(k).predicateSet); if ((this.ruleSet.get(k).predicateSet.get(i).getSymbol() == 0.0) || (count != 0)) { StringUtils.appendIndented(out, indent, " --> "+this.instance.classAttribute().value((int)getRuleMajorityClassIndex(this.ruleSet.get(k)))); StringUtils.appendNewline(out); } else { StringUtils.appendIndented(out, indent, " --> "+this.instance.classAttribute().value((int)this.ruleClassIndex.getValue(k))); StringUtils.appendNewline(out); } } } } StringUtils.appendNewline(out); } }
// This learner is not randomizable: it has no seed-dependent behavior.
@Override public boolean isRandomizable() { return false; }
// Returns 1 if any predicate in the set is nominal (symbol == 0.0), else 0.
// The loop breaks after the first match, so the count is effectively a boolean flag.
public int getCountNominalAttrib(ArrayList<Predicates> predicateSet) { int count = 0; for (int i = 0; i < predicateSet.size(); i++) { if (predicateSet.get(i).getSymbol() == 0.0) { count = count + 1; break; } } return count; }
// round(): truncates a nonzero double to 3 decimal places (ROUND_DOWN) for display.
// initializeRuleStatistics(): adds the new predicate to the rule and resets all of
// the rule's observers and sum/squared-sum statistics. (Both definitions below share
// one wrapped physical line with their original header comments, so that line is
// kept byte-identical.)
//Round a number protected BigDecimal round(double val){ BigDecimal value = new BigDecimal(val); if(val!=0.0){ value = value.setScale(3, BigDecimal.ROUND_DOWN); } return value; } //This function initializes the statistics of a rule public void initializeRuleStatistics(RuleClassification rl, Predicates pred, Instance inst) { rl.predicateSet.add(pred); rl.obserClassDistrib=new DoubleVector(); rl.observers=new AutoExpandVector<AttributeClassObserver>(); rl.observersGauss=new AutoExpandVector<AttributeClassObserver>(); rl.instancesSeen = 0; rl.attributeStatistics = new DoubleVector(); rl.squaredAttributeStatistics = new DoubleVector(); rl.attributeStatisticsSupervised = new ArrayList<ArrayList<Double>>(); rl.squaredAttributeStatisticsSupervised = new ArrayList<ArrayList<Double>>(); rl.attributeMissingValues = new DoubleVector(); } // Update rule
statistics public void updateRuleAttribStatistics(Instance inst, RuleClassification rl, int ruleIndex){ rl.instancesSeen++; if(rl.squaredAttributeStatisticsSupervised.size() == 0 && rl.attributeStatisticsSupervised.size() == 0){ for (int s = 0; s < inst.numAttributes() -1; s++) { ArrayList<Double> temp1 = new ArrayList<Double>(); ArrayList<Double> temp2 = new ArrayList<Double>(); rl.attributeStatisticsSupervised.add(temp1); rl.squaredAttributeStatisticsSupervised.add(temp2); int instAttIndex = modelAttIndexToInstanceAttIndex(s, inst); if(instance.attribute(instAttIndex).isNumeric()){ for(int i=0; i<inst.numClasses(); i++){ rl.attributeStatisticsSupervised.get(s).add(0.0); rl.squaredAttributeStatisticsSupervised.get(s).add(1.0); } } } } for (int s = 0; s < inst.numAttributes() -1; s++) { int instAttIndex = modelAttIndexToInstanceAttIndex(s, inst); if(!inst.isMissing(instAttIndex)){ if(instance.attribute(instAttIndex).isNumeric()){ rl.attributeStatistics.addToValue(s, inst.value(s)); rl.squaredAttributeStatistics.addToValue(s, inst.value(s) * inst.value(s)); double sumValue = rl.attributeStatisticsSupervised.get(s).get((int)inst.classValue()) + inst.value(s); rl.attributeStatisticsSupervised.get(s).set((int)inst.classValue(), sumValue); double squaredSumvalue = rl.squaredAttributeStatisticsSupervised.get(s).get((int)inst.classValue()) + (inst.value(s) * inst.value(s)); rl.squaredAttributeStatisticsSupervised.get(s).set((int)inst.classValue(), squaredSumvalue); } }else{ rl.attributeMissingValues.addToValue(s, 1); } } } //Compute anomalies unsupervised public double computeAnomalyUnsupervised(RuleClassification rl, int ruleIndex, Instance inst) { //Unsupervised ArrayList<Integer> caseAnomalyTemp = new ArrayList<Integer>(); ArrayList<ArrayList<Double>> AttribAnomalyStatisticTemp2 = new ArrayList<ArrayList<Double>>(); double D = 0.0; double N = 0.0; if (rl.instancesSeen > this.anomalyNumInstThresholdOption.getValue() && this.anomalyDetectionOption.isSet()) { for (int x = 
// (continuation of computeAnomalyUnsupervised: per-attribute probability loop.
// Numeric attributes use a Gaussian-like probability from the rule's running
// mean/sd once >30 non-missing values are seen; nominal attributes use the
// attribute-value frequency over all classes. Improbable values — below
// probabilityThresholdOption — are recorded with their statistics.)
0; x < inst.numAttributes() - 1; x++) { if(!inst.isMissing(x)){ ArrayList<Double> AttribAnomalyStatisticTemp = new ArrayList<Double>(); if (inst.attribute(x).isNumeric()) { //Numeric Attributes if((rl.instancesSeen - rl.attributeMissingValues.getValue(x)) > 30){ double mean = computeMean(rl.attributeStatistics.getValue(x), rl.instancesSeen); double sd = computeSD(rl.squaredAttributeStatistics.getValue(x),rl.attributeStatistics.getValue(x), rl.instancesSeen); double probability = computeProbability(mean, sd, inst.value(x)); if(probability!=0.0) { D = D + Math.log(probability); if(probability < this.probabilityThresholdOption.getValue()){ //0.10 N = N + Math.log(probability); AttribAnomalyStatisticTemp.add((double)x); AttribAnomalyStatisticTemp.add(inst.value(x)); AttribAnomalyStatisticTemp.add(mean); AttribAnomalyStatisticTemp.add(sd); AttribAnomalyStatisticTemp.add(probability); AttribAnomalyStatisticTemp2.add(AttribAnomalyStatisticTemp); } } } }else { //Nominal Attributes AutoExpandVector<DoubleVector> attribNominal = ((NominalAttributeClassObserver)rl.observers.get(x)).attValDistPerClass; //Attributes values distribution per class double numbAttribValue = 0.0; double attribVal = inst.value(x); //Attribute value for(int i = 0; i < attribNominal.size(); i++){ if(attribNominal.get(i) != null){ numbAttribValue = numbAttribValue + attribNominal.get(i).getValue((int)attribVal); } } double probability = numbAttribValue / rl.instancesSeen; if(probability!= 0.0) { D = D + Math.log(probability); if(probability < this.probabilityThresholdOption.getValue()){ //0.10 N = N + Math.log(probability); AttribAnomalyStatisticTemp.add((double)x); AttribAnomalyStatisticTemp.add(inst.value(x)); AttribAnomalyStatisticTemp.add(probability); AttribAnomalyStatisticTemp2.add(AttribAnomalyStatisticTemp); } } } } } } double anomaly=0.0; if(D !=0){ anomaly = Math.abs(N/D); } if(anomaly >= this.anomalyProbabilityThresholdOption.getValue()){ caseAnomalyTemp.add(this.numInstance); double val = 
anomaly * 100; caseAnomalyTemp.add((int)val); this.caseAnomaly.add(caseAnomalyTemp); RuleClassification x = new RuleClassification(this.ruleSet.get(ruleIndex)); this.ruleSetAnomalies.add(x); this.ruleAnomaliesIndex.add(ruleIndex + 1); this.ruleAttribAnomalyStatistics.add(AttribAnomalyStatisticTemp2); } return anomaly; }
// Supervised variant of the anomaly score: probabilities are conditioned on the
// instance's true class (per-class mean/sd for numeric attributes, class-conditional
// observer probability for nominal ones). Structure otherwise mirrors the
// unsupervised method above. (The original header comment said "unsupervised" —
// a copy-paste slip, corrected below.)
//Compute anomalies supervised public double computeAnomalySupervised(RuleClassification rl, int ruleIndex, Instance inst) { //Not supervised ArrayList<Integer> caseAnomalyTemp = new ArrayList<Integer>(); ArrayList<ArrayList<Double>> AttribAnomalyStatisticTemp2 = new ArrayList<ArrayList<Double>>(); double D = 0.0; double N = 0.0; if (rl.instancesSeen > this.anomalyNumInstThresholdOption.getValue() && this.anomalyDetectionOption.isSet()) { for (int x = 0; x < inst.numAttributes() - 1; x++) { if(!inst.isMissing(x)){ ArrayList<Double> AttribAnomalyStatisticTemp = new ArrayList<Double>(); if (inst.attribute(x).isNumeric()) { //Numeric Attributes if((rl.instancesSeen - rl.attributeMissingValues.getValue(x)) > 30){ double mean = computeMean((double)rl.attributeStatisticsSupervised.get(x).get((int)inst.classValue()), (int)rl.obserClassDistrib.getValue((int)inst.classValue())); double sd = computeSD((double)rl.squaredAttributeStatisticsSupervised.get(x).get((int)inst.classValue()), (double)rl.attributeStatisticsSupervised.get(x).get((int)inst.classValue()), (int)rl.obserClassDistrib.getValue((int)inst.classValue())); double probability = computeProbability(mean, sd, inst.value(x)); if(probability!=0.0) { D = D + Math.log(probability); if(probability < this.probabilityThresholdOption.getValue()){ //0.10 N = N + Math.log(probability); AttribAnomalyStatisticTemp.add((double)x); AttribAnomalyStatisticTemp.add(inst.value(x)); AttribAnomalyStatisticTemp.add(mean); AttribAnomalyStatisticTemp.add(sd); AttribAnomalyStatisticTemp.add(probability); AttribAnomalyStatisticTemp2.add(AttribAnomalyStatisticTemp); } } } }else { //Nominal double attribVal = inst.value(x); 
//Attribute value double classVal = inst.classValue(); //Attribute value double probability = rl.observers.get(x).probabilityOfAttributeValueGivenClass(attribVal, (int)classVal); if(probability!=0.0) { D = D + Math.log(probability); if(probability < this.probabilityThresholdOption.getValue()){ //0.10 N = N + Math.log(probability); AttribAnomalyStatisticTemp.add((double)x); AttribAnomalyStatisticTemp.add(inst.value(x)); AttribAnomalyStatisticTemp.add(probability); AttribAnomalyStatisticTemp2.add(AttribAnomalyStatisticTemp); } } } } } } double anomaly=0.0; if(D !=0){ anomaly = Math.abs(N/D); } if(anomaly >= this.anomalyProbabilityThresholdOption.getValue()){ caseAnomalyTemp.add(this.numInstance); double val = anomaly * 100; caseAnomalyTemp.add((int)val); this.caseAnomalySupervised.add(caseAnomalyTemp); RuleClassification y = new RuleClassification(this.ruleSet.get(ruleIndex)); this.ruleSetAnomaliesSupervised.add(y); this.ruleAnomaliesIndexSupervised.add(ruleIndex + 1); this.ruleAttribAnomalyStatisticsSupervised.add(AttribAnomalyStatisticTemp2); } return anomaly; } //Mean public double computeMean(double sum, int size) { return sum / size; } //Standard Deviation public double computeSD(double squaredVal, double val, int size) { return Math.sqrt((squaredVal - ((val * val) / size)) / size); } //Attribute probability public double computeProbability(double mean, double sd, double value) { sd = sd + 0.00001; double probability = 0.0; double diff = value - mean; if (sd > 0.0) { double k = (Math.abs(value - mean)/sd); if (k > 1.0) { probability = 1.0/(k*k); } else { probability= Math.exp(-(diff * diff / (2.0 * sd * sd))); } } return probability; } //This function creates a rule public void createRule(Instance inst) { int remainder = (int)Double.MAX_VALUE; int numInstanciaObservers = (int)this.observedClassDistribution.sumOfValues(); if (numInstanciaObservers != 0 && this.gracePeriodOption.getValue() != 0) { remainder = (numInstanciaObservers) % 
// (continuation of createRule: every gracePeriod instances, picks the best —
// lowest-entropy — attribute split via theBestAttributes/checkBestAttrib and turns
// it into a new single-predicate rule; numeric predicates also record the majority
// class index in ruleClassIndex, then the default-rule observers are reset.)
(this.gracePeriodOption.getValue()); } if (remainder == 0) { this.saveBestValGlobalEntropy = new ArrayList<ArrayList<Double>>(); this.saveBestGlobalEntropy = new DoubleVector(); this.saveTheBest = new ArrayList<Double>(); this.minEntropyTemp = Double.MAX_VALUE; this.minEntropyNominalAttrib = Double.MAX_VALUE; theBestAttributes(inst, this.attributeObservers); boolean HB = checkBestAttrib(numInstanciaObservers, this.attributeObservers, this.observedClassDistribution); if (HB == true) { // System.out.print("this.saveTheBest"+this.saveTheBest+"\n"); double attributeValue = this.saveTheBest.get(3); double symbol = this.saveTheBest.get(2); // =, <=, > : (0.0, -1.0, 1.0). double value = this.saveTheBest.get(0); // Value of the attribute this.pred = new Predicates(attributeValue, symbol, value); RuleClassification Rl = new RuleClassification(); // Create RuleClassification. Rl.predicateSet.add(pred); this.ruleSet.add(Rl); if (Rl.predicateSet.get(0).getSymbol() == -1.0 ||Rl.predicateSet.get(0).getSymbol() == 1.0) { double posClassDouble = this.saveTheBest.get(4); this.ruleClassIndex.setValue(this.ruleSet.size()-1, posClassDouble); }else{ this.ruleClassIndex.setValue(ruleSet.size()-1, 0.0); } this.observedClassDistribution = new DoubleVector(); this.attributeObservers = new AutoExpandVector<AttributeClassObserver>(); this.attributeObserversGauss = new AutoExpandVector<AttributeClassObserver>(); } } } //This function expands the rule public void expandeRule(RuleClassification rl, Instance inst, int ruleIndex) { int remainder = (int)Double.MAX_VALUE; int numInstanciaObservers = (int)rl.obserClassDistrib.sumOfValues(); // Number of instances for this rule observers. 
// (expandeRule: every gracePeriod instances for this rule, finds the best new
// predicate; it is added only if it is not already in the rule's predicate set and
// the extended predicate list would not duplicate an existing rule. For numeric
// predicates on the same attribute/symbol, the looser bound is replaced instead.)
this.updateRuleAttribStatistics(inst, rl, ruleIndex); if (numInstanciaObservers != 0 && this.gracePeriodOption.getValue() != 0) { remainder = (numInstanciaObservers) % (this.gracePeriodOption.getValue()); } if (remainder == 0){ this.saveBestValGlobalEntropy = new ArrayList<ArrayList<Double>>(); this.saveBestGlobalEntropy = new DoubleVector(); this.saveTheBest = new ArrayList<Double>(); this.minEntropyTemp = Double.MAX_VALUE; this.minEntropyNominalAttrib = Double.MAX_VALUE; theBestAttributes(inst, rl.observers); // The best value of entropy for each attribute. boolean HB = checkBestAttrib(numInstanciaObservers, rl.observers, rl.obserClassDistrib); // Check if the best attribute value is really the best. if (HB == true) { double attributeValue = this.saveTheBest.get(3); double symbol = this.saveTheBest.get(2); // =, <=, > (0.0, -1.0, 1.0). double value = this.saveTheBest.get(0); // Value of the attribute. this.pred = new Predicates(attributeValue, symbol, value); int countPred = 0; for (int i = 0; i < rl.predicateSet.size(); i++) { // Checks if the new predicate is not yet in the predicateSet. if (this.pred.getSymbol() == 0.0) { // Nominal Attribute. 
if (rl.predicateSet.get(i).getAttributeValue() != this.pred.getAttributeValue()) { countPred = countPred + 1; } } else { if (rl.predicateSet.get(i).getAttributeValue() != this.pred.getAttributeValue() || rl.predicateSet.get(i).getSymbol() != this.pred.getSymbol() || rl.predicateSet.get(i).getValue() != this.pred.getValue()) { countPred = countPred+1; } } } if (countPred == rl.predicateSet.size()) { int countDifPred = 0; ArrayList<Predicates> predicSetTemp = new ArrayList<Predicates>(); for (int x = 0; x < rl.predicateSet.size(); x++) { predicSetTemp.add(rl.predicateSet.get(x)); } predicSetTemp.add(this.pred); for (int f = 0; f < this.ruleSet.size(); f++) { int countDifPredTemp = 0; if (this.ruleSet.get(f).predicateSet.size() == predicSetTemp.size()) { for(int x = 0; x < this.ruleSet.get(f).predicateSet.size(); x++) { if (this.ruleSet.get(f).predicateSet.get(x).getAttributeValue() == predicSetTemp.get(x).getAttributeValue() && this.ruleSet.get(f).predicateSet.get(x).getSymbol() == predicSetTemp.get(x).getSymbol() && this.ruleSet.get(f).predicateSet.get(x).getValue() == predicSetTemp.get(x).getValue()) { countDifPredTemp = countDifPredTemp+1; } } if (countDifPredTemp == predicSetTemp.size()) { break; }else{ countDifPred = countDifPred + 1; } }else{ countDifPred = countDifPred + 1; } } if (countDifPred == this.ruleSet.size()) { if (this.pred.getSymbol() == 0.0) { initializeRuleStatistics(rl, pred, inst); } else if (this.pred.getSymbol() == 1.0) { int countIqualPred = 0; for (int f = 0; f < rl.predicateSet.size(); f++) { if (this.pred.getAttributeValue() == rl.predicateSet.get(f).getAttributeValue() && this.pred.getSymbol() == rl.predicateSet.get(f).getSymbol()) { countIqualPred = countIqualPred + 1; if (this.pred.getValue() > rl.predicateSet.get(f).getValue()) { rl.predicateSet.remove(f); initializeRuleStatistics(rl, pred, inst); } } } if (countIqualPred == 0) { initializeRuleStatistics(rl, pred, inst); } }else{ int countIqualPred = 0; for (int f = 0; f < 
rl.predicateSet.size(); f++) { if (this.pred.getAttributeValue() == rl.predicateSet.get(f).getAttributeValue() && this.pred.getSymbol() == rl.predicateSet.get(f).getSymbol()) { countIqualPred = countIqualPred + 1; if (this.pred.getValue() < rl.predicateSet.get(f).getValue()) { rl.predicateSet.remove(f); initializeRuleStatistics(rl, pred, inst); } } } if (countIqualPred == 0) { initializeRuleStatistics(rl, pred, inst); } } } } } } // This function gives the best value of entropy for each attribute public void theBestAttributes(Instance instance, AutoExpandVector<AttributeClassObserver> observersParameter) { for(int z = 0; z < instance.numAttributes() - 1; z++){ if(!instance.isMissing(z)){ int instAttIndex = modelAttIndexToInstanceAttIndex(z, instance); ArrayList<Double> attribBest = new ArrayList<Double>(); if(instance.attribute(instAttIndex).isNominal()){ this.minEntropyNominalAttrib=Double.MAX_VALUE; AutoExpandVector<DoubleVector> attribNominal = ((NominalAttributeClassObserver)observersParameter.get(z)).attValDistPerClass; findBestValEntropyNominalAtt(attribNominal, instance.attribute(z).numValues()); // The best value (lowest entropy) of a nominal attribute. attribBest.add(this.saveBestEntropyNominalAttrib.getValue(0)); attribBest.add(this.saveBestEntropyNominalAttrib.getValue(1)); attribBest.add(this.saveBestEntropyNominalAttrib.getValue(2)); this.saveBestValGlobalEntropy.add(attribBest); this.saveBestGlobalEntropy.setValue(z, this.saveBestEntropyNominalAttrib.getValue(1)); } else { this.root=((BinaryTreeNumericAttributeClassObserver)observersParameter.get(z)).root; mainFindBestValEntropy(this.root); // The best value (lowest entropy) of a numeric attribute. 
// (continuation of theBestAttributes: stores the best numeric split — cut point,
// entropy, symbol, majority-class index — then entropy(), findBestValEntropy() and
// its driver mainFindBestValEntropy(), the nominal-attribute search, the Hoeffding
// bound helper, and the start of checkBestAttrib(), which continues past this chunk.)
attribBest.add(this.saveBestEntropy.getValue(0)); attribBest.add(this.saveBestEntropy.getValue(1)); attribBest.add(this.saveBestEntropy.getValue(2)); attribBest.add(this.saveBestEntropy.getValue(4)); this.saveBestValGlobalEntropy.add(attribBest); this.saveBestGlobalEntropy.setValue(z, this.saveBestEntropy.getValue(1)); } }else{ double value = Double.MAX_VALUE; this.saveBestGlobalEntropy.setValue(z, value); } } } // Compute Entropy public double entropy(DoubleVector ValorDistClassE) { double entropy = 0.0; double sum = 0.0; for (double d : ValorDistClassE.getArrayCopy()) { if (d > 0.0) { entropy -= d * Utils.log2(d); sum += d; } } return sum > 0.0 ? (entropy + sum * Utils.log2(sum)) / sum : 0.0; } // Get the best value of entropy and its cutPoint for a numeric attribute public void findBestValEntropy(Node node, DoubleVector classCountL, DoubleVector classCountR, boolean status, double minEntropy, DoubleVector parentCCLeft ){ if (this.root != null) { if (node != null) { int numClass=0; DoubleVector classCountLTemp = new DoubleVector(); DoubleVector classCountRTemp = new DoubleVector(); DoubleVector parentCCL = new DoubleVector(); DoubleVector parentCCLParameter = new DoubleVector(); for (int f = 0; f < node.classCountsLeft.numValues(); f++) { parentCCLParameter.setValue(f, node.classCountsLeft.getValue(f)); } for (int p = 0; p < parentCCLeft.numValues(); p++) { parentCCL.addToValue(p, parentCCLeft.getValue(p)); } if (classCountL.numValues() >= classCountR.numValues()) { numClass = classCountL.numValues(); } else { numClass = classCountR.numValues(); } // Counting the real class count left and the real class count right. if (node.cut_point != this.root.cut_point) { for (int i = 0; i < numClass; i++) { if (status == true) { // Left node. 
// (findBestValEntropy: rebuilds the effective left/right class counts for this
// node from its parent's counts, then — if both sides hold more than Pmin of the
// instances — keeps the split if its weighted entropy improves on minEntropy.)
double parentss = parentCCL.getValue(i) - (node.classCountsLeft.getValue(i) + node.classCountsRight.getValue(i)); double left = classCountL.getValue(i) - node.classCountsRight.getValue(i) - parentss; double right = classCountR.getValue(i) + node.classCountsRight.getValue(i) + parentss; classCountLTemp.addToValue(i, left); classCountRTemp.addToValue(i, right); } if (status == false) { // Right node. double left = classCountL.getValue(i)+ node.classCountsLeft.getValue(i); double right = classCountR.getValue(i)- node.classCountsLeft.getValue(i); classCountLTemp.addToValue(i, left); classCountRTemp.addToValue(i, right); } } } else { classCountLTemp = classCountL; classCountRTemp = classCountR; } double classCountLSum = classCountLTemp.sumOfValues(); double classCountRSum = classCountRTemp.sumOfValues(); double numInst = this.root.classCountsLeft.sumOfValues() + this.root.classCountsRight.sumOfValues(); // The entropy value for all nodes except for the root. if ((classCountLSum > this.PminOption.getValue()*numInst) && (classCountRSum > this.PminOption.getValue() * numInst)) { double classCountLEntropy = entropy( classCountLTemp); // Entropy of class count left. 
// (best split bookkeeping: symbol -1.0 = '<=' when the left side is purer,
// 1.0 = '>' otherwise; saveBestEntropy slots: 0=cut point, 1=entropy, 2=symbol,
// 4=majority-class index. Recurses into both children with the updated minimum.)
double classCountREntropy = entropy(classCountRTemp); // Entropy of class count right if (((classCountLSum / numInst) * classCountLEntropy + (classCountRSum / numInst)* classCountREntropy) <= minEntropy) { this.minEntropyTemp = (classCountLSum / numInst) * classCountLEntropy + (classCountRSum / numInst) * classCountREntropy; this.cutPointTemp = node.cut_point; if (classCountLEntropy <= classCountREntropy) { this.symbol = -1.0; double value = 0.0; double index = 0.0; for (int h = 0; h < numClass; h++) { if (value <= classCountLTemp.getValue(h)) { value = classCountLTemp.getValue(h); index = (double)h; } } this.saveBestEntropy.setValue(0, this.cutPointTemp); this.saveBestEntropy.setValue(1, classCountLEntropy); this.saveBestEntropy.setValue(2, this.symbol); this.saveBestEntropy.setValue(4, index); } else { this.symbol = 1.0; double value = 0.0; double index = 0.0; for (int h = 0; h < numClass; h++) { if (value <= classCountRTemp.getValue(h)) { value = classCountRTemp.getValue(h); index = (double)h; } } this.saveBestEntropy.setValue(0, this.cutPointTemp); this.saveBestEntropy.setValue(1, classCountREntropy); this.saveBestEntropy.setValue(2, this.symbol); this.saveBestEntropy.setValue(4, index); } } } findBestValEntropy(node.left ,classCountLTemp , classCountRTemp, true, this.minEntropyTemp, parentCCLParameter); findBestValEntropy(node.right ,classCountLTemp , classCountRTemp, false, this.minEntropyTemp, parentCCLParameter); } } } //Best value of entropy public void mainFindBestValEntropy(Node root) { if (root != null) { DoubleVector parentClassCL = new DoubleVector(); DoubleVector classCountL = root.classCountsLeft; //class count left DoubleVector classCountR = root.classCountsRight; //class count right double numInst = root.classCountsLeft.sumOfValues() + root.classCountsRight.sumOfValues(); double classCountLSum = root.classCountsLeft.sumOfValues(); double classCountRSum = root.classCountsRight.sumOfValues(); double classCountLEntropy = entropy(classCountL); double 
// (mainFindBestValEntropy continued: seeds the recursion from the root with the
// root's own weighted entropy as the bound. Then findBestValEntropyNominalAtt —
// lowest-entropy nominal value — the Hoeffding bound helper, and the start of
// checkBestAttrib, whose body continues beyond this chunk and is left untouched.)
classCountREntropy = entropy(classCountR); this.minEntropyTemp = ( classCountLSum / numInst) * classCountLEntropy + (classCountRSum / numInst)* classCountREntropy; for (int f = 0; f < root.classCountsLeft.numValues(); f++) { parentClassCL.setValue(f, root.classCountsLeft.getValue(f)); } findBestValEntropy(root ,classCountL , classCountR, true, this.minEntropyTemp, parentClassCL); } } //Find best value of entropy for nominal attributes public void findBestValEntropyNominalAtt(AutoExpandVector<DoubleVector> attrib, int attNumValues) { ArrayList<ArrayList<Double>> distClassValue = new ArrayList<ArrayList<Double>>(); // System.out.print("attrib"+attrib+"\n"); for (int z = 0; z < attrib.size(); z++) { distClassValue.add(new ArrayList<Double>()); } for (int v = 0; v < attNumValues; v++) { DoubleVector saveVal = new DoubleVector(); for (int z = 0; z < attrib.size(); z++) { if (attrib.get(z) != null) { distClassValue.get(z).add(attrib.get(z).getValue(v)); } else { distClassValue.get(z).add(0.0); } if(distClassValue.get(z).get(v).isNaN()) { distClassValue.get(z).add(0.0); } saveVal.setValue(z, distClassValue.get(z).get(v)); } double sumValue = saveVal.sumOfValues(); if (sumValue > 0.0) { double entropyVal = entropy(saveVal); if (entropyVal <= this.minEntropyNominalAttrib) { this.minEntropyNominalAttrib = entropyVal; this.saveBestEntropyNominalAttrib.setValue(0, v); this.saveBestEntropyNominalAttrib.setValue(1, entropyVal); this.saveBestEntropyNominalAttrib.setValue(2, 0.0); } } } } //Hoeffding Bound public double ComputeHoeffdingBound(double range, double confidence, double n) { return Math.sqrt(((range * range) * Math.log(1.0 / confidence)) / (2.0 * n)); } //Check if the best attribute is really the best public boolean checkBestAttrib(double n, AutoExpandVector<AttributeClassObserver> observerss, DoubleVector observedClassDistribution){ double h0 = entropy(observedClassDistribution); boolean isTheBest = false; double[] entropyValues = 
getBestSecondBestEntropy(this.saveBestGlobalEntropy); double bestEntropy = entropyValues[0]; double secondBestEntropy = entropyValues[1]; double range = Utils.log2(this.numClass); double hoeffdingBound = ComputeHoeffdingBound(range, this.splitConfidenceOption.getValue(), n); if ((h0 > bestEntropy) && ((secondBestEntropy - bestEntropy > hoeffdingBound) || (hoeffdingBound < this.tieThresholdOption.getValue()))) { for (int i = 0; i < this.saveBestValGlobalEntropy.size(); i++) { if (bestEntropy ==(this.saveBestValGlobalEntropy.get(i).get(1))) { this.saveTheBest.add(this.saveBestValGlobalEntropy.get(i).get(0)); this.saveTheBest.add(this.saveBestValGlobalEntropy.get(i).get(1)); this.saveTheBest.add(this.saveBestValGlobalEntropy.get(i).get(2)); this.saveTheBest.add((double)i); if (this.saveTheBest.get(2) != 0.0) { this.saveTheBest.add(this.saveBestValGlobalEntropy.get(i).get(3)); } break; } } isTheBest = true; } else { isTheBest = false; } return isTheBest; } //Get best and second best attributes protected double [] getBestSecondBestEntropy(DoubleVector entropy){ double[] entropyValues = new double[2]; double best = Double.MAX_VALUE; double secondBest = Double.MAX_VALUE; for (int i = 0; i < entropy.numValues(); i++) { if (entropy.getValue(i) < best) { secondBest = best; best = entropy.getValue(i); } else{ if (entropy.getValue(i) < secondBest) { secondBest = entropy.getValue(i); } } } entropyValues[0] = best; entropyValues[1] = secondBest; return entropyValues; } //Get rule majority class index protected double getRuleMajorityClassIndex(RuleClassification r) { double maxvalue = 0.0; int posMaxValue = 0; for (int i = 0; i < r.obserClassDistrib.numValues(); i++) { if (r.obserClassDistrib.getValue(i) > maxvalue) { maxvalue = r.obserClassDistrib.getValue(i); posMaxValue = i; } } return (double)posMaxValue; } //Get observers class distribution probability protected double[] oberversDistribProb(Instance inst, DoubleVector classDistrib) { double[] votes = new 
double[this.numClass]; double sum = classDistrib.sumOfValues(); for (int z = 0; z < this.numClass; z++) { votes[z] = classDistrib.getValue(z) / sum; } return votes; } // The following three functions are used for the prediction protected double[] firstHit(Instance inst) { boolean fired = false; int countFired = 0; double[] votes = new double[this.numClass]; for (int j = 0; j < this.ruleSet.size(); j++) { if (this.ruleSet.get(j).ruleEvaluate(inst) == true) { countFired = countFired + 1; for (int z = 0; z < this.numClass; z++) { votes[z] = this.ruleSet.get(j).obserClassDistrib.getValue(z) / this.ruleSet.get(j).obserClassDistrib.sumOfValues(); } return votes; } } if (countFired > 0) { fired = true; } else { fired = false; } if (fired == false) { votes = oberversDistribProb(inst, this.observedClassDistribution); } return votes; } //Get the votes using weighted Max protected double[] weightedMax(Instance inst) { int countFired = 0; boolean fired = false; double highest = 0.0; double[] votes = new double[this.numClass]; ArrayList<Double> ruleSetVotes = new ArrayList<Double>(); ArrayList<ArrayList<Double>> majorityProb = new ArrayList<ArrayList<Double>>(); for (int j = 0; j < this.ruleSet.size(); j++) { ArrayList<Double> ruleProb = new ArrayList<Double>(); if (this.ruleSet.get(j).ruleEvaluate(inst) == true) { countFired = countFired+1; for (int z = 0; z < this.numClass; z++) { ruleSetVotes.add(this.ruleSet.get(j).obserClassDistrib.getValue(z) / this.ruleSet.get(j).obserClassDistrib.sumOfValues()); ruleProb.add(this.ruleSet.get(j).obserClassDistrib.getValue(z) / this.ruleSet.get(j).obserClassDistrib.sumOfValues()); } majorityProb.add(ruleProb); } } if (countFired > 0) { fired = true; Collections.sort(ruleSetVotes); highest = ruleSetVotes.get(ruleSetVotes.size() - 1); for (int t = 0; t < majorityProb.size(); t++) { for(int m = 0; m < majorityProb.get(t).size(); m++) { if (majorityProb.get(t).get(m) == highest) { for (int h = 0; h < majorityProb.get(t).size(); h++) { 
votes[h] = majorityProb.get(t).get(h); } break; } } } } else { fired = false; } if (fired == false) { votes = oberversDistribProb(inst, this.observedClassDistribution); } return votes; } //Get the votes using weighted Sum protected double[] weightedSum(Instance inst) { boolean fired = false; int countFired = 0; double[] votes = new double[this.numClass]; ArrayList<Double> weightSum = new ArrayList<Double>(); ArrayList<ArrayList<Double>> majorityProb = new ArrayList<ArrayList<Double>>(); for (int j = 0; j < this.ruleSet.size(); j++) { ArrayList<Double> ruleProb = new ArrayList<Double>(); if (this.ruleSet.get(j).ruleEvaluate(inst) == true) { countFired = countFired + 1; for (int z = 0; z < this.numClass; z++) { ruleProb.add(this.ruleSet.get(j).obserClassDistrib.getValue(z) / this.ruleSet.get(j).obserClassDistrib.sumOfValues()); } majorityProb.add(ruleProb); } } if (countFired > 0) { fired = true; for (int m = 0; m < majorityProb.get(0).size(); m++) { double sum = 0.0; for (int t = 0; t < majorityProb.size(); t++){ sum = sum + majorityProb.get(t).get(m); } weightSum.add(sum); } for (int h = 0; h < weightSum.size(); h++) { votes[h] = weightSum.get(h) / majorityProb.size(); } } else { fired = false; } if (fired == false) { votes = oberversDistribProb(inst, this.observedClassDistribution); } return votes; } protected AttributeClassObserver newNominalClassObserver() { return new NominalAttributeClassObserver(); } protected AttributeClassObserver newNumericClassObserver() { return new BinaryTreeNumericAttributeClassObserver(); } protected AttributeClassObserver newNumericClassObserver2() { return new GaussianNumericAttributeClassObserver(); } public void manageMemory(int currentByteSize, int maxByteSize) { // TODO Auto-generated method stub } }
Java
/*
 *    Classifier.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.classifiers;

import moa.MOAObject;
import moa.core.InstancesHeader;
import moa.core.Measurement;
import moa.gui.AWTRenderable;
import moa.options.OptionHandler;
import weka.core.Instance;

/**
 * Classifier interface for incremental classification models.
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public interface Classifier extends MOAObject, OptionHandler, AWTRenderable {

    /**
     * Sets the reference to the header of the data stream.
     * The header of the data stream is extended from WEKA <code>Instances</code>.
     * This header is needed to know the number of classes and attributes.
     *
     * @param ih the reference to the data stream header
     */
    public void setModelContext(InstancesHeader ih);

    /**
     * Gets the reference to the header of the data stream.
     * The header of the data stream is extended from WEKA <code>Instances</code>.
     * This header is needed to know the number of classes and attributes.
     *
     * @return the reference to the data stream header
     */
    public InstancesHeader getModelContext();

    /**
     * Gets whether this classifier needs a random seed.
     * Examples of methods that needs a random seed are bagging and boosting.
     *
     * @return true if the classifier needs a random seed.
     */
    public boolean isRandomizable();

    /**
     * Sets the seed for random number generation.
     *
     * @param s the seed
     */
    public void setRandomSeed(int s);

    /**
     * Gets whether training has started.
     *
     * @return true if training has started
     */
    public boolean trainingHasStarted();

    /**
     * Gets the sum of the weights of the instances that have been used
     * by this classifier during the training in <code>trainOnInstance</code>.
     *
     * @return the weight of the instances that have been used for training
     */
    public double trainingWeightSeenByModel();

    /**
     * Resets this classifier. It must be similar to
     * starting a new classifier from scratch.
     */
    public void resetLearning();

    /**
     * Trains this classifier incrementally using the given instance.
     *
     * @param inst the instance to be used for training
     */
    public void trainOnInstance(Instance inst);

    /**
     * Predicts the class memberships for a given instance. If
     * an instance is unclassified, the returned array elements
     * must be all zero.
     *
     * @param inst the instance to be classified
     * @return an array containing the estimated membership
     *         probabilities of the test instance in each class
     */
    public double[] getVotesForInstance(Instance inst);

    /**
     * Gets whether this classifier correctly classifies an instance.
     * Uses getVotesForInstance to obtain the prediction and
     * the instance to obtain its true class.
     *
     * @param inst the instance to be classified
     * @return true if the instance is correctly classified
     */
    public boolean correctlyClassifies(Instance inst);

    /**
     * Gets the current measurements of this classifier.
     *
     * @return an array of measurements to be used in evaluation tasks
     */
    public Measurement[] getModelMeasurements();

    /**
     * Gets the classifiers of this ensemble.
     * Returns null if this classifier is a single classifier.
     *
     * @return an array of the classifiers of the ensemble
     */
    public Classifier[] getSubClassifiers();

    /**
     * Produces a copy of this classifier.
     *
     * @return the copy of this classifier
     */
    public Classifier copy();
}
Java
/*
 *    ActiveClassifier.java
 *    Copyright (C) 2011 University of Waikato, Hamilton, New Zealand
 *    @author Indre Zliobaite (zliobaite at gmail dot com)
 *    @author Albert Bifet (abifet at cs dot waikato dot ac dot nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.classifiers.active;

import java.util.LinkedList;
import java.util.List;
import moa.classifiers.AbstractClassifier;
import moa.classifiers.Classifier;
import weka.core.Instance;
import weka.core.Utils;
import moa.core.DoubleVector;
import moa.core.Measurement;
import moa.options.ClassOption;
import moa.options.FloatOption;
import moa.options.MultiChoiceOption;

/**
 * Active learning setting for evolving data streams.
 *
 * <p>Active learning focuses on learning an accurate model with as few labels
 * as possible. Streaming data poses additional challenges for active learning,
 * since the data distribution may change over time (concept drift) and
 * classifiers need to adapt. Conventional active learning strategies
 * concentrate on querying the most uncertain instances, which are typically
 * concentrated around the decision boundary. If changes do not occur close to
 * the boundary, they will be missed and classifiers will fail to adapt. This
 * class contains four active learning strategies for streaming data that
 * explicitly handle concept drift. They are based on randomization, fixed
 * uncertainty, dynamic allocation of labeling efforts over time and
 * randomization of the search space [ZBPH]. It also contains the Selective
 * Sampling strategy, which is adapted from [CGZ]; it uses a variable labeling
 * threshold.</p>
 *
 * <p>[ZBPH] Indre Zliobaite, Albert Bifet, Bernhard Pfahringer, Geoff Holmes:
 * Active Learning with Evolving Streaming Data. ECML/PKDD (3) 2011: 597-612</p>
 *
 * <p>[CGZ] N. Cesa-Bianchi, C. Gentile, and L. Zaniboni. Worst-case analysis of
 * selective sampling for linear classification. J. Mach. Learn. Res. (7) 2006:
 * 1205-1230</p>
 *
 * <p>Parameters:</p> <ul> <li>-l : Classifier to train</li> <li>-d : Strategy
 * to use: Random, FixedUncertainty, VarUncertainty, RandVarUncertainty,
 * SelSampling</li> <li>-b : Budget to use</li> <li>-u : Fixed threshold</li>
 * <li>-s : Floating budget step</li> <li>-n : Number of instances at beginning
 * without active learning</li> </ul>
 *
 * @author Indre Zliobaite (zliobaite at gmail dot com)
 * @author Albert Bifet (abifet at cs dot waikato dot ac dot nz)
 * @version $Revision: 7 $
 */
public class ActiveClassifier extends AbstractClassifier {

    private static final long serialVersionUID = 1L;

    @Override
    public String getPurposeString() {
        return "Active learning classifier for evolving data streams";
    }

    public ClassOption baseLearnerOption = new ClassOption("baseLearner", 'l',
            "Classifier to train.", Classifier.class, "drift.SingleClassifierDrift");

    public MultiChoiceOption activeLearningStrategyOption = new MultiChoiceOption(
            "activeLearningStrategy", 'd', "Active Learning Strategy to use.", new String[]{
                "Random", "FixedUncertainty", "VarUncertainty", "RandVarUncertainty",
                "SelSampling"}, new String[]{
                "Random strategy",
                "Fixed uncertainty strategy",
                "Uncertainty strategy with variable threshold",
                "Uncertainty strategy with randomized variable threshold",
                "Selective Sampling"}, 0);

    public FloatOption budgetOption = new FloatOption("budget", 'b',
            "Budget to use.", 0.1, 0.0, 1.0);

    public FloatOption fixedThresholdOption = new FloatOption("fixedThreshold", 'u',
            "Fixed threshold.", 0.9, 0.00, 1.00);

    public FloatOption stepOption = new FloatOption("step", 's',
            "Floating budget step.", 0.01, 0.00, 1.00);

    public FloatOption numInstancesInitOption = new FloatOption("numInstancesInit", 'n',
            "Number of instances at beginning without active learning.", 0.0, 0.00,
            Integer.MAX_VALUE);

    /** The wrapped base learner being actively trained. */
    public Classifier classifier;

    /** Total number of labels acquired (all strategies). */
    public int costLabeling;

    /** Number of labels acquired by the random strategy specifically. */
    public int costLabelingRandom;

    /** Number of instances seen so far. */
    public int iterationControl;

    /** Adaptive uncertainty threshold used by the variable strategies. */
    public double newThreshold;

    /** Last maximum posterior computed for an incoming instance. */
    public double maxPosterior;

    // NOTE(review): this field is reported as a measurement but is never
    // updated anywhere in this class — it stays at its reset value of 0.
    public double accuracyBaseLearner;

    private double outPosterior;

    /**
     * Returns the largest normalized posterior probability from a vote array,
     * or 0 when the prediction has fewer than two entries.
     *
     * @param incomingPrediction raw (possibly unnormalized) class votes
     * @return the maximum normalized posterior
     */
    private double getMaxPosterior(double[] incomingPrediction) {
        if (incomingPrediction.length > 1) {
            DoubleVector vote = new DoubleVector(incomingPrediction);
            if (vote.sumOfValues() > 0.0) {
                vote.normalize();
            }
            incomingPrediction = vote.getArrayRef();
            outPosterior = (incomingPrediction[Utils.maxIndex(incomingPrediction)]);
        } else {
            outPosterior = 0;
        }
        return outPosterior;
    }

    /** Random strategy: label each instance with probability equal to the budget. */
    private void labelRandom(Instance inst) {
        if (this.classifierRandom.nextDouble() < this.budgetOption.getValue()) {
            this.classifier.trainOnInstance(inst);
            this.costLabeling++;
            this.costLabelingRandom++;
        }
    }

    /** Fixed-uncertainty strategy: label when the posterior is below a fixed threshold. */
    private void labelFixed(double incomingPosterior, Instance inst) {
        if (incomingPosterior < this.fixedThresholdOption.getValue()) {
            this.classifier.trainOnInstance(inst);
            this.costLabeling++;
        }
    }

    /**
     * Variable-uncertainty strategy: label when the posterior is below an
     * adaptive threshold that shrinks after each label and grows otherwise,
     * keeping the labeling rate near the budget.
     */
    private void labelVar(double incomingPosterior, Instance inst) {
        if (incomingPosterior < this.newThreshold) {
            this.classifier.trainOnInstance(inst);
            this.costLabeling++;
            this.newThreshold *= (1 - this.stepOption.getValue());
        } else {
            this.newThreshold *= (1 + this.stepOption.getValue());
        }
    }

    /**
     * Selective-sampling strategy [CGZ]: label with probability b/(b+p) where
     * p is the distance of the posterior from the uniform prediction.
     */
    private void labelSelSampling(double incomingPosterior, Instance inst) {
        double p = Math.abs(incomingPosterior - 1.0 / (inst.numClasses()));
        double budget = this.budgetOption.getValue() / (this.budgetOption.getValue() + p);
        if (this.classifierRandom.nextDouble() < budget) {
            this.classifier.trainOnInstance(inst);
            this.costLabeling++;
        }
    }

    @Override
    public void resetLearningImpl() {
        this.classifier = ((Classifier) getPreparedClassOption(this.baseLearnerOption)).copy();
        this.classifier.resetLearning();
        this.costLabeling = 0;
        this.costLabelingRandom = 0;
        this.iterationControl = 0;
        this.newThreshold = 1.0;
        this.accuracyBaseLearner = 0;
    }

    @Override
    public void trainOnInstanceImpl(Instance inst) {
        this.iterationControl++;
        // Warm-up phase: label everything unconditionally.
        if (this.iterationControl <= this.numInstancesInitOption.getValue()) {
            this.classifier.trainOnInstance(inst);
            this.costLabeling++;
            return;
        }
        // Fraction of the post-warm-up stream labeled so far.
        double costNow = (this.costLabeling - this.numInstancesInitOption.getValue())
                / ((double) this.iterationControl - this.numInstancesInitOption.getValue());
        if (costNow < this.budgetOption.getValue()) { // allow to label
            switch (this.activeLearningStrategyOption.getChosenIndex()) {
                case 0: // Random
                    labelRandom(inst);
                    break;
                case 1: // fixed
                    maxPosterior = getMaxPosterior(this.classifier.getVotesForInstance(inst));
                    labelFixed(maxPosterior, inst);
                    break;
                case 2: // variable
                    maxPosterior = getMaxPosterior(this.classifier.getVotesForInstance(inst));
                    labelVar(maxPosterior, inst);
                    break;
                case 3: // randomized
                    maxPosterior = getMaxPosterior(this.classifier.getVotesForInstance(inst));
                    maxPosterior = maxPosterior / (this.classifierRandom.nextGaussian() + 1.0);
                    labelVar(maxPosterior, inst);
                    break;
                case 4: // selective-sampling
                    maxPosterior = getMaxPosterior(this.classifier.getVotesForInstance(inst));
                    labelSelSampling(maxPosterior, inst);
                    break;
            }
        }
    }

    @Override
    public double[] getVotesForInstance(Instance inst) {
        return this.classifier.getVotesForInstance(inst);
    }

    @Override
    public boolean isRandomizable() {
        return true;
    }

    @Override
    public void getModelDescription(StringBuilder out, int indent) {
        ((AbstractClassifier) this.classifier).getModelDescription(out, indent);
    }

    @Override
    protected Measurement[] getModelMeasurementsImpl() {
        List<Measurement> measurementList = new LinkedList<Measurement>();
        measurementList.add(new Measurement("labeling cost", this.costLabeling));
        measurementList.add(new Measurement("newThreshold", this.newThreshold));
        measurementList.add(new Measurement("maxPosterior", this.maxPosterior));
        // Fix: guard against division by zero before any label has been
        // acquired — previously this measurement was NaN until costLabeling > 0.
        double accuracyPercent = this.costLabeling > 0
                ? 100 * this.accuracyBaseLearner / this.costLabeling
                : 0.0;
        measurementList.add(new Measurement("accuracyBaseLearner (percent)", accuracyPercent));
        Measurement[] modelMeasurements = ((AbstractClassifier) this.classifier).getModelMeasurements();
        if (modelMeasurements != null) {
            for (Measurement measurement : modelMeasurements) {
                measurementList.add(measurement);
            }
        }
        return measurementList.toArray(new Measurement[measurementList.size()]);
    }
}
Java
/*
 *    DriftDetectionMethodClassifier.java
 *    Copyright (C) 2008 University of Waikato, Hamilton, New Zealand
 *    @author Manuel Baena (mbaena@lcc.uma.es)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
package moa.classifiers.drift;

import java.util.LinkedList;
import java.util.List;

import moa.classifiers.AbstractClassifier;
import moa.classifiers.Classifier;
import moa.classifiers.meta.WEKAClassifier;
import moa.core.Measurement;
import moa.classifiers.core.driftdetection.ChangeDetector;
import moa.options.ClassOption;
import weka.core.Instance;
import weka.core.Utils;

/**
 * Class for handling concept drift datasets with a wrapper on a
 * classifier.<p>
 *
 * Valid options are:<p>
 *
 * -l classname <br>
 * Specify the full class name of a classifier as the basis for
 * the concept drift classifier.<p>
 * -d Drift detection method to use<br>
 *
 * @author Manuel Baena (mbaena@lcc.uma.es)
 * @version 1.1
 */
public class DriftDetectionMethodClassifier extends AbstractClassifier {

    private static final long serialVersionUID = 1L;

    /** Detector level: no drift suspected. */
    public static final int DDM_INCONTROL_LEVEL = 0;

    /** Detector level: possible drift — a backup learner is being trained. */
    public static final int DDM_WARNING_LEVEL = 1;

    /** Detector level: drift confirmed — the backup learner is promoted. */
    public static final int DDM_OUTCONTROL_LEVEL = 2;

    @Override
    public String getPurposeString() {
        return "Classifier that replaces the current classifier with a new one when a change is detected in accuracy.";
    }

    public ClassOption baseLearnerOption = new ClassOption("baseLearner", 'l',
            "Classifier to train.", Classifier.class, "bayes.NaiveBayes");

    public ClassOption driftDetectionMethodOption = new ClassOption("driftDetectionMethod", 'd',
            "Drift detection method to use.", ChangeDetector.class, "DDM");

    /** Active model, always trained and always used for predictions. */
    protected Classifier classifier;

    /** Backup model trained while the detector is in the warning zone. */
    protected Classifier newclassifier;

    protected ChangeDetector driftDetectionMethod;

    /** True while in-control, so the next warning restarts the backup learner. */
    protected boolean newClassifierReset;

    protected int ddmLevel;

    protected int changeDetected = 0;

    protected int warningDetected = 0;

    @Override
    public void resetLearningImpl() {
        Classifier baseLearner = (Classifier) getPreparedClassOption(this.baseLearnerOption);
        this.classifier = baseLearner.copy();
        this.newclassifier = this.classifier.copy();
        this.classifier.resetLearning();
        this.newclassifier.resetLearning();
        this.driftDetectionMethod =
                ((ChangeDetector) getPreparedClassOption(this.driftDetectionMethodOption)).copy();
        this.newClassifierReset = false;
    }

    @Override
    public void trainOnInstanceImpl(Instance inst) {
        // Feed the detector the 0/1 error of the active model on this instance.
        int trueClass = (int) inst.classValue();
        boolean predictedCorrectly =
                Utils.maxIndex(this.classifier.getVotesForInstance(inst)) == trueClass;
        this.driftDetectionMethod.input(predictedCorrectly ? 0.0 : 1.0);

        // Warning takes precedence over change (it is checked last), matching
        // the original level-assignment order.
        boolean change = this.driftDetectionMethod.getChange();
        boolean warning = this.driftDetectionMethod.getWarningZone();
        this.ddmLevel = warning ? DDM_WARNING_LEVEL
                : (change ? DDM_OUTCONTROL_LEVEL : DDM_INCONTROL_LEVEL);

        switch (this.ddmLevel) {
            case DDM_WARNING_LEVEL:
                // First warning after an in-control period restarts the backup.
                if (this.newClassifierReset) {
                    this.warningDetected++;
                    this.newclassifier.resetLearning();
                    this.newClassifierReset = false;
                }
                this.newclassifier.trainOnInstance(inst);
                break;
            case DDM_OUTCONTROL_LEVEL:
                // Drift confirmed: promote the backup, then start a fresh one.
                this.changeDetected++;
                this.classifier = this.newclassifier;
                if (this.classifier instanceof WEKAClassifier) {
                    ((WEKAClassifier) this.classifier).buildClassifier();
                }
                this.newclassifier =
                        ((Classifier) getPreparedClassOption(this.baseLearnerOption)).copy();
                this.newclassifier.resetLearning();
                break;
            default: // DDM_INCONTROL_LEVEL
                this.newClassifierReset = true;
                break;
        }
        // The active model trains on every instance regardless of the level.
        this.classifier.trainOnInstance(inst);
    }

    @Override
    public double[] getVotesForInstance(Instance inst) {
        return this.classifier.getVotesForInstance(inst);
    }

    @Override
    public boolean isRandomizable() {
        return true;
    }

    @Override
    public void getModelDescription(StringBuilder out, int indent) {
        ((AbstractClassifier) this.classifier).getModelDescription(out, indent);
    }

    @Override
    protected Measurement[] getModelMeasurementsImpl() {
        List<Measurement> measurements = new LinkedList<Measurement>();
        measurements.add(new Measurement("Change detected", this.changeDetected));
        measurements.add(new Measurement("Warning detected", this.warningDetected));
        Measurement[] baseMeasurements =
                ((AbstractClassifier) this.classifier).getModelMeasurements();
        if (baseMeasurements != null) {
            for (Measurement measurement : baseMeasurements) {
                measurements.add(measurement);
            }
        }
        // The counters report deltas since the last snapshot, so clear them here.
        this.changeDetected = 0;
        this.warningDetected = 0;
        return measurements.toArray(new Measurement[measurements.size()]);
    }
}
Java
/* * SingleClassifierDrift.java * Copyright (C) 2008 University of Waikato, Hamilton, New Zealand * @author Manuel Baena (mbaena@lcc.uma.es) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.classifiers.drift; /** * Class for handling concept drift datasets with a wrapper on a * classifier.<p> * * Valid options are:<p> * * -l classname <br> * Specify the full class name of a classifier as the basis for * the concept drift classifier.<p> * -d Drift detection method to use<br> * * @author Manuel Baena (mbaena@lcc.uma.es) * @version 1.1 */ public class SingleClassifierDrift extends DriftDetectionMethodClassifier{ }
Java
/*
 *    NaiveBayesMultinomial.java
 *    Copyright (C) 2011 University of Waikato, Hamilton, New Zealand
 *    @author Bernhard Pfahringer (bernhard@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.classifiers.bayes;

import moa.core.Measurement;
import moa.core.StringUtils;
import moa.options.FloatOption;
import weka.core.*;
import java.util.*;
import moa.classifiers.AbstractClassifier;
import moa.core.DoubleVector;

/**
 * <!-- globalinfo-start --> Class for building and using a multinomial Naive
 * Bayes classifier. Performs text classic bayesian prediction while making
 * naive assumption that all inputs are independent. For more information
 * see,<br/> <br/> Andrew Mccallum, Kamal Nigam: A Comparison of Event Models
 * for Naive Bayes Text Classification. In: AAAI-98 Workshop on 'Learning for
 * Text Categorization', 1998.<br/> <br/> The core equation for this
 * classifier:<br/> <br/> P[Ci|D] = (P[D|Ci] x P[Ci]) / P[D] (Bayes rule)<br/>
 * <br/> where Ci is class i and D is a document.<br/> <br/> Incremental version
 * of the algorithm.
 * <p/>
 * <!-- globalinfo-end -->
 * <!-- technical-bibtex-start --> BibTeX:
 * <pre>
 * &#64;inproceedings{Mccallum1998,
 *    author = {Andrew Mccallum and Kamal Nigam},
 *    booktitle = {AAAI-98 Workshop on 'Learning for Text Categorization'},
 *    title = {A Comparison of Event Models for Naive Bayes Text Classification},
 *    year = {1998}
 * }
 * </pre>
 * <p/>
 * <!-- technical-bibtex-end -->
 */
public class NaiveBayesMultinomial extends AbstractClassifier {

    /** for serialization */
    private static final long serialVersionUID = -7204398796974263187L;

    /**
     * Laplace correction added to every count so that a word never seen in a
     * class does not force a zero probability for that class.
     */
    public FloatOption laplaceCorrectionOption = new FloatOption("laplaceCorrection",
            'l', "Laplace correction factor.",
            1.0, 0.00, Integer.MAX_VALUE);

    @Override
    public String getPurposeString() {
        // Fixed typo: previously read "AMultinomial Naive Bayes classifier...".
        return "Multinomial Naive Bayes classifier: performs classic bayesian prediction while making naive assumption that all inputs are independent.";
    }

    /**
     * sum of weight_of_instance * word_count_of_instance for each class
     */
    protected double[] m_classTotals;

    /**
     * copy of header information for use in toString method
     * NOTE(review): this field is never assigned anywhere in this class, so
     * getModelDescription() will throw a NullPointerException unless a
     * subclass or caller sets it — confirm against the rest of the codebase.
     */
    protected Instances m_headerInfo;

    /**
     * number of class values
     */
    protected int m_numClasses;

    /**
     * the probability of a class (i.e. Pr[H])
     */
    protected double[] m_probOfClass;

    /**
     * probability that a word (w) exists in a class (H) (i.e. Pr[w|H]) The
     * matrix is in the this format: m_wordTotalForClass[wordAttribute][class]
     */
    protected DoubleVector[] m_wordTotalForClass;

    /** when true, all model state is (re)initialized on the next training instance */
    protected boolean reset = false;

    @Override
    public void resetLearningImpl() {
        // Defer the actual (re)allocation until the first instance arrives,
        // because the number of classes/attributes is only known then.
        this.reset = true;
    }

    /**
     * Trains the classifier with the given instance.
     *
     * @param inst the new training instance to include in the model
     */
    @Override
    public void trainOnInstanceImpl(Instance inst) {
        if (this.reset == true) {
            // Lazy initialization: sizes depend on the instance header.
            this.m_numClasses = inst.numClasses();
            double laplace = this.laplaceCorrectionOption.getValue();
            int numAttributes = inst.numAttributes();

            m_probOfClass = new double[m_numClasses];
            Arrays.fill(m_probOfClass, laplace);

            m_classTotals = new double[m_numClasses];
            Arrays.fill(m_classTotals, laplace * numAttributes);

            m_wordTotalForClass = new DoubleVector[m_numClasses];
            for (int i = 0; i < m_numClasses; i++) {
                m_wordTotalForClass[i] = new DoubleVector();
            }
            this.reset = false;
        }

        // Update classifier
        int classIndex = inst.classIndex();
        int classValue = (int) inst.value(classIndex);

        double w = inst.weight();
        m_probOfClass[classValue] += w;

        m_classTotals[classValue] += w * totalSize(inst);
        // (removed unused local that copied m_classTotals[classValue])

        for (int i = 0; i < inst.numValues(); i++) {
            int index = inst.index(i);
            if (index != classIndex && !inst.isMissing(i)) {
                // Apply the Laplace correction the first time this word is
                // counted for this class (the DoubleVector is sparse, so
                // untouched entries read as 0).
                double laplaceCorrection = 0.0;
                if (m_wordTotalForClass[classValue].getValue(index) == 0) {
                    laplaceCorrection = this.laplaceCorrectionOption.getValue();
                }
                m_wordTotalForClass[classValue].addToValue(index,
                        w * inst.valueSparse(i) + laplaceCorrection);
            }
        }
    }

    /**
     * Calculates the class membership probabilities for the given test
     * instance.
     *
     * @param instance the instance to be classified
     * @return predicted class probability distribution
     */
    @Override
    public double[] getVotesForInstance(Instance instance) {
        if (this.reset == true) {
            // Untrained model: return an all-zero vote vector.
            // NOTE(review): length 2 assumes a binary problem here — confirm.
            return new double[2];
        }
        double[] probOfClassGivenDoc = new double[m_numClasses];
        double totalSize = totalSize(instance);

        // Work in log space to avoid underflow; Utils.logs2probs converts back.
        for (int i = 0; i < m_numClasses; i++) {
            probOfClassGivenDoc[i] = Math.log(m_probOfClass[i])
                    - totalSize * Math.log(m_classTotals[i]);
        }

        for (int i = 0; i < instance.numValues(); i++) {
            int index = instance.index(i);
            if (index == instance.classIndex() || instance.isMissing(i)) {
                continue;
            }
            double wordCount = instance.valueSparse(i);
            for (int c = 0; c < m_numClasses; c++) {
                // Words never seen for class c fall back to the Laplace factor.
                double value = m_wordTotalForClass[c].getValue(index);
                probOfClassGivenDoc[c] += wordCount
                        * Math.log(value == 0 ? this.laplaceCorrectionOption.getValue() : value);
            }
        }

        return Utils.logs2probs(probOfClassGivenDoc);
    }

    /**
     * Sums the (non-negative) word counts of an instance, skipping the class
     * attribute and missing values.
     *
     * @param instance the instance whose counts are summed
     * @return total word count of the instance (negative values are ignored)
     */
    public double totalSize(Instance instance) {
        int classIndex = instance.classIndex();
        double total = 0.0;
        for (int i = 0; i < instance.numValues(); i++) {
            int index = instance.index(i);
            if (index == classIndex || instance.isMissing(i)) {
                continue;
            }
            double count = instance.valueSparse(i);
            if (count >= 0) {
                total += count;
            } else {
                // Negative counts are silently skipped; the original code kept
                // a commented-out exception here, so this is a deliberate
                // best-effort choice.
                //throw new Exception("Numeric attribute value is not >= 0. " + i + " " + index + " " +
                //	instance.valueSparse(i) + " " + " " + instance);
            }
        }
        return total;
    }

    @Override
    protected Measurement[] getModelMeasurementsImpl() {
        // No per-model measurements are reported by this classifier.
        return null;
    }

    @Override
    public void getModelDescription(StringBuilder result, int indent) {
        StringUtils.appendIndented(result, indent, "xxx MNB1 xxx\n\n");

        result.append("The independent probability of a class\n");
        result.append("--------------------------------------\n");
        for (int c = 0; c < m_numClasses; c++) {
            result.append(m_headerInfo.classAttribute().value(c)).append("\t").
                    append(Double.toString(m_probOfClass[c])).append("\n");
        }

        result.append("\nThe probability of a word given the class\n");
        result.append("-----------------------------------------\n\t");

        for (int c = 0; c < m_numClasses; c++) {
            result.append(m_headerInfo.classAttribute().value(c)).append("\t");
        }

        result.append("\n");

        for (int w = 0; w < m_headerInfo.numAttributes(); w++) {
            if (w == m_headerInfo.classIndex()) {
                continue;
            }
            result.append(m_headerInfo.attribute(w).name()).append("\t");
            for (int c = 0; c < m_numClasses; c++) {
                double value = m_wordTotalForClass[c].getValue(w);
                if (value == 0) {
                    value = this.laplaceCorrectionOption.getValue();
                }
                result.append(value / m_classTotals[c]).append("\t");
            }
            result.append("\n");
        }
        StringUtils.appendNewline(result);
    }

    @Override
    public boolean isRandomizable() {
        return false;
    }
}
Java
/* * NaiveBayes.java * Copyright (C) 2007 University of Waikato, Hamilton, New Zealand * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.classifiers.bayes; import moa.classifiers.AbstractClassifier; import moa.classifiers.core.attributeclassobservers.AttributeClassObserver; import moa.classifiers.core.attributeclassobservers.GaussianNumericAttributeClassObserver; import moa.classifiers.core.attributeclassobservers.NominalAttributeClassObserver; import moa.core.AutoExpandVector; import moa.core.DoubleVector; import moa.core.Measurement; import moa.core.StringUtils; import weka.core.Instance; /** * Naive Bayes incremental learner. * * <p>Performs classic bayesian prediction while making naive assumption that * all inputs are independent.<br /> Naive Bayes is a classifier algorithm known * for its simplicity and low computational cost. 
Given n different classes, the * trained Naive Bayes classifier predicts for every unlabelled instance I the * class C to which it belongs with high accuracy.</p> * * <p>Parameters:</p> <ul> <li>-r : Seed for random behaviour of the * classifier</li> </ul> * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision: 7 $ */ public class NaiveBayes extends AbstractClassifier { private static final long serialVersionUID = 1L; @Override public String getPurposeString() { return "Naive Bayes classifier: performs classic bayesian prediction while making naive assumption that all inputs are independent."; } protected DoubleVector observedClassDistribution; protected AutoExpandVector<AttributeClassObserver> attributeObservers; @Override public void resetLearningImpl() { this.observedClassDistribution = new DoubleVector(); this.attributeObservers = new AutoExpandVector<AttributeClassObserver>(); } @Override public void trainOnInstanceImpl(Instance inst) { this.observedClassDistribution.addToValue((int) inst.classValue(), inst.weight()); for (int i = 0; i < inst.numAttributes() - 1; i++) { int instAttIndex = modelAttIndexToInstanceAttIndex(i, inst); AttributeClassObserver obs = this.attributeObservers.get(i); if (obs == null) { obs = inst.attribute(instAttIndex).isNominal() ? 
newNominalClassObserver() : newNumericClassObserver(); this.attributeObservers.set(i, obs); } obs.observeAttributeClass(inst.value(instAttIndex), (int) inst.classValue(), inst.weight()); } } @Override public double[] getVotesForInstance(Instance inst) { return doNaiveBayesPrediction(inst, this.observedClassDistribution, this.attributeObservers); } @Override protected Measurement[] getModelMeasurementsImpl() { return null; } @Override public void getModelDescription(StringBuilder out, int indent) { for (int i = 0; i < this.observedClassDistribution.numValues(); i++) { StringUtils.appendIndented(out, indent, "Observations for "); out.append(getClassNameString()); out.append(" = "); out.append(getClassLabelString(i)); out.append(":"); StringUtils.appendNewlineIndented(out, indent + 1, "Total observed weight = "); out.append(this.observedClassDistribution.getValue(i)); out.append(" / prob = "); out.append(this.observedClassDistribution.getValue(i) / this.observedClassDistribution.sumOfValues()); for (int j = 0; j < this.attributeObservers.size(); j++) { StringUtils.appendNewlineIndented(out, indent + 1, "Observations for "); out.append(getAttributeNameString(j)); out.append(": "); // TODO: implement observer output out.append(this.attributeObservers.get(j)); } StringUtils.appendNewline(out); } } @Override public boolean isRandomizable() { return false; } protected AttributeClassObserver newNominalClassObserver() { return new NominalAttributeClassObserver(); } protected AttributeClassObserver newNumericClassObserver() { return new GaussianNumericAttributeClassObserver(); } public static double[] doNaiveBayesPrediction(Instance inst, DoubleVector observedClassDistribution, AutoExpandVector<AttributeClassObserver> attributeObservers) { double[] votes = new double[observedClassDistribution.numValues()]; double observedClassSum = observedClassDistribution.sumOfValues(); for (int classIndex = 0; classIndex < votes.length; classIndex++) { votes[classIndex] = 
observedClassDistribution.getValue(classIndex) / observedClassSum; for (int attIndex = 0; attIndex < inst.numAttributes() - 1; attIndex++) { int instAttIndex = modelAttIndexToInstanceAttIndex(attIndex, inst); AttributeClassObserver obs = attributeObservers.get(attIndex); if ((obs != null) && !inst.isMissing(instAttIndex)) { votes[classIndex] *= obs.probabilityOfAttributeValueGivenClass(inst.value(instAttIndex), classIndex); } } } // TODO: need logic to prevent underflow? return votes; } // Naive Bayes Prediction using log10 for VFDR rules public static double[] doNaiveBayesPredictionLog(Instance inst, DoubleVector observedClassDistribution, AutoExpandVector<AttributeClassObserver> observers, AutoExpandVector<AttributeClassObserver> observers2) { AttributeClassObserver obs; double[] votes = new double[observedClassDistribution.numValues()]; double observedClassSum = observedClassDistribution.sumOfValues(); for (int classIndex = 0; classIndex < votes.length; classIndex++) { votes[classIndex] = Math.log10(observedClassDistribution.getValue(classIndex) / observedClassSum); for (int attIndex = 0; attIndex < inst.numAttributes() - 1; attIndex++) { int instAttIndex = modelAttIndexToInstanceAttIndex(attIndex, inst); if (inst.attribute(instAttIndex).isNominal()) { obs = observers.get(attIndex); } else { obs = observers2.get(attIndex); } if ((obs != null) && !inst.isMissing(instAttIndex)) { votes[classIndex] += Math.log10(obs.probabilityOfAttributeValueGivenClass(inst.value(instAttIndex), classIndex)); } } } return votes; } public void manageMemory(int currentByteSize, int maxByteSize) { // TODO Auto-generated method stub } }
Java
/* * RandomHoeffdingTree.java * Copyright (C) 2010 University of Waikato, Hamilton, New Zealand * @author Albert Bifet (abifet@cs.waikato.ac.nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.classifiers.trees; import moa.classifiers.bayes.NaiveBayes; import moa.classifiers.core.attributeclassobservers.AttributeClassObserver; import weka.core.Instance; import weka.core.Utils; /** * Random decision trees for data streams. 
* * @author Albert Bifet (abifet at cs dot waikato dot ac dot nz) * @version $Revision: 7 $ */ public class RandomHoeffdingTree extends HoeffdingTree { private static final long serialVersionUID = 1L; @Override public String getPurposeString() { return "Random decision trees for data streams."; } public static class RandomLearningNode extends ActiveLearningNode { private static final long serialVersionUID = 1L; protected int[] listAttributes; protected int numAttributes; public RandomLearningNode(double[] initialClassObservations) { super(initialClassObservations); } @Override public void learnFromInstance(Instance inst, HoeffdingTree ht) { this.observedClassDistribution.addToValue((int) inst.classValue(), inst.weight()); if (this.listAttributes == null) { this.numAttributes = (int) Math.floor(Math.sqrt(inst.numAttributes())); this.listAttributes = new int[this.numAttributes]; for (int j = 0; j < this.numAttributes; j++) { boolean isUnique = false; while (isUnique == false) { this.listAttributes[j] = ht.classifierRandom.nextInt(inst.numAttributes() - 1); isUnique = true; for (int i = 0; i < j; i++) { if (this.listAttributes[j] == this.listAttributes[i]) { isUnique = false; break; } } } } } for (int j = 0; j < this.numAttributes - 1; j++) { int i = this.listAttributes[j]; int instAttIndex = modelAttIndexToInstanceAttIndex(i, inst); AttributeClassObserver obs = this.attributeObservers.get(i); if (obs == null) { obs = inst.attribute(instAttIndex).isNominal() ? 
ht.newNominalClassObserver() : ht.newNumericClassObserver(); this.attributeObservers.set(i, obs); } obs.observeAttributeClass(inst.value(instAttIndex), (int) inst.classValue(), inst.weight()); } } } public static class LearningNodeNB extends RandomLearningNode { private static final long serialVersionUID = 1L; public LearningNodeNB(double[] initialClassObservations) { super(initialClassObservations); } @Override public double[] getClassVotes(Instance inst, HoeffdingTree ht) { if (getWeightSeen() >= ht.nbThresholdOption.getValue()) { return NaiveBayes.doNaiveBayesPrediction(inst, this.observedClassDistribution, this.attributeObservers); } return super.getClassVotes(inst, ht); } @Override public void disableAttribute(int attIndex) { // should not disable poor atts - they are used in NB calc } } public static class LearningNodeNBAdaptive extends LearningNodeNB { private static final long serialVersionUID = 1L; protected double mcCorrectWeight = 0.0; protected double nbCorrectWeight = 0.0; public LearningNodeNBAdaptive(double[] initialClassObservations) { super(initialClassObservations); } @Override public void learnFromInstance(Instance inst, HoeffdingTree ht) { int trueClass = (int) inst.classValue(); if (this.observedClassDistribution.maxIndex() == trueClass) { this.mcCorrectWeight += inst.weight(); } if (Utils.maxIndex(NaiveBayes.doNaiveBayesPrediction(inst, this.observedClassDistribution, this.attributeObservers)) == trueClass) { this.nbCorrectWeight += inst.weight(); } super.learnFromInstance(inst, ht); } @Override public double[] getClassVotes(Instance inst, HoeffdingTree ht) { if (this.mcCorrectWeight > this.nbCorrectWeight) { return this.observedClassDistribution.getArrayCopy(); } return NaiveBayes.doNaiveBayesPrediction(inst, this.observedClassDistribution, this.attributeObservers); } } public RandomHoeffdingTree() { this.removePoorAttsOption = null; } @Override protected LearningNode newLearningNode(double[] initialClassObservations) { LearningNode ret; int 
predictionOption = this.leafpredictionOption.getChosenIndex(); if (predictionOption == 0) { //MC ret = new RandomLearningNode(initialClassObservations); } else if (predictionOption == 1) { //NB ret = new LearningNodeNB(initialClassObservations); } else { //NBAdaptive ret = new LearningNodeNBAdaptive(initialClassObservations); } return ret; } @Override public boolean isRandomizable() { return true; } }
Java
/* * FIMTDD.java * Copyright (C) 2014 Jožef Stefan Institute, Ljubljana, Slovenia * Copyright (C) 2013 University of Porto, Portugal * @author Aljaž Osojnik <aljaz.osojnik@ijs.si> * @author Katie de Lange, E. Almeida, J. Gama * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * */ /* Project Knowledge Discovery from Data Streams, FCT LIAAD-INESC TEC, * * Contact: jgama@fep.up.pt */ package moa.classifiers.trees; import java.util.Arrays; import java.util.LinkedList; import java.util.List; import java.util.Random; import java.util.Stack; import weka.core.Instance; import moa.options.*; import moa.AbstractMOAObject; import moa.classifiers.Classifier; import moa.classifiers.Regressor; import moa.classifiers.core.AttributeSplitSuggestion; import moa.classifiers.core.attributeclassobservers.FIMTDDNumericAttributeClassObserver; import moa.classifiers.core.conditionaltests.InstanceConditionalTest; import moa.classifiers.core.splitcriteria.SplitCriterion; import moa.classifiers.AbstractClassifier; import moa.core.AutoExpandVector; import moa.core.DoubleVector; import moa.core.Measurement; import moa.core.SizeOf; import moa.core.StringUtils; /* * Implementation of FIMTDD, regression and model trees for data streams. 
*/ public class FIMTDD extends AbstractClassifier implements Regressor { private static final long serialVersionUID = 1L; protected Node treeRoot; private int leafNodeCount = 0; private int splitNodeCount = 0; private double examplesSeen = 0.0; private double sumOfValues = 0.0; private double sumOfSquares = 0.0; private DoubleVector sumOfAttrValues = new DoubleVector(); private DoubleVector sumOfAttrSquares = new DoubleVector(); public int maxID = 0; //region ================ OPTIONS ================ public ClassOption splitCriterionOption = new ClassOption( "splitCriterion", 's', "Split criterion to use.", SplitCriterion.class, "moa.classifiers.core.splitcriteria.VarianceReductionSplitCriterion"); public IntOption gracePeriodOption = new IntOption( "gracePeriod", 'g', "The number of instances a leaf should observe between split attempts.", 200, 0, Integer.MAX_VALUE); public FloatOption splitConfidenceOption = new FloatOption( "splitConfidence", 'c', "The allowable error in split decision, values closer to 0 will take longer to decide.", 0.0000001, 0.0, 1.0); public FloatOption tieThresholdOption = new FloatOption( "tieThreshold", 't', "Threshold below which a split will be forced to break ties.", 0.05, 0.0, 1.0); public FloatOption PageHinckleyAlphaOption = new FloatOption( "PageHinckleyAlpha", 'a', "The alpha value to use in the Page Hinckley change detection tests.", 0.005, 0.0, 1.0); public IntOption PageHinckleyThresholdOption = new IntOption( "PageHinckleyThreshold", 'h', "The threshold value to be used in the Page Hinckley change detection tests.", 50, 0, Integer.MAX_VALUE); public FloatOption alternateTreeFadingFactorOption = new FloatOption( "alternateTreeFadingFactor", 'f', "The fading factor to use when deciding if an alternate tree should replace an original.", 0.995, 0.0, 1.0); public IntOption alternateTreeTMinOption = new IntOption( "alternateTreeTMin", 'y', "The Tmin value to use when deciding if an alternate tree should replace an original.", 150, 
0, Integer.MAX_VALUE); public IntOption alternateTreeTimeOption = new IntOption( "alternateTreeTime", 'u', "The 'time' (in terms of number of instances) value to use when deciding if an alternate tree should be discarded.", 1500, 0, Integer.MAX_VALUE); public FlagOption regressionTreeOption = new FlagOption( "regressionTree", 'e', "Build a regression tree instead of a model tree."); public FloatOption learningRatioOption = new FloatOption( "learningRatio", 'l', "Learning ratio to use for training the Perceptrons in the leaves.", 0.02); public FloatOption learningRateDecayFactorOption = new FloatOption( "learningRatioDecayFactor", 'd', "Learning rate decay factor (not used when learning rate is constant).", 0.001); public FlagOption learningRatioConstOption = new FlagOption( "learningRatioConst", 'o', "Keep learning rate constant instead of decaying (if kept constant learning ratio is suggested to be 0.001)."); //endregion ================ OPTIONS ================ //region ================ CLASSES ================ public abstract static class Node extends AbstractMOAObject { private static final long serialVersionUID = 1L; protected double weightSeenAtLastSplitEvaluation; public int ID; // The parent of this particular node protected SplitNode parent; protected Node alternateTree; protected Node originalNode; protected AutoExpandVector<FIMTDDNumericAttributeClassObserver> attributeObservers = new AutoExpandVector<FIMTDDNumericAttributeClassObserver>(); // The error values for the Page Hinckley test // PHmT = the cumulative sum of the errors // PHMT = the minimum error value seen so far protected boolean changeDetection = true; protected double PHsum = 0; protected double PHmin = Double.MAX_VALUE; // The statistics for this node: // Number of instances that have reached it protected double examplesSeen; // Sum of y values protected double sumOfValues; // Sum of squared y values protected double sumOfSquares; // Sum of absolute errors protected double sumOfAbsErrors; 
// Needed for PH tracking of mean error public Node() { } public void copyStatistics(Node node) { examplesSeen = node.examplesSeen; sumOfValues = node.sumOfValues; sumOfSquares = node.sumOfSquares; sumOfAbsErrors = node.sumOfAbsErrors; // sumOfSquaredErrors = node.sumOfSquaredErrors; } public int calcByteSize() { return (int) (SizeOf.sizeOf(this)) + (int) (SizeOf.fullSizeOf(this.attributeObservers)); } public int calcByteSizeIncludingSubtree() { return calcByteSize(); } public boolean isLeaf() { return true; } public double examplesSeen() { return examplesSeen; } /** * Set the parent node */ public void setParent(SplitNode parent) { this.parent = parent; } /** * Return the parent node */ public SplitNode getParent() { return parent; } public void disableChangeDetection() { changeDetection = false; } public void restartChangeDetection() { changeDetection = true; PHsum = 0; PHmin = Integer.MAX_VALUE; } /** * Check to see if the tree needs updating */ public boolean PageHinckleyTest(double error, double threshold) { // Update the cumulative mT sum PHsum += error; // Update the minimum mT value if the new mT is // smaller than the current minimum if(PHsum < PHmin) { PHmin = PHsum; } // Return true if the cumulative value - the current minimum is // greater than the current threshold (in which case we should adapt) return PHsum - PHmin > threshold; } public void getDescription(StringBuilder sb, int i) {} public int countLeaves() { return 1; } public void describeSubtree(FIMTDD tree, StringBuilder out, int indent) { StringUtils.appendIndented(out, indent, "Leaf"); } public double getPrediction(Instance inst, FIMTDD tree) { return 0; } } public static class LeafNode extends Node { private static final long serialVersionUID = 1L; // Perceptron model that carries out the actual learning in each node public FIMTDDPerceptron learningModel; protected double examplesSeenAtLastSplitEvaluation = 0; public double examplesSeenAtLastSplitEvaluation() { return 
examplesSeenAtLastSplitEvaluation; } public void setExamplesSeenAtLastSplitEvaluation(double seen) { examplesSeenAtLastSplitEvaluation = seen; } /** * Create a new LeafNode */ public LeafNode(FIMTDD tree) { ID = tree.maxID; if (tree.buildingModelTree()) { learningModel = tree.newLeafModel(); } examplesSeen = 0; sumOfValues = 0; sumOfSquares = 0; sumOfAbsErrors = 0; // sumOfSquaredErrors = 0; } /** * Method to learn from an instance that passes the new instance to the perceptron learner, * and also prevents the class value from being truncated to an int when it is passed to the * attribute observer */ public void learnFromInstance(Instance inst, FIMTDD tree, boolean growthAllowed) { // Update the statistics for this node // number of instances passing through the node examplesSeen += 1; // sum of y values sumOfValues += inst.classValue(); // sum of squared y values sumOfSquares += inst.classValue() * inst.classValue(); // sum of absolute errors sumOfAbsErrors += Math.abs(tree.normalizeTargetValue(inst.classValue()) - tree.normalizeTargetValue(getPrediction(inst, tree))); if (tree.buildingModelTree()) learningModel.updatePerceptron(inst, tree);; for (int i = 0; i < inst.numAttributes() - 1; i++) { int instAttIndex = modelAttIndexToInstanceAttIndex(i, inst); FIMTDDNumericAttributeClassObserver obs = attributeObservers.get(i); if (obs == null) { // At this stage all nominal attributes are ignored if (inst.attribute(instAttIndex).isNumeric()) { obs = tree.newNumericClassObserver(); this.attributeObservers.set(i, obs); } } if (obs != null) { obs.observeAttributeClass(inst.value(instAttIndex),inst.classValue(), inst.weight()); } } if (growthAllowed) { checkForSplit(tree); } } /** * Return the best split suggestions for this node using the given split criteria */ public AttributeSplitSuggestion[] getBestSplitSuggestions(SplitCriterion criterion, FIMTDD tree) { List<AttributeSplitSuggestion> bestSuggestions = new LinkedList<AttributeSplitSuggestion>(); // Set the 
nodeStatistics up as the preSplitDistribution, rather than the observedClassDistribution double[] nodeSplitDist = new double[] {examplesSeen, sumOfValues, sumOfSquares}; for (int i = 0; i < this.attributeObservers.size(); i++) { FIMTDDNumericAttributeClassObserver obs = this.attributeObservers.get(i); if (obs != null) { // AT THIS STAGE NON-NUMERIC ATTRIBUTES ARE IGNORED AttributeSplitSuggestion bestSuggestion = null; if (obs instanceof FIMTDDNumericAttributeClassObserver) { bestSuggestion = obs.getBestEvaluatedSplitSuggestion(criterion, nodeSplitDist, i, false); } if (bestSuggestion != null) { bestSuggestions.add(bestSuggestion); } } } return bestSuggestions.toArray(new AttributeSplitSuggestion[bestSuggestions.size()]); } /** * Retrieve the class votes using the perceptron learner */ public double getPredictionModel(Instance inst, FIMTDD tree) { return learningModel.prediction(inst, tree); } public double getPredictionTargetMean(Instance inst, FIMTDD tree) { return (examplesSeen > 0.0) ? sumOfValues / examplesSeen : 0.0; } public double getPrediction(Instance inst, FIMTDD tree) { return (tree.buildingModelTree()) ? getPredictionModel(inst, tree) : getPredictionTargetMean(inst, tree); } public double[] getClassVotes(Instance inst, FIMTDD tree) { return new double[] {getPrediction(inst, tree)}; } public void checkForSplit(FIMTDD tree) { // If it has seen Nmin examples since it was last tested for splitting, attempt a split of this node if (examplesSeen - examplesSeenAtLastSplitEvaluation >= tree.gracePeriodOption.getValue()) { int index = (parent != null) ? 
parent.getChildIndex(this) : 0; tree.attemptToSplit(this, parent, index); // Take note of how many instances were seen when this split evaluation was made, so we know when to perform the next split evaluation examplesSeenAtLastSplitEvaluation = examplesSeen; } } public void describeSubtree(FIMTDD tree, StringBuilder out, int indent) { StringUtils.appendIndented(out, indent, "Leaf "); if (tree.buildingModelTree()) { learningModel.getModelDescription(out, 0); } else { out.append(tree.getClassNameString() + " = " + String.format("%.4f", (sumOfValues / examplesSeen))); StringUtils.appendNewline(out); } } } public static class SplitNode extends Node { private static final long serialVersionUID = 1L; protected InstanceConditionalTest splitTest; protected AutoExpandVector<Node> children = new AutoExpandVector<Node>(); // Keep track of the statistics for loss error calculations protected double lossExamplesSeen; protected double lossFadedSumOriginal; protected double lossFadedSumAlternate; protected double lossNumQiTests; protected double lossSumQi; protected double previousWeight = 0; /** * Create a new SplitNode * @param tree */ public SplitNode(InstanceConditionalTest splitTest, FIMTDD tree) { this.splitTest = splitTest; ID = tree.maxID; } public void disableChangeDetection() { changeDetection = false; for (Node child : children) { child.disableChangeDetection(); } } public void restartChangeDetection() { if (this.alternateTree == null) { changeDetection = true; PHsum = 0; PHmin = Integer.MAX_VALUE; for (Node child : children) child.restartChangeDetection(); } } protected void setChild(int i, Node child) { children.set(i, child); } public int instanceChildIndex(Instance inst) { return this.splitTest.branchForInstance(inst); } public Node getChild(int i) { return children.get(i); } public int getChildIndex(Node child) { return children.indexOf(child); } public void initializeAlternateTree(FIMTDD tree) { // Start a new alternate tree, beginning with a learning node 
// NOTE(review): this chunk begins inside SplitNode.initializeAlternateTree(FIMTDD tree);
// the method signature is above this view. The statements below attach a fresh leaf as the
// alternate subtree root and zero the loss statistics used to compare the two trees.
alternateTree = tree.newLeafNode();
alternateTree.originalNode = this;
// Set up the blank statistics
// Number of instances reaching this node since the alternate tree was started
lossExamplesSeen = 0;
// Faded squared error (original tree)
lossFadedSumOriginal = 0;
// Faded squared error (alternate tree)
lossFadedSumAlternate = 0;
// Number of evaluations of alternate tree
lossNumQiTests = 0;
// Sum of Qi values
lossSumQi = 0;
// Number of examples at last test
previousWeight = 0;
// Disable the change detection mechanism below this node
disableChangeDetection();
}

/**
 * Counts the leaves in the subtree rooted at this split node.
 * Uses an explicit stack (iterative DFS) instead of recursion.
 */
public int countLeaves() {
    Stack<Node> stack = new Stack<Node>();
    stack.addAll(children);
    int ret = 0;
    while (!stack.isEmpty()) {
        Node node = stack.pop();
        if (node instanceof LeafNode) {
            ret++;
        } else if (node instanceof SplitNode) {
            stack.addAll(((SplitNode) node).children);
        }
    }
    return ret;
}

/**
 * Appends a textual description of this subtree (one "if <condition>:" line per
 * branch, children indented two spaces deeper) to {@code out}.
 */
@Override
public void describeSubtree(FIMTDD tree, StringBuilder out, int indent) {
    for (int branch = 0; branch < children.size(); branch++) {
        Node child = getChild(branch);
        if (child != null) {
            StringUtils.appendIndented(out, indent, "if ");
            out.append(this.splitTest.describeConditionForBranch(branch, tree.getModelContext()));
            out.append(": ");
            StringUtils.appendNewline(out);
            child.describeSubtree(tree, out, indent + 2);
        }
    }
}

/** Routes the instance down the branch selected by the split test and returns that child's prediction. */
public double getPrediction(Instance inst, FIMTDD tree) {
    return children.get(splitTest.branchForInstance(inst)).getPrediction(inst, tree);
}
}

/**
 * Linear perceptron used in the leaves when FIMT-DD is run as a model tree.
 * Learns by stochastic gradient descent on normalized attribute values.
 */
public class FIMTDDPerceptron {

    // The perceptron weights (last index is the constant/bias term)
    protected DoubleVector weightAttribute = new DoubleVector();

    // Running sums of observed target values, used for normalization statistics
    protected double sumOfValues;

    protected double sumOfSquares;

    // The number of instances contributing to this model
    protected int instancesSeen = 0;

    // If the model should be reset or not
    protected boolean reset;

    public String getPurposeString() {
        return "A perceptron regressor as specified by Ikonomovska et al. used for FIMTDD";
    }

    /** Copy constructor: clones the weight vector of {@code original} (used when a leaf splits). */
    public FIMTDDPerceptron(FIMTDDPerceptron original) {
        weightAttribute = (DoubleVector) original.weightAttribute.copy();
        reset = false;
    }

    public FIMTDDPerceptron() {
        reset = true;
    }

    public DoubleVector getWeights() {
        return weightAttribute;
    }

    /**
     * Update the model using the provided instance: lazily initializes the weights
     * to small random values, updates counters and the learning rate, then performs
     * one gradient step via {@link #updateWeights}.
     */
    public void updatePerceptron(Instance inst, FIMTDD tree) {
        // Initialize perceptron if necessary
        if (this.reset == true) {
            reset = false;
            weightAttribute = new DoubleVector();
            instancesSeen = 0;
            for (int j = 0; j < inst.numAttributes(); j++) {
                // The last index corresponds to the constant b; weights start in [-1, 1)
                weightAttribute.setValue(j, 2 * tree.classifierRandom.nextDouble() - 1);
            }
        }

        // Update attribute statistics
        instancesSeen++;

        // Update weights: either a constant learning rate or one decayed with the number of instances.
        // NOTE(review): learningRatioOption is referenced unqualified while the sibling options use
        // "tree."; presumably this resolves through the enclosing FIMTDD instance — confirm.
        double learningRatio = 0.0;
        if (tree.learningRatioConstOption.isSet()) {
            learningRatio = learningRatioOption.getValue();
        } else {
            learningRatio = learningRatioOption.getValue() / (1 + instancesSeen * tree.learningRateDecayFactorOption.getValue());
        }

        sumOfValues += inst.classValue();
        sumOfSquares += inst.classValue() * inst.classValue();

        updateWeights(inst, learningRatio, tree);
    }

    /** Single gradient-descent step: w += (target - prediction) * learningRatio * x (all normalized). */
    public void updateWeights(Instance inst, double learningRatio, FIMTDD tree) {
        // Compute the normalized instance and the delta
        DoubleVector normalizedInstance = normalizedInstance(inst, tree);
        double normalizedPrediction = prediction(normalizedInstance);
        double normalizedValue = tree.normalizeTargetValue(inst.classValue());
        double delta = normalizedValue - normalizedPrediction;
        normalizedInstance.scaleValues(delta * learningRatio);
        weightAttribute.addValues(normalizedInstance);
    }

    /**
     * Returns the instance with each numeric attribute standardized as
     * (value - mean) / (3 * sd), using the tree-level running statistics.
     * The extra trailing component multiplies the bias weight (1.0 once enough
     * examples were seen, 0.0 before).
     */
    public DoubleVector normalizedInstance(Instance inst, FIMTDD tree) {
        // Normalize Instance
        DoubleVector normalizedInstance = new DoubleVector();
        for (int j = 0; j < inst.numAttributes() - 1; j++) {
            int instAttIndex = modelAttIndexToInstanceAttIndex(j, inst);
            double mean = tree.sumOfAttrValues.getValue(j) / tree.examplesSeen;
            double sd = computeSD(tree.sumOfAttrSquares.getValue(j), tree.sumOfAttrValues.getValue(j), tree.examplesSeen);
            if (inst.attribute(instAttIndex).isNumeric() && tree.examplesSeen > 1 && sd > 0)
                normalizedInstance.setValue(j, (inst.value(instAttIndex) - mean) / (3 * sd));
            else
                normalizedInstance.setValue(j, 0);
        }
        if (tree.examplesSeen > 1)
            normalizedInstance.setValue(inst.numAttributes() - 1, 1.0); // Value to be multiplied with the constant factor
        else
            normalizedInstance.setValue(inst.numAttributes() - 1, 0.0);
        return normalizedInstance;
    }

    /**
     * Output the prediction made by this perceptron on the given (already
     * normalized) instance values: the dot product with the weight vector.
     */
    public double prediction(DoubleVector instanceValues) {
        return scalarProduct(weightAttribute, instanceValues);
    }

    /** Normalizes the instance, predicts in normalized space, then maps back to the target scale. */
    private double prediction(Instance inst, FIMTDD tree) {
        DoubleVector normalizedInstance = normalizedInstance(inst, tree);
        double normalizedPrediction = prediction(normalizedInstance);
        return denormalizePrediction(normalizedPrediction, tree);
    }

    // Inverse of normalizeTargetValue: y = yNorm * 3 * sd + mean.
    // NOTE(review): guards on unqualified examplesSeen while the statistics use
    // tree.examplesSeen — presumably both resolve to the same FIMTDD field; confirm.
    private double denormalizePrediction(double normalizedPrediction, FIMTDD tree) {
        double mean = tree.sumOfValues / tree.examplesSeen;
        double sd = computeSD(tree.sumOfSquares, tree.sumOfValues, tree.examplesSeen);
        if (examplesSeen > 1)
            return normalizedPrediction * sd * 3 + mean;
        else
            return 0.0;
    }

    /** Renders the linear model as "name = w0 * att0 + w1 * att1 + ... + bias". */
    public void getModelDescription(StringBuilder out, int indent) {
        StringUtils.appendIndented(out, indent, getClassNameString() + " =");
        if (getModelContext() != null) {
            for (int j = 0; j < getModelContext().numAttributes() - 1; j++) {
                if (getModelContext().attribute(j).isNumeric()) {
                    // No "+" before the first term or before negative weights (sign is printed by %f)
                    out.append((j == 0 || weightAttribute.getValue(j) < 0) ? " " : " + ");
                    out.append(String.format("%.4f", weightAttribute.getValue(j)));
                    out.append(" * ");
                    out.append(getAttributeNameString(j));
                }
            }
            out.append(" + " + weightAttribute.getValue((getModelContext().numAttributes() - 1)));
        }
        StringUtils.appendNewline(out);
    }
}

//endregion ================ CLASSES ================

//region ================ METHODS ================

// Regressor methods
public FIMTDD() {}

public String getPurposeString() {
    return "Implementation of the FIMT-DD tree as described by Ikonomovska et al.";
}

/** Discards the current tree and all node counters. */
public void resetLearningImpl() {
    this.treeRoot = null;
    this.leafNodeCount = 0;
    this.splitNodeCount = 0;
    this.maxID = 0;
}

public boolean isRandomizable() {
    return true;
}

public void getModelDescription(StringBuilder out, int indent) {
    if (treeRoot != null) treeRoot.describeSubtree(this, out, indent);
}

protected Measurement[] getModelMeasurementsImpl() {
    return new Measurement[]{
            new Measurement("tree size (nodes)", this.leafNodeCount + this.splitNodeCount),
            new Measurement("tree size (leaves)", this.leafNodeCount)
    };
}

public int calcByteSize() {
    int size = (int) SizeOf.sizeOf(this);
    if (this.treeRoot != null) {
        size += this.treeRoot.calcByteSize();
    }
    return size;
}

/** Regression vote: a single-element array holding the tree's prediction (0 before any training). */
public double[] getVotesForInstance(Instance inst) {
    if (treeRoot == null) {
        return new double[] {0};
    }
    double prediction = treeRoot.getPrediction(inst, this);
    return new double[] {prediction};
}

/**
 * Standardizes a target value as (value - mean) / (3 * sd) using the running
 * target statistics; returns 0 when fewer than two examples were seen or sd == 0.
 */
public double normalizeTargetValue(double value) {
    if (examplesSeen > 1) {
        double sd = Math.sqrt((sumOfSquares - ((sumOfValues * sumOfValues)/examplesSeen))/examplesSeen);
        double average = sumOfValues / examplesSeen;
        if (sd > 0 && examplesSeen > 1)
            return (value - average) / (3 * sd);
        else
            return 0.0;
    }
    return 0.0;
}

/** Absolute error between the tree's prediction and the true target, both in normalized space. */
public double getNormalizedError(Instance inst) {
    double normalPrediction = normalizeTargetValue(treeRoot.getPrediction(inst, this));
    double normalValue = normalizeTargetValue(inst.classValue());
    return Math.abs(normalValue - normalPrediction);
}

/**
 * Method for updating (training) the model using a new instance: refreshes the
 * global target/attribute statistics, then routes the instance down the tree.
 */
public void trainOnInstanceImpl(Instance inst) {
    checkRoot();

    examplesSeen++;
    sumOfValues += inst.classValue();
    sumOfSquares += inst.classValue() * inst.classValue();

    for (int i = 0; i < inst.numAttributes() - 1; i++) {
        int aIndex = modelAttIndexToInstanceAttIndex(i, inst);
        sumOfAttrValues.addToValue(aIndex, inst.value(aIndex));
        sumOfAttrSquares.addToValue(aIndex, inst.value(aIndex) * inst.value(aIndex));
    }

    processInstance(inst, treeRoot, treeRoot.getPrediction(inst, this), getNormalizedError(inst), true, false);
}

/**
 * Walks the instance from {@code node} to a leaf, updating per-node error
 * statistics on the way. At split nodes with an alternate tree it maintains the
 * faded-loss comparison (Qi statistic) and either promotes the alternate tree
 * (Qi &gt; 0), discards it (average Qi declining or time budget exceeded), or keeps
 * training it recursively. Page–Hinckley change detection may start a new
 * alternate tree.
 *
 * @param inAlternate true when this call is already descending an alternate subtree
 */
public void processInstance(Instance inst, Node node, double prediction, double normalError, boolean growthAllowed, boolean inAlternate) {
    Node currentNode = node;
    while (true) {
        if (currentNode instanceof LeafNode) {
            ((LeafNode) currentNode).learnFromInstance(inst, this, growthAllowed);
            break;
        } else {
            currentNode.examplesSeen++;
            currentNode.sumOfAbsErrors += normalError;
            SplitNode iNode = (SplitNode) currentNode;
            if (!inAlternate && iNode.alternateTree != null) {
                boolean altTree = true;
                // Squared losses of the original tree's prediction and the alternate tree's prediction
                double lossO = Math.pow(inst.classValue() - prediction, 2);
                double lossA = Math.pow(inst.classValue() - iNode.alternateTree.getPrediction(inst, this), 2);
                // Exponentially faded loss sums
                iNode.lossFadedSumOriginal = lossO + alternateTreeFadingFactorOption.getValue() * iNode.lossFadedSumOriginal;
                iNode.lossFadedSumAlternate = lossA + alternateTreeFadingFactorOption.getValue() * iNode.lossFadedSumAlternate;
                iNode.lossExamplesSeen++;

                // Qi > 0 means the alternate tree currently has lower faded loss
                double Qi = Math.log((iNode.lossFadedSumOriginal) / (iNode.lossFadedSumAlternate));
                double previousQiAverage = iNode.lossSumQi / iNode.lossNumQiTests;
                iNode.lossSumQi += Qi;
                iNode.lossNumQiTests += 1;
                double QiAverage = iNode.lossSumQi / iNode.lossNumQiTests;

                // Only evaluate a possible switch every TMin instances
                if (iNode.lossExamplesSeen - iNode.previousWeight >= alternateTreeTMinOption.getValue()) {
                    iNode.previousWeight = iNode.lossExamplesSeen;
                    if (Qi > 0) {
                        // Alternate tree is better: splice it in place of the current node
                        SplitNode parent = currentNode.getParent();
                        if (parent != null) {
                            Node replacementTree = iNode.alternateTree;
                            parent.setChild(parent.getChildIndex(currentNode), replacementTree);
                            if (growthAllowed) replacementTree.restartChangeDetection();
                        } else {
                            treeRoot = iNode.alternateTree;
                            treeRoot.restartChangeDetection();
                        }
                        currentNode = iNode.alternateTree;
                        altTree = false;
                    } else if (
                            (QiAverage < previousQiAverage && iNode.lossExamplesSeen >= (10 * this.gracePeriodOption.getValue()))
                            || iNode.lossExamplesSeen >= alternateTreeTimeOption.getValue()
                            ) {
                        // Alternate tree is not improving (or ran out of time): discard it
                        iNode.alternateTree = null;
                        if (growthAllowed) iNode.restartChangeDetection();
                        altTree = false;
                    }
                }

                if (altTree) {
                    // Keep training the alternate tree; growth of the original path is suspended
                    growthAllowed = false;
                    processInstance(inst, iNode.alternateTree, prediction, normalError, true, true);
                }
            }

            if (iNode.changeDetection && !inAlternate) {
                if (iNode.PageHinckleyTest(normalError - iNode.sumOfAbsErrors / iNode.examplesSeen - PageHinckleyAlphaOption.getValue(), PageHinckleyThresholdOption.getValue())) {
                    iNode.initializeAlternateTree(this);
                }
            }
            if (currentNode instanceof SplitNode) {
                currentNode = ((SplitNode) currentNode).getChild(iNode.instanceChildIndex(inst));
            } else { // if the replaced alternate tree is just a leaf node
                ((LeafNode) currentNode).learnFromInstance(inst, this, growthAllowed);
                break;
            }
        }
    }
}

//region --- Object instatiation methods

protected FIMTDDNumericAttributeClassObserver newNumericClassObserver() {
    return new FIMTDDNumericAttributeClassObserver();
}

protected SplitNode newSplitNode(InstanceConditionalTest splitTest) {
    //maxID++;
    return new SplitNode(splitTest, this);
}

protected LeafNode newLeafNode() {
    maxID++;
    return new LeafNode(this);
}

protected FIMTDDPerceptron newLeafModel() {
    return new FIMTDDPerceptron();
}

//endregion --- Object instatiation methods

//region --- Processing methods

/** Lazily creates the root as a single leaf on the first training instance. */
protected void checkRoot() {
    if (treeRoot == null) {
        treeRoot = newLeafNode();
        leafNodeCount = 1;
    }
}

/** Hoeffding bound: sqrt(R^2 * ln(1/delta) / (2n)). */
public static double computeHoeffdingBound(double range, double confidence, double n) {
    return Math.sqrt(( (range * range) * Math.log(1 / confidence)) / (2.0 * n));
}

/** True when the leaves carry perceptron models (model tree) rather than constant predictions. */
public boolean buildingModelTree() {
    return !regressionTreeOption.isSet();
}

/**
 * Evaluates the best split suggestions at {@code node} and, if the Hoeffding-bound
 * test (or the tie-break threshold) is satisfied, replaces the leaf with a split
 * node and fresh child leaves. Otherwise prunes bad splits from the E-BST
 * attribute observers.
 */
protected void attemptToSplit(LeafNode node, SplitNode parent, int parentIndex) {
    // Set the split criterion to use to the SDR split criterion as described by Ikonomovska et al.
    SplitCriterion splitCriterion = (SplitCriterion) getPreparedClassOption(this.splitCriterionOption);

    // Using this criterion, find the best split per attribute and rank the results
    AttributeSplitSuggestion[] bestSplitSuggestions = node.getBestSplitSuggestions(splitCriterion, this);
    Arrays.sort(bestSplitSuggestions);

    // Declare a variable to determine if any of the splits should be performed
    boolean shouldSplit = false;

    // If only one split was returned, use it
    if (bestSplitSuggestions.length < 2) {
        shouldSplit = bestSplitSuggestions.length > 0;
    } else { // Otherwise, consider which of the splits proposed may be worth trying
        // Determine the Hoeffding bound value, used to select how many instances should be used to make a test decision
        // to feel reasonably confident that the test chosen by this sample is the same as what would be chosen using infinite examples
        double hoeffdingBound = computeHoeffdingBound(1, this.splitConfidenceOption.getValue(), node.examplesSeen());

        // Determine the top two ranked splitting suggestions
        AttributeSplitSuggestion bestSuggestion = bestSplitSuggestions[bestSplitSuggestions.length - 1];
        AttributeSplitSuggestion secondBestSuggestion = bestSplitSuggestions[bestSplitSuggestions.length - 2];

        // If the upper bound of the sample mean for the ratio of SDR(best suggestion) to SDR(second best suggestion),
        // as determined using the Hoeffding bound, is less than 1, then the true mean is also less than 1, and thus at this
        // particular moment of observation the bestSuggestion is indeed the best split option with confidence 1-delta, and
        // splitting should occur.
        // Alternatively, if two or more splits are very similar or identical in terms of their splits, then a threshold limit
        // (default 0.05) is applied to the Hoeffding bound; if the Hoeffding bound is smaller than this limit then the two
        // competing attributes are equally good, and the split will be made on the one with the higher SDR value.
        if ((secondBestSuggestion.merit / bestSuggestion.merit < 1 - hoeffdingBound) || (hoeffdingBound < this.tieThresholdOption.getValue())) {
            shouldSplit = true;
        }
        // If the splitting criterion was not met, initiate pruning of the E-BST structures in each attribute observer
        else {
            for (int i = 0; i < node.attributeObservers.size(); i++) {
                FIMTDDNumericAttributeClassObserver obs = node.attributeObservers.get(i);
                if (obs != null) {
                    obs.removeBadSplits(splitCriterion, secondBestSuggestion.merit / bestSuggestion.merit, bestSuggestion.merit, hoeffdingBound);
                }
            }
        }
    }

    // If the splitting criterion were met, split the current node using the chosen attribute test, and
    // make two new branches leading to (empty) leaves
    if (shouldSplit) {
        AttributeSplitSuggestion splitDecision = bestSplitSuggestions[bestSplitSuggestions.length - 1];

        SplitNode newSplit = newSplitNode(splitDecision.splitTest);
        newSplit.copyStatistics(node);
        newSplit.changeDetection = node.changeDetection;
        newSplit.ID = node.ID;
        leafNodeCount--;
        for (int i = 0; i < splitDecision.numSplits(); i++) {
            LeafNode newChild = newLeafNode();
            if (buildingModelTree()) {
                // Copy the splitting node's perceptron to it's children
                newChild.learningModel = new FIMTDDPerceptron((FIMTDDPerceptron) node.learningModel);
            }
            newChild.changeDetection = node.changeDetection;
            newChild.setParent(newSplit);
            newSplit.setChild(i, newChild);
            leafNodeCount++;
        }
        // Wire the new split into the tree: root, alternate-tree root, or ordinary child slot
        if (parent == null && node.originalNode == null) {
            treeRoot = newSplit;
        } else if (parent == null && node.originalNode != null) {
            node.originalNode.alternateTree = newSplit;
        } else {
            parent.setChild(parentIndex, newSplit);
            newSplit.setParent(parent);
        }
        splitNodeCount++;
    }
}

/** Sample standard deviation from running sums; 0 when fewer than two values. */
public double computeSD(double squaredVal, double val, double size) {
    if (size > 1)
        return Math.sqrt((squaredVal - ((val * val) / size)) / size);
    else
        return 0.0;
}

/** Dot product of two DoubleVectors (missing indices read as 0). */
public double scalarProduct(DoubleVector u, DoubleVector v) {
    double ret = 0.0;
    for (int i = 0; i < Math.max(u.numValues(), v.numValues()); i++) {
        ret += u.getValue(i) * v.getValue(i);
    }
    return ret;
}

//endregion --- Processing methods

//endregion ================ METHODS ================
}
Java
/*
 *    HoeffdingAdaptiveTree.java
 *    Copyright (C) 2008 University of Waikato, Hamilton, New Zealand
 *    @author Albert Bifet (abifet at cs dot waikato dot ac dot nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.classifiers.trees;

import java.util.LinkedList;
import java.util.List;
import java.util.Random;

import moa.classifiers.bayes.NaiveBayes;
import moa.classifiers.core.conditionaltests.InstanceConditionalTest;
import moa.classifiers.core.driftdetection.ADWIN;
import moa.core.AutoExpandVector;
import moa.core.DoubleVector;
import moa.core.MiscUtils;
import moa.options.MultiChoiceOption;
import weka.core.Instance;
import weka.core.Utils;

/**
 * Hoeffding Adaptive Tree for evolving data streams.
 *
 * <p>This adaptive Hoeffding Tree uses ADWIN to monitor performance of
 * branches on the tree and to replace them with new branches when their
 * accuracy decreases if the new branches are more accurate.</p>
 * See details in:</p>
 * <p>Adaptive Learning from Evolving Data Streams. Albert Bifet, Ricard Gavaldà.
 * IDA 2009</p>
 *
 * <ul>
 * <li> Same parameters as <code>HoeffdingTreeNBAdaptive</code></li>
 * <li> -l : Leaf prediction to use: MajorityClass (MC), Naive Bayes (NB) or NaiveBayes
 * adaptive (NBAdaptive).
 * </ul>
 *
 * @author Albert Bifet (abifet at cs dot waikato dot ac dot nz)
 * @version $Revision: 7 $
 */
public class HoeffdingAdaptiveTree extends HoeffdingTree {

    private static final long serialVersionUID = 1L;

    @Override
    public String getPurposeString() {
        return "Hoeffding Adaptive Tree for evolving data streams that uses ADWIN to replace branches for new ones.";
    }

    /* public MultiChoiceOption leafpredictionOption = new MultiChoiceOption(
            "leafprediction", 'l', "Leaf prediction to use.", new String[]{
                "MC", "NB", "NBAdaptive"}, new String[]{
                "Majority class", "Naive Bayes", "Naive Bayes Adaptive"}, 2);*/

    /**
     * Common interface implemented by both adaptive node types, adding the
     * ADWIN-based error-monitoring operations to the base tree nodes.
     */
    public interface NewNode {

        // Change for adwin
        //public boolean getErrorChange();
        public int numberLeaves();

        public double getErrorEstimation();

        public double getErrorWidth();

        public boolean isNullError();

        public void killTreeChilds(HoeffdingAdaptiveTree ht);

        public void learnFromInstance(Instance inst, HoeffdingAdaptiveTree ht, SplitNode parent, int parentBranch);

        public void filterInstanceToLeaves(Instance inst, SplitNode myparent, int parentBranch, List<FoundNode> foundNodes, boolean updateSplitterCounts);
    }

    /**
     * Split node that monitors its own error with ADWIN and may grow an
     * alternate subtree which replaces it when demonstrably more accurate.
     */
    public static class AdaSplitNode extends SplitNode implements NewNode {

        private static final long serialVersionUID = 1L;

        // Candidate replacement subtree, grown while drift is suspected
        protected Node alternateTree;

        // ADWIN estimator of this subtree's 0/1 error
        protected ADWIN estimationErrorWeight;

        //public boolean isAlternateTree = false;
        public boolean ErrorChange = false;

        protected int randomSeed = 1;

        protected Random classifierRandom;

        //public boolean getErrorChange() {
        //		return ErrorChange;
        //}
        @Override
        public int calcByteSizeIncludingSubtree() {
            int byteSize = calcByteSize();
            if (alternateTree != null) {
                byteSize += alternateTree.calcByteSizeIncludingSubtree();
            }
            if (estimationErrorWeight != null) {
                byteSize += estimationErrorWeight.measureByteSize();
            }
            for (Node child : this.children) {
                if (child != null) {
                    byteSize += child.calcByteSizeIncludingSubtree();
                }
            }
            return byteSize;
        }

        public AdaSplitNode(InstanceConditionalTest splitTest, double[] classObservations, int size) {
            super(splitTest, classObservations, size);
            this.classifierRandom = new Random(this.randomSeed);
        }

        public AdaSplitNode(InstanceConditionalTest splitTest, double[] classObservations) {
            super(splitTest, classObservations);
            this.classifierRandom = new Random(this.randomSeed);
        }

        @Override
        public int numberLeaves() {
            int numLeaves = 0;
            for (Node child : this.children) {
                if (child != null) {
                    numLeaves += ((NewNode) child).numberLeaves();
                }
            }
            return numLeaves;
        }

        @Override
        public double getErrorEstimation() {
            return this.estimationErrorWeight.getEstimation();
        }

        @Override
        public double getErrorWidth() {
            double w = 0.0;
            if (isNullError() == false) {
                w = this.estimationErrorWeight.getWidth();
            }
            return w;
        }

        @Override
        public boolean isNullError() {
            return (this.estimationErrorWeight == null);
        }

        // SplitNodes can have alternative trees, but LearningNodes can't
        // LearningNodes can split, but SplitNodes can't
        // Parent nodes are allways SplitNodes
        @Override
        public void learnFromInstance(Instance inst, HoeffdingAdaptiveTree ht, SplitNode parent, int parentBranch) {
            int trueClass = (int) inst.classValue();
            //New option vote
            int k = MiscUtils.poisson(1.0, this.classifierRandom);
            Instance weightedInst = (Instance) inst.copy();
            if (k > 0) {
                //weightedInst.setWeight(inst.weight() * k);
                // NOTE(review): unlike AdaLearningNode, the Poisson weight is NOT applied here
                // (line deliberately commented out in the original) — confirm intent before changing.
            }
            //Compute ClassPrediction using filterInstanceToLeaf
            //int ClassPrediction = Utils.maxIndex(filterInstanceToLeaf(inst, null, -1).node.getClassVotes(inst, ht));
            int ClassPrediction = 0;
            if (filterInstanceToLeaf(inst, parent, parentBranch).node != null) {
                ClassPrediction = Utils.maxIndex(filterInstanceToLeaf(inst, parent, parentBranch).node.getClassVotes(inst, ht));
            }
            boolean blCorrect = (trueClass == ClassPrediction);

            if (this.estimationErrorWeight == null) {
                this.estimationErrorWeight = new ADWIN();
            }
            double oldError = this.getErrorEstimation();
            // ADWIN reports a change in the error distribution (input: 0 = correct, 1 = error)
            this.ErrorChange = this.estimationErrorWeight.setInput(blCorrect == true ? 0.0 : 1.0);
            if (this.ErrorChange == true && oldError > this.getErrorEstimation()) {
                //if error is decreasing, don't do anything
                this.ErrorChange = false;
            }

            // Check condition to build a new alternate tree
            //if (this.isAlternateTree == false) {
            if (this.ErrorChange == true) {//&& this.alternateTree == null) {
                //Start a new alternative tree : learning node
                this.alternateTree = ht.newLearningNode();
                //this.alternateTree.isAlternateTree = true;
                ht.alternateTrees++;
            } // Check condition to replace tree
            else if (this.alternateTree != null && ((NewNode) this.alternateTree).isNullError() == false) {
                if (this.getErrorWidth() > 300 && ((NewNode) this.alternateTree).getErrorWidth() > 300) {
                    double oldErrorRate = this.getErrorEstimation();
                    double altErrorRate = ((NewNode) this.alternateTree).getErrorEstimation();
                    double fDelta = .05;
                    //if (gNumAlts>0) fDelta=fDelta/gNumAlts;
                    // Confidence bound on the difference between the two error rates
                    double fN = 1.0 / ((double) ((NewNode) this.alternateTree).getErrorWidth()) + 1.0 / ((double) this.getErrorWidth());
                    double Bound = (double) Math.sqrt((double) 2.0 * oldErrorRate * (1.0 - oldErrorRate) * Math.log(2.0 / fDelta) * fN);
                    if (Bound < oldErrorRate - altErrorRate) {
                        // Switch alternate tree: the alternate is significantly more accurate
                        ht.activeLeafNodeCount -= this.numberLeaves();
                        ht.activeLeafNodeCount += ((NewNode) this.alternateTree).numberLeaves();
                        killTreeChilds(ht);
                        if (parent != null) {
                            parent.setChild(parentBranch, this.alternateTree);
                            //((AdaSplitNode) parent.getChild(parentBranch)).alternateTree = null;
                        } else {
                            // Switch root tree
                            ht.treeRoot = ((AdaSplitNode) ht.treeRoot).alternateTree;
                        }
                        ht.switchedAlternateTrees++;
                    } else if (Bound < altErrorRate - oldErrorRate) {
                        // Erase alternate tree: significantly worse than the original
                        if (this.alternateTree instanceof ActiveLearningNode) {
                            this.alternateTree = null;
                            //ht.activeLeafNodeCount--;
                        } else if (this.alternateTree instanceof InactiveLearningNode) {
                            this.alternateTree = null;
                            //ht.inactiveLeafNodeCount--;
                        } else {
                            ((AdaSplitNode) this.alternateTree).killTreeChilds(ht);
                        }
                        ht.prunedAlternateTrees++;
                    }
                }
            }
            //}
            //learnFromInstance alternate Tree and Child nodes
            if (this.alternateTree != null) {
                ((NewNode) this.alternateTree).learnFromInstance(weightedInst, ht, parent, parentBranch);
            }
            int childBranch = this.instanceChildIndex(inst);
            Node child = this.getChild(childBranch);
            if (child != null) {
                ((NewNode) child).learnFromInstance(weightedInst, ht, this, childBranch);
            }
        }

        /** Recursively removes this subtree's children (and their alternate trees), updating the tree's leaf counters. */
        @Override
        public void killTreeChilds(HoeffdingAdaptiveTree ht) {
            for (Node child : this.children) {
                if (child != null) {
                    //Delete alternate tree if it exists
                    if (child instanceof AdaSplitNode && ((AdaSplitNode) child).alternateTree != null) {
                        ((NewNode) ((AdaSplitNode) child).alternateTree).killTreeChilds(ht);
                        ht.prunedAlternateTrees++;
                    }
                    //Recursive delete of SplitNodes
                    if (child instanceof AdaSplitNode) {
                        ((NewNode) child).killTreeChilds(ht);
                    }
                    if (child instanceof ActiveLearningNode) {
                        child = null;
                        ht.activeLeafNodeCount--;
                    } else if (child instanceof InactiveLearningNode) {
                        child = null;
                        ht.inactiveLeafNodeCount--;
                    }
                }
            }
        }

        //New for option votes
        // Collects every leaf the instance reaches, following both the normal branch
        // and the alternate tree (alternate-tree paths are tagged with parentBranch -999).
        //@Override
        public void filterInstanceToLeaves(Instance inst, SplitNode myparent, int parentBranch, List<FoundNode> foundNodes, boolean updateSplitterCounts) {
            if (updateSplitterCounts) {
                this.observedClassDistribution.addToValue((int) inst.classValue(), inst.weight());
            }
            int childIndex = instanceChildIndex(inst);
            if (childIndex >= 0) {
                Node child = getChild(childIndex);
                if (child != null) {
                    ((NewNode) child).filterInstanceToLeaves(inst, this, childIndex, foundNodes, updateSplitterCounts);
                } else {
                    foundNodes.add(new FoundNode(null, this, childIndex));
                }
            }
            if (this.alternateTree != null) {
                ((NewNode) this.alternateTree).filterInstanceToLeaves(inst, this, -999, foundNodes, updateSplitterCounts);
            }
        }
    }

    /**
     * Learning (leaf) node that monitors its error with ADWIN and trains with a
     * Poisson(1)-weighted copy of each instance (online bagging style).
     */
    public static class AdaLearningNode extends LearningNodeNBAdaptive implements NewNode {

        private static final long serialVersionUID = 1L;

        protected ADWIN estimationErrorWeight;

        public boolean ErrorChange = false;

        protected int randomSeed = 1;

        protected Random classifierRandom;

        @Override
        public int calcByteSize() {
            int byteSize = super.calcByteSize();
            if (estimationErrorWeight != null) {
                byteSize += estimationErrorWeight.measureByteSize();
            }
            return byteSize;
        }

        public AdaLearningNode(double[] initialClassObservations) {
            super(initialClassObservations);
            this.classifierRandom = new Random(this.randomSeed);
        }

        @Override
        public int numberLeaves() {
            return 1;
        }

        @Override
        public double getErrorEstimation() {
            if (this.estimationErrorWeight != null) {
                return this.estimationErrorWeight.getEstimation();
            } else {
                return 0;
            }
        }

        @Override
        public double getErrorWidth() {
            return this.estimationErrorWeight.getWidth();
        }

        @Override
        public boolean isNullError() {
            return (this.estimationErrorWeight == null);
        }

        @Override
        public void killTreeChilds(HoeffdingAdaptiveTree ht) {
            // Leaves have no children; nothing to do
        }

        @Override
        public void learnFromInstance(Instance inst, HoeffdingAdaptiveTree ht, SplitNode parent, int parentBranch) {
            int trueClass = (int) inst.classValue();
            //New option vote
            int k = MiscUtils.poisson(1.0, this.classifierRandom);
            Instance weightedInst = (Instance) inst.copy();
            if (k > 0) {
                weightedInst.setWeight(inst.weight() * k);
            }
            //Compute ClassPrediction using filterInstanceToLeaf
            int ClassPrediction = Utils.maxIndex(this.getClassVotes(inst, ht));

            boolean blCorrect = (trueClass == ClassPrediction);

            if (this.estimationErrorWeight == null) {
                this.estimationErrorWeight = new ADWIN();
            }
            double oldError = this.getErrorEstimation();
            // ADWIN input: 0 = correct prediction, 1 = error
            this.ErrorChange = this.estimationErrorWeight.setInput(blCorrect == true ? 0.0 : 1.0);
            if (this.ErrorChange == true && oldError > this.getErrorEstimation()) {
                this.ErrorChange = false;
            }

            //Update statistics
            learnFromInstance(weightedInst, ht);	//inst

            //Check for Split condition
            double weightSeen = this.getWeightSeen();
            if (weightSeen - this.getWeightSeenAtLastSplitEvaluation() >= ht.gracePeriodOption.getValue()) {
                ht.attemptToSplit(this, parent, parentBranch);
                this.setWeightSeenAtLastSplitEvaluation(weightSeen);
            }

            //learnFromInstance alternate Tree and Child nodes
			/*if (this.alternateTree != null)  {
            this.alternateTree.learnFromInstance(inst,ht);
            }
            for (Node child : this.children) {
            if (child != null) {
            child.learnFromInstance(inst,ht);
            }
            }*/
        }

        // Votes according to the -l leaf-prediction option (MC / NB / NBAdaptive),
        // scaled by the squared ADWIN error estimate so less accurate leaves weigh less.
        @Override
        public double[] getClassVotes(Instance inst, HoeffdingTree ht) {
            double[] dist;
            int predictionOption = ((HoeffdingAdaptiveTree) ht).leafpredictionOption.getChosenIndex();
            if (predictionOption == 0) { //MC
                dist = this.observedClassDistribution.getArrayCopy();
            } else if (predictionOption == 1) { //NB
                dist = NaiveBayes.doNaiveBayesPrediction(inst,
                        this.observedClassDistribution, this.attributeObservers);
            } else { //NBAdaptive
                if (this.mcCorrectWeight > this.nbCorrectWeight) {
                    dist = this.observedClassDistribution.getArrayCopy();
                } else {
                    dist = NaiveBayes.doNaiveBayesPrediction(inst,
                            this.observedClassDistribution, this.attributeObservers);
                }
            }
            //New for option votes
            double distSum = Utils.sum(dist);
            if (distSum * this.getErrorEstimation() * this.getErrorEstimation() > 0.0) {
                Utils.normalize(dist, distSum * this.getErrorEstimation() * this.getErrorEstimation()); //Adding weight
            }
            return dist;
        }

        //New for option votes
        @Override
        public void filterInstanceToLeaves(Instance inst, SplitNode splitparent, int parentBranch,
                List<FoundNode> foundNodes, boolean updateSplitterCounts) {
            foundNodes.add(new FoundNode(this, splitparent, parentBranch));
        }
    }

    // Counters for started, discarded, and promoted alternate trees
    protected int alternateTrees;

    protected int prunedAlternateTrees;

    protected int switchedAlternateTrees;

    @Override
    protected LearningNode newLearningNode(double[] initialClassObservations) {
        // IDEA: to choose different learning nodes depending on predictionOption
        return new AdaLearningNode(initialClassObservations);
    }

    @Override
    protected SplitNode newSplitNode(InstanceConditionalTest splitTest,
            double[] classObservations, int size) {
        return new AdaSplitNode(splitTest, classObservations, size);
    }

    @Override
    protected SplitNode newSplitNode(InstanceConditionalTest splitTest,
            double[] classObservations) {
        return new AdaSplitNode(splitTest, classObservations);
    }

    @Override
    public void trainOnInstanceImpl(Instance inst) {
        if (this.treeRoot == null) {
            this.treeRoot = newLearningNode();
            this.activeLeafNodeCount = 1;
        }
        ((NewNode) this.treeRoot).learnFromInstance(inst, this, null, -1);
    }

    //New for options vote
    public FoundNode[] filterInstanceToLeaves(Instance inst,
            SplitNode parent, int parentBranch, boolean updateSplitterCounts) {
        List<FoundNode> nodes = new LinkedList<FoundNode>();
        ((NewNode) this.treeRoot).filterInstanceToLeaves(inst, parent, parentBranch, nodes,
                updateSplitterCounts);
        return nodes.toArray(new FoundNode[nodes.size()]);
    }

    // Sums the (error-weighted) votes from every leaf reached, skipping
    // alternate-tree leaves (tagged with parentBranch == -999).
    @Override
    public double[] getVotesForInstance(Instance inst) {
        if (this.treeRoot != null) {
            FoundNode[] foundNodes = filterInstanceToLeaves(inst,
                    null, -1, false);
            DoubleVector result = new DoubleVector();
            int predictionPaths = 0;
            for (FoundNode foundNode : foundNodes) {
                if (foundNode.parentBranch != -999) {
                    Node leafNode = foundNode.node;
                    if (leafNode == null) {
                        leafNode = foundNode.parent;
                    }
                    double[] dist = leafNode.getClassVotes(inst, this);
                    //Albert: changed for weights
                    //double distSum = Utils.sum(dist);
                    //if (distSum > 0.0) {
                    //	Utils.normalize(dist, distSum);
                    //}
                    result.addValues(dist);
                    //predictionPaths++;
                }
            }
            //if (predictionPaths > this.maxPredictionPaths) {
            //	this.maxPredictionPaths++;
            //}
            return result.getArrayRef();
        }
        return new double[0];
    }
}
Java
/*
 *    ASHoeffdingTree.java
 *    Copyright (C) 2008 University of Waikato, Hamilton, New Zealand
 *    @author Albert Bifet (abifet at cs dot waikato dot ac dot nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.classifiers.trees;

import weka.core.Instance;

/**
 * Adaptive Size Hoeffding Tree used in Bagging using trees of different size.
 * The Adaptive-Size Hoeffding Tree (ASHT) is derived from the Hoeffding Tree
 * algorithm with the following differences:
 * <ul>
 * <li> it has a maximum number of split nodes, or size
 * <li> after one node splits, if the number of split nodes of the ASHT tree
 * is higher than the maximum value, then it deletes some nodes to reduce its size
 * </ul>
 * The intuition behind this method is as follows: smaller trees adapt
 * more quickly to changes, and larger trees do better during periods with
 * no or little change, simply because they were built on more data. Trees
 * limited to size s will be reset about twice as often as trees with a size
 * limit of 2s. This creates a set of different reset-speeds for an ensemble of such
 * trees, and therefore a subset of trees that are a good approximation for the
 * current rate of change. It is important to note that resets will happen all
 * the time, even for stationary datasets, but this behaviour should not have
 * a negative impact on the ensemble's predictive performance.
 * When the tree size exceeds the maximum size value, there are two different
 * delete options: <ul>
 * <li> delete the oldest node, the root, and all of its children except the one
 * where the split has been made. After that, the root of the child not
 * deleted becomes the new root
 * <li> delete all the nodes of the tree, i.e., restart from a new root.
 * </ul>
 * The maximum allowed size for the n-th ASHT tree is twice the maximum
 * allowed size for the (n-1)-th tree. Moreover, each tree has a weight
 * proportional to the inverse of the square of its error, and it monitors its
 * error with an exponential weighted moving average (EWMA) with alpha = .01.
 * The size of the first tree is 2.
 * <br/><br/>
 * With this new method, it is attempted to improve bagging performance
 * by increasing tree diversity. It has been observed that boosting tends to
 * produce a more diverse set of classifiers than bagging, and this has been
 * cited as a factor in increased performance.<br/>
 * See more details in:<br/><br/>
 * Albert Bifet, Geoff Holmes, Bernhard Pfahringer, Richard Kirkby,
 * and Ricard Gavaldà. New ensemble methods for evolving data
 * streams. In 15th ACM SIGKDD International Conference on Knowledge
 * Discovery and Data Mining, 2009.<br/><br/>
 * The learner must be ASHoeffdingTree, a Hoeffding Tree with a maximum
 * size value.<br/><br/>
 * Example:<br/><br/>
 * <code>OzaBagASHT -l ASHoeffdingTree -s 10 -u -r </code>
 * Parameters:<ul>
 * <li>Same parameters as <code>OzaBag</code>
 * <li>-f : the size of first classifier in the bag.
 * <li>-u : Enable weight classifiers
 * <li>-r : Reset trees when size is higher than the max
 * </ul>
 *
 * @author Albert Bifet (abifet at cs dot waikato dot ac dot nz)
 * @version $Revision: 7 $
 */
public class ASHoeffdingTree extends HoeffdingTree {

    private static final long serialVersionUID = 1L;

    @Override
    public String getPurposeString() {
        return "Adaptive Size Hoeffding Tree used in Bagging using trees of different size.";
    }

    // Maximum number of split nodes before the tree is shrunk or reset
    protected int maxSize = 10000; //EXTENSION TO ASHT

    // When true, exceeding maxSize resets the whole tree instead of trimming it
    protected boolean resetTree = false;

    @Override
    public void resetLearningImpl() {
        this.treeRoot = null;
        this.decisionNodeCount = 0;
        this.activeLeafNodeCount = 0;
        this.inactiveLeafNodeCount = 0;
        this.inactiveLeafByteSizeEstimate = 0.0;
        this.activeLeafByteSizeEstimate = 0.0;
        this.byteSizeEstimateOverheadFraction = 1.0;
        this.growthAllowed = true;
    }

    /**
     * Standard Hoeffding-tree training step, extended so that after a split the
     * tree is shrunk (or fully reset, depending on the -r option) while the
     * number of split nodes exceeds {@link #maxSize}.
     */
    @Override
    public void trainOnInstanceImpl(Instance inst) {
        if (this.treeRoot == null) {
            this.treeRoot = newLearningNode();
            this.activeLeafNodeCount = 1;
        }
        FoundNode foundNode = this.treeRoot.filterInstanceToLeaf(inst, null, -1);
        Node leafNode = foundNode.node;
        if (leafNode == null) {
            leafNode = newLearningNode();
            foundNode.parent.setChild(foundNode.parentBranch, leafNode);
            this.activeLeafNodeCount++;
        }
        if (leafNode instanceof LearningNode) {
            LearningNode learningNode = (LearningNode) leafNode;
            learningNode.learnFromInstance(inst, this);
            if (this.growthAllowed
                    && (learningNode instanceof ActiveLearningNode)) {
                ActiveLearningNode activeLearningNode = (ActiveLearningNode) learningNode;
                double weightSeen = activeLearningNode.getWeightSeen();
                if (weightSeen
                        - activeLearningNode.getWeightSeenAtLastSplitEvaluation() >= this.gracePeriodOption.getValue()) {
                    attemptToSplit(activeLearningNode, foundNode.parent,
                            foundNode.parentBranch);
                    //EXTENSION TO ASHT
                    // if size too big, resize tree ONLY Split Nodes
                    while (this.decisionNodeCount >= this.maxSize && this.treeRoot instanceof SplitNode) {
                        if (this.resetTree == false) {
                            // Keep only the branch the instance follows; its root becomes the new tree root
                            resizeTree(this.treeRoot, ((SplitNode) this.treeRoot).instanceChildIndex(inst));
                            this.treeRoot = ((SplitNode) this.treeRoot).getChild(((SplitNode) this.treeRoot).instanceChildIndex(inst));
                        } else {
                            resetLearningImpl();
                        }
                    }
                    activeLearningNode.setWeightSeenAtLastSplitEvaluation(weightSeen);
                }
            }
        }
        if (this.trainingWeightSeenByModel
                % this.memoryEstimatePeriodOption.getValue() == 0) {
            estimateModelByteSizes();
        }
    }

    //EXTENSION TO ASHT
    public void setMaxSize(int mSize) {
        this.maxSize = mSize;
    }

    public void setResetTree() {
        this.resetTree = true;
    }

    /**
     * Recursively walks the subtree under the given child slot, decrementing the
     * appropriate node counters for every node it visits.
     * NOTE(review): the trailing {@code child = null} only clears a local variable —
     * the child is never detached from the parent here; the subtree is dropped by
     * the caller replacing the root in {@code trainOnInstanceImpl}. Confirm this is intended.
     */
    public void deleteNode(Node node, int childIndex) {
        Node child = ((SplitNode) node).getChild(childIndex);
        //if (child != null) {
        //}
        if (child instanceof SplitNode) {
            for (int branch = 0; branch < ((SplitNode) child).numChildren(); branch++) {
                deleteNode(child, branch);
            }
            this.decisionNodeCount--;
        } else if (child instanceof InactiveLearningNode) {
            this.inactiveLeafNodeCount--;
        } else if (child instanceof ActiveLearningNode) {
            this.activeLeafNodeCount--;
        }
        child = null;
    }

    /**
     * Deletes (via counter updates) every branch of the root except the one at
     * {@code childIndex}, preparing that child to become the new root.
     */
    public void resizeTree(Node node, int childIndex) {
        //Assume that this is root node
        if (node instanceof SplitNode) {
            for (int branch = 0; branch < ((SplitNode) node).numChildren(); branch++) {
                if (branch != childIndex) {
                    deleteNode(node, branch);
                }
            }
        }
    }
}
/* ===== concatenation artifact (was a bare "Java" language tag); next file: HoeffdingOptionTree.java ===== */
/* * HoeffdingOptionTree.java * Copyright (C) 2007 University of Waikato, Hamilton, New Zealand * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.classifiers.trees; import weka.core.Instance; import weka.core.Utils; import java.io.File; import java.io.FileOutputStream; import java.io.PrintStream; import java.util.Arrays; import java.util.Comparator; import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Set; import moa.AbstractMOAObject; import moa.classifiers.AbstractClassifier; import moa.classifiers.bayes.NaiveBayes; import moa.classifiers.core.attributeclassobservers.AttributeClassObserver; import moa.classifiers.core.AttributeSplitSuggestion; import moa.classifiers.core.attributeclassobservers.DiscreteAttributeClassObserver; import moa.classifiers.core.conditionaltests.InstanceConditionalTest; import moa.classifiers.core.attributeclassobservers.NullAttributeClassObserver; import moa.classifiers.core.conditionaltests.NumericAttributeBinaryTest; import moa.classifiers.core.attributeclassobservers.NumericAttributeClassObserver; import moa.classifiers.core.splitcriteria.SplitCriterion; import moa.core.AutoExpandVector; import moa.core.DoubleVector; import moa.core.Measurement; import moa.core.SizeOf; import moa.core.StringUtils; import moa.options.*; /** * Hoeffding Option Tree. 
* * <p> * Hoeffding Option Trees are regular Hoeffding trees containing additional * option nodes that allow several tests to be applied, leading to multiple * Hoeffding trees as separate paths. They consist of a single structure that * efficiently represents multiple trees. A particular example can travel down * multiple paths of the tree, contributing, in different ways, to different * options.</p> * * <p> * See for details:</p> * <p> * B. Pfahringer, G. Holmes, and R. Kirkby. New options for hoeffding trees. In * AI, pages 90–99, 2007.</p> * * <p> * Parameters:</p> <ul> <li>-o : Maximum number of option paths per node</li> * <li>-m : Maximum memory consumed by the tree</li> <li>-n : Numeric estimator * to use :</li> <ul> <li> Gaussian approximation evaluating 10 splitpoints</li> * <li> Gaussian approximation evaluating 100 splitpoints</li> <li> * Greenwald-Khanna quantile summary with 10 tuples</li> <li> Greenwald-Khanna * quantile summary with 100 tuples</li> <li> Greenwald-Khanna quantile summary * with 1000 tuples</li> <li> VFML method with 10 bins</li> <li> VFML method * with 100 bins</li> <li> VFML method with 1000 bins</li> <li> Exhaustive * binary tree</li> </ul> <li>-e : How many instances between memory consumption * checks</li> <li>-g : The number of instances a leaf should observe between * split attempts</li> <li>-s : Split criterion to use. 
 Example :
 * InfoGainSplitCriterion</li> <li>-c : The allowable error in split decision,
 * values closer to 0 will take longer to decide</li> <li>-w : The allowable
 * error in secondary split decisions, values closer to 0 will take longer to
 * decide</li> <li>-t : Threshold below which a split will be forced to break
 * ties</li> <li>-b : Only allow binary splits</li> <li>-z : Memory strategy to
 * use</li> <li>-r : Disable poor attributes</li> <li>-p : Disable
 * pre-pruning</li> <li>-d : File to append option table to.</li>
 * <li> -l : Leaf prediction to use: MajorityClass (MC), Naive Bayes (NB) or
 * NaiveBayes adaptive (NBAdaptive).</li>
 * <li> -q : The number of instances a leaf should observe before permitting
 * Naive Bayes</li>
 * </ul>
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public class HoeffdingOptionTree extends AbstractClassifier {

    private static final long serialVersionUID = 1L;

    @Override
    public String getPurposeString() {
        return "Hoeffding Option Tree: single tree that represents multiple trees.";
    }

    public IntOption maxOptionPathsOption = new IntOption("maxOptionPaths",
            'o', "Maximum number of option paths per node.", 5, 1,
            Integer.MAX_VALUE);

    public IntOption maxByteSizeOption = new IntOption("maxByteSize", 'm',
            "Maximum memory consumed by the tree.", 33554432, 0,
            Integer.MAX_VALUE);

    // (A legacy commented-out MultiChoiceOption for the numeric estimator was
    // removed here; it is superseded by the ClassOption below.)
    public ClassOption numericEstimatorOption = new ClassOption("numericEstimator",
            'n', "Numeric estimator to use.", NumericAttributeClassObserver.class,
            "GaussianNumericAttributeClassObserver");

    public ClassOption nominalEstimatorOption = new ClassOption("nominalEstimator",
            'd', "Nominal estimator to use.", DiscreteAttributeClassObserver.class,
            "NominalAttributeClassObserver");

    public IntOption memoryEstimatePeriodOption = new IntOption(
            "memoryEstimatePeriod", 'e',
            "How many instances between memory consumption checks.", 1000000,
            0, Integer.MAX_VALUE);

    public IntOption gracePeriodOption = new IntOption(
            "gracePeriod", 'g',
            "The number of instances a leaf should observe between split attempts.",
            200, 0, Integer.MAX_VALUE);

    public ClassOption splitCriterionOption = new ClassOption("splitCriterion",
            's', "Split criterion to use.", SplitCriterion.class,
            "InfoGainSplitCriterion");

    public FloatOption splitConfidenceOption = new FloatOption(
            "splitConfidence", 'c',
            "The allowable error in split decision, values closer to 0 will take longer to decide.",
            0.0000001, 0.0, 1.0);

    public FloatOption secondarySplitConfidenceOption = new FloatOption(
            "secondarySplitConfidence", 'w',
            "The allowable error in secondary split decisions, values closer to 0 will take longer to decide.",
            0.1, 0.0, 1.0);

    public FloatOption tieThresholdOption = new FloatOption("tieThreshold",
            't', "Threshold below which a split will be forced to break ties.",
            0.05, 0.0, 1.0);

    public FlagOption binarySplitsOption = new FlagOption("binarySplits", 'b',
            "Only allow binary splits.");

    public FlagOption removePoorAttsOption = new FlagOption("removePoorAtts",
            'r', "Disable poor attributes.");

    public FlagOption noPrePruneOption = new FlagOption("noPrePrune", 'p',
            "Disable pre-pruning.");

    public FileOption dumpFileOption = new FileOption("dumpFile", 'f',
            "File to append option table to.", null, "csv", true);

    public IntOption memoryStrategyOption = new IntOption("memStrategy", 'z',
            "Memory strategy to use.", 2);

    /**
     * Result of routing an instance down the tree: the reached node together
     * with its parent and the branch taken to get there.
     */
    public static class FoundNode {

        public Node node;

        public SplitNode parent;

        public int parentBranch; // set to -999 for option leaves

        public FoundNode(Node node, SplitNode parent, int parentBranch) {
            this.node = node;
            this.parent = parent;
            this.parentBranch = parentBranch;
        }
    }

    /**
     * Base tree node: a leaf holding an observed class distribution.
     */
    public static class Node extends AbstractMOAObject {

        private static final long serialVersionUID = 1L;

        protected DoubleVector observedClassDistribution;

        public Node(double[] classObservations) {
            this.observedClassDistribution = new DoubleVector(classObservations);
        }

        public int calcByteSize() {
            return (int) (SizeOf.sizeOf(this) + SizeOf.fullSizeOf(this.observedClassDistribution));
        }

        public int calcByteSizeIncludingSubtree() {
            return calcByteSize();
        }

        public boolean isLeaf() {
            return true;
        }

        /** Collects every leaf the instance reaches (multiple, due to options). */
        public FoundNode[] filterInstanceToLeaves(Instance inst,
                SplitNode parent, int parentBranch, boolean updateSplitterCounts) {
            List<FoundNode> nodes = new LinkedList<FoundNode>();
            filterInstanceToLeaves(inst, parent, parentBranch, nodes,
                    updateSplitterCounts);
            return nodes.toArray(new FoundNode[nodes.size()]);
        }

        public void filterInstanceToLeaves(Instance inst,
                SplitNode splitparent, int parentBranch,
                List<FoundNode> foundNodes, boolean updateSplitterCounts) {
            foundNodes.add(new FoundNode(this, splitparent, parentBranch));
        }

        public double[] getObservedClassDistribution() {
            return this.observedClassDistribution.getArrayCopy();
        }

        /** Returns the normalized class distribution observed at this node. */
        public double[] getClassVotes(Instance inst, HoeffdingOptionTree ht) {
            double[] dist = this.observedClassDistribution.getArrayCopy();
            double distSum = Utils.sum(dist);
            if (distSum > 0.0) {
                Utils.normalize(dist, distSum);
            }
            return dist;
        }

        public boolean observedClassDistributionIsPure() {
            return this.observedClassDistribution.numNonZeroEntries() < 2;
        }

        public void describeSubtree(HoeffdingOptionTree ht, StringBuilder out,
                int indent) {
            StringUtils.appendIndented(out, indent, "Leaf ");
            out.append(ht.getClassNameString());
            out.append(" = ");
            out.append(ht.getClassLabelString(this.observedClassDistribution.maxIndex()));
            out.append(" weights: ");
            this.observedClassDistribution.getSingleLineDescription(out,
                    ht.treeRoot.observedClassDistribution.numValues());
            StringUtils.appendNewline(out);
        }

        public int subtreeDepth() {
            return 0;
        }

        /** Misclassification weight if this leaf predicted its majority class. */
        public double calculatePromise() {
            double totalSeen = this.observedClassDistribution.sumOfValues();
            return totalSeen > 0.0 ? (totalSeen - this.observedClassDistribution.getValue(this.observedClassDistribution.maxIndex()))
                    : 0.0;
        }

        public void getDescription(StringBuilder sb, int indent) {
            // NOTE(review): passes null for the tree, but describeSubtree
            // dereferences its ht argument — this would NPE if ever called; verify.
            describeSubtree(null, sb, indent);
        }
    }

    /**
     * Internal decision node; may also carry an extra option path via
     * {@link #nextOption}. An optionCount of -999 marks an optional split.
     */
    public static class SplitNode extends Node {

        private static final long serialVersionUID = 1L;

        protected InstanceConditionalTest splitTest;

        protected SplitNode parent;

        protected Node nextOption;

        protected int optionCount; // set to -999 for optional splits

        protected AutoExpandVector<Node> children = new AutoExpandVector<Node>();

        @Override
        public int calcByteSize() {
            return super.calcByteSize()
                    + (int) (SizeOf.sizeOf(this.children) + SizeOf.fullSizeOf(this.splitTest));
        }

        @Override
        public int calcByteSizeIncludingSubtree() {
            int byteSize = calcByteSize();
            for (Node child : this.children) {
                if (child != null) {
                    byteSize += child.calcByteSizeIncludingSubtree();
                }
            }
            if (this.nextOption != null) {
                byteSize += this.nextOption.calcByteSizeIncludingSubtree();
            }
            return byteSize;
        }

        public SplitNode(InstanceConditionalTest splitTest,
                double[] classObservations) {
            super(classObservations);
            this.splitTest = splitTest;
        }

        public int numChildren() {
            return this.children.size();
        }

        public void setChild(int index, Node child) {
            if ((this.splitTest.maxBranches() >= 0)
                    && (index >= this.splitTest.maxBranches())) {
                throw new IndexOutOfBoundsException();
            }
            this.children.set(index, child);
        }

        public Node getChild(int index) {
            return this.children.get(index);
        }

        public int instanceChildIndex(Instance inst) {
            return this.splitTest.branchForInstance(inst);
        }

        @Override
        public boolean isLeaf() {
            return false;
        }

        /** Routes the instance down the matching branch AND any option path. */
        @Override
        public void filterInstanceToLeaves(Instance inst, SplitNode myparent,
                int parentBranch, List<FoundNode> foundNodes,
                boolean updateSplitterCounts) {
            if (updateSplitterCounts) {
                this.observedClassDistribution.addToValue((int) inst.classValue(), inst.weight());
            }
            int childIndex = instanceChildIndex(inst);
            if (childIndex >= 0) {
                Node child = getChild(childIndex);
                if (child != null) {
                    child.filterInstanceToLeaves(inst, this, childIndex,
                            foundNodes, updateSplitterCounts);
                } else {
                    foundNodes.add(new FoundNode(null, this, childIndex));
                }
            }
            if (this.nextOption != null) {
                this.nextOption.filterInstanceToLeaves(inst, this, -999,
                        foundNodes, updateSplitterCounts);
            }
        }

        @Override
        public void describeSubtree(HoeffdingOptionTree ht, StringBuilder out,
                int indent) {
            for (int branch = 0; branch < numChildren(); branch++) {
                Node child = getChild(branch);
                if (child != null) {
                    StringUtils.appendIndented(out, indent, "if ");
                    out.append(this.splitTest.describeConditionForBranch(branch,
                            ht.getModelContext()));
                    out.append(": ");
                    out.append("** option count = " + this.optionCount);
                    StringUtils.appendNewline(out);
                    child.describeSubtree(ht, out, indent + 2);
                }
            }
        }

        @Override
        public int subtreeDepth() {
            int maxChildDepth = 0;
            for (Node child : this.children) {
                if (child != null) {
                    int depth = child.subtreeDepth();
                    if (depth > maxChildDepth) {
                        maxChildDepth = depth;
                    }
                }
            }
            return maxChildDepth + 1;
        }

        /** Merit the already-installed split achieves on the given prior distribution. */
        public double computeMeritOfExistingSplit(
                SplitCriterion splitCriterion, double[] preDist) {
            double[][] postDists = new double[this.children.size()][];
            for (int i = 0; i < this.children.size(); i++) {
                if (this.children.get(i) != null) {
                    postDists[i] = this.children.get(i).getObservedClassDistribution();
                } else {
                    System.out.println("error");
                }
            }
            return splitCriterion.getMeritOfSplit(preDist, postDists);
        }

        /**
         * Propagates option-path counts upward/downward after a new option is
         * added below {@code source}. Optional splits (-999) delegate to their
         * parent.
         */
        public void updateOptionCount(SplitNode source, HoeffdingOptionTree hot) {
            if (this.optionCount == -999) {
                this.parent.updateOptionCount(source, hot);
            } else {
                // find the largest option count among split children, scanning
                // along the chain of option splits hanging off nextOption
                int maxChildCount = -999;
                SplitNode curr = this;
                while (curr != null) {
                    for (Node child : curr.children) {
                        if (child instanceof SplitNode) {
                            SplitNode splitChild = (SplitNode) child;
                            if (splitChild.optionCount > maxChildCount) {
                                maxChildCount = splitChild.optionCount;
                            }
                        }
                    }
                    if ((curr.nextOption != null)
                            && (curr.nextOption instanceof SplitNode)) {
                        curr = (SplitNode) curr.nextOption;
                    } else {
                        curr = null;
                    }
                }
                if (maxChildCount > this.optionCount) { // currently only works
                    // one
                    // way - adding, not
                    // removing
                    int delta = maxChildCount - this.optionCount;
                    this.optionCount = maxChildCount;
                    if (this.optionCount >= hot.maxOptionPathsOption.getValue()) {
                        killOptionLeaf(hot);
                    }
                    // push the increase down every sibling subtree except the
                    // one that triggered the update
                    curr = this;
                    while (curr != null) {
                        for (Node child : curr.children) {
                            if (child instanceof SplitNode) {
                                SplitNode splitChild = (SplitNode) child;
                                if (splitChild != source) {
                                    splitChild.updateOptionCountBelow(delta, hot);
                                }
                            }
                        }
                        if ((curr.nextOption != null)
                                && (curr.nextOption instanceof SplitNode)) {
                            curr = (SplitNode) curr.nextOption;
                        } else {
                            curr = null;
                        }
                    }
                    if (this.parent != null) {
                        this.parent.updateOptionCount(this, hot);
                    }
                }
            }
        }

        /** Adds {@code delta} to this subtree's option counts (skipping -999 markers). */
        public void updateOptionCountBelow(int delta, HoeffdingOptionTree hot) {
            if (this.optionCount != -999) {
                this.optionCount += delta;
                if (this.optionCount >= hot.maxOptionPathsOption.getValue()) {
                    killOptionLeaf(hot);
                }
            }
            for (Node child : this.children) {
                if (child instanceof SplitNode) {
                    SplitNode splitChild = (SplitNode) child;
                    splitChild.updateOptionCountBelow(delta, hot);
                }
            }
            if (this.nextOption instanceof SplitNode) {
                ((SplitNode) this.nextOption).updateOptionCountBelow(delta, hot);
            }
        }

        /** Removes the learning leaf at the end of this node's option chain. */
        private void killOptionLeaf(HoeffdingOptionTree hot) {
            if (this.nextOption instanceof SplitNode) {
                ((SplitNode) this.nextOption).killOptionLeaf(hot);
            } else if (this.nextOption instanceof ActiveLearningNode) {
                this.nextOption = null;
                hot.activeLeafNodeCount--;
            } else if (this.nextOption instanceof InactiveLearningNode) {
                this.nextOption = null;
                hot.inactiveLeafNodeCount--;
            }
        }

        /** Walks up past -999 markers to the option count that governs this node. */
        public int getHeadOptionCount() {
            SplitNode sn = this;
            while (sn.optionCount == -999) {
                sn = sn.parent;
            }
            return sn.optionCount;
        }
    }

    public static abstract class LearningNode extends Node {

        private static final long serialVersionUID = 1L;

        public LearningNode(double[] initialClassObservations) {
            super(initialClassObservations);
        }

        public abstract void learnFromInstance(Instance inst,
                HoeffdingOptionTree ht);
    }

    /** Frozen leaf: only accumulates the class distribution, never splits. */
    public static class InactiveLearningNode extends LearningNode {

        private static final long serialVersionUID = 1L;

        public InactiveLearningNode(double[] initialClassObservations) {
            super(initialClassObservations);
        }

        @Override
        public void learnFromInstance(Instance inst, HoeffdingOptionTree ht) {
            this.observedClassDistribution.addToValue((int) inst.classValue(),
                    inst.weight());
        }
    }

    /** Growing leaf: maintains per-attribute observers to evaluate splits. */
    public static class ActiveLearningNode extends LearningNode {

        private static final long serialVersionUID = 1L;

        protected double weightSeenAtLastSplitEvaluation;

        protected AutoExpandVector<AttributeClassObserver> attributeObservers = new AutoExpandVector<AttributeClassObserver>();

        public ActiveLearningNode(double[] initialClassObservations) {
            super(initialClassObservations);
            this.weightSeenAtLastSplitEvaluation = getWeightSeen();
        }

        @Override
        public int calcByteSize() {
            return super.calcByteSize()
                    + (int) (SizeOf.fullSizeOf(this.attributeObservers));
        }

        @Override
        public void learnFromInstance(Instance inst, HoeffdingOptionTree ht) {
            this.observedClassDistribution.addToValue((int) inst.classValue(),
                    inst.weight());
            for (int i = 0; i < inst.numAttributes() - 1; i++) {
                int instAttIndex = modelAttIndexToInstanceAttIndex(i, inst);
                AttributeClassObserver obs = this.attributeObservers.get(i);
                if (obs == null) {
                    // lazily create the right observer type per attribute
                    obs = inst.attribute(instAttIndex).isNominal() ? ht.newNominalClassObserver()
                            : ht.newNumericClassObserver();
                    this.attributeObservers.set(i, obs);
                }
                obs.observeAttributeClass(inst.value(instAttIndex),
                        (int) inst.classValue(), inst.weight());
            }
        }

        public double getWeightSeen() {
            return this.observedClassDistribution.sumOfValues();
        }

        public double getWeightSeenAtLastSplitEvaluation() {
            return this.weightSeenAtLastSplitEvaluation;
        }

        public void setWeightSeenAtLastSplitEvaluation(double weight) {
            this.weightSeenAtLastSplitEvaluation = weight;
        }

        /** Best split per attribute, plus the null (no-split) option unless pre-pruning is disabled. */
        public AttributeSplitSuggestion[] getBestSplitSuggestions(
                SplitCriterion criterion, HoeffdingOptionTree ht) {
            List<AttributeSplitSuggestion> bestSuggestions = new LinkedList<AttributeSplitSuggestion>();
            double[] preSplitDist = this.observedClassDistribution.getArrayCopy();
            if (!ht.noPrePruneOption.isSet()) {
                // add null split as an option
                bestSuggestions.add(new AttributeSplitSuggestion(null,
                        new double[0][], criterion.getMeritOfSplit(
                        preSplitDist, new double[][]{preSplitDist})));
            }
            for (int i = 0; i < this.attributeObservers.size(); i++) {
                AttributeClassObserver obs = this.attributeObservers.get(i);
                if (obs != null) {
                    AttributeSplitSuggestion bestSuggestion = obs.getBestEvaluatedSplitSuggestion(criterion,
                            preSplitDist, i, ht.binarySplitsOption.isSet());
                    if (bestSuggestion != null) {
                        bestSuggestions.add(bestSuggestion);
                    }
                }
            }
            return bestSuggestions.toArray(new AttributeSplitSuggestion[bestSuggestions.size()]);
        }

        public void disableAttribute(int attIndex) {
            this.attributeObservers.set(attIndex,
                    new NullAttributeClassObserver());
        }
    }

    protected Node treeRoot;

    protected int decisionNodeCount;

    protected int activeLeafNodeCount;

    protected int inactiveLeafNodeCount;

    protected double inactiveLeafByteSizeEstimate;

    protected double activeLeafByteSizeEstimate;

    protected double byteSizeEstimateOverheadFraction;

    protected int maxPredictionPaths;

    public int calcByteSize() {
        int size = (int) SizeOf.sizeOf(this);
        if (this.treeRoot != null) {
            size += this.treeRoot.calcByteSizeIncludingSubtree();
        }
        return size;
    }

    @Override
    public int measureByteSize() {
        return calcByteSize();
    }

    @Override
    public void resetLearningImpl() {
        this.treeRoot = null;
        this.decisionNodeCount = 0;
        this.activeLeafNodeCount = 0;
        this.inactiveLeafNodeCount = 0;
        this.inactiveLeafByteSizeEstimate = 0.0;
        this.activeLeafByteSizeEstimate = 0.0;
        this.byteSizeEstimateOverheadFraction = 1.0;
        this.maxPredictionPaths = 0;
        if (this.leafpredictionOption.getChosenIndex() > 0) {
            // NB/NBAdaptive leaves need all attribute observers, so the
            // remove-poor-attributes mechanism is disabled
            this.removePoorAttsOption = null;
        }
    }

    /**
     * Trains on one instance: routes it to every reachable leaf (regular path
     * plus option paths), updates each leaf, and attempts splits when the
     * grace period has elapsed.
     */
    @Override
    public void trainOnInstanceImpl(Instance inst) {
        if (this.treeRoot == null) {
            this.treeRoot = newLearningNode();
            this.activeLeafNodeCount = 1;
        }
        FoundNode[] foundNodes = this.treeRoot.filterInstanceToLeaves(inst,
                null, -1, true);
        for (FoundNode foundNode : foundNodes) {
            // option leaves will have a parentBranch of -999
            // option splits will have an option count of -999
            Node leafNode = foundNode.node;
            if (leafNode == null) {
                leafNode = newLearningNode();
                foundNode.parent.setChild(foundNode.parentBranch, leafNode);
                this.activeLeafNodeCount++;
            }
            if (leafNode instanceof LearningNode) {
                LearningNode learningNode = (LearningNode) leafNode;
                learningNode.learnFromInstance(inst, this);
                if (learningNode instanceof ActiveLearningNode) {
                    ActiveLearningNode activeLearningNode = (ActiveLearningNode) learningNode;
                    double weightSeen = activeLearningNode.getWeightSeen();
                    if (weightSeen
                            - activeLearningNode.getWeightSeenAtLastSplitEvaluation() >= this.gracePeriodOption.getValue()) {
                        attemptToSplit(activeLearningNode, foundNode.parent,
                                foundNode.parentBranch);
                        activeLearningNode.setWeightSeenAtLastSplitEvaluation(weightSeen);
                    }
                }
            }
        }
        if (this.trainingWeightSeenByModel
                % this.memoryEstimatePeriodOption.getValue() == 0) {
            estimateModelByteSizes();
        }
    }

    /** Aggregates (unnormalized) votes from every non-option-leaf path reached. */
    @Override
    public double[] getVotesForInstance(Instance inst) {
        if (this.treeRoot != null) {
            FoundNode[] foundNodes = this.treeRoot.filterInstanceToLeaves(inst,
                    null, -1, false);
            DoubleVector result = new DoubleVector();
            int predictionPaths = 0;
            for (FoundNode foundNode : foundNodes) {
                if (foundNode.parentBranch != -999) {
                    Node leafNode = foundNode.node;
                    if (leafNode == null) {
                        leafNode = foundNode.parent;
                    }
                    double[] dist = leafNode.getClassVotes(inst, this);
                    //Albert: changed for weights
                    //double distSum = Utils.sum(dist);
                    //if (distSum > 0.0) {
                    //    Utils.normalize(dist, distSum);
                    //}
                    result.addValues(dist);
                    predictionPaths++;
                }
            }
            if (predictionPaths > this.maxPredictionPaths) {
                // NOTE(review): increments by one rather than jumping straight
                // to predictionPaths — verify this understatement is intended.
                this.maxPredictionPaths++;
            }
            return result.getArrayRef();
        }
        return new double[0];
    }

    @Override
    protected Measurement[] getModelMeasurementsImpl() {
        return new Measurement[]{
                    new Measurement("tree size (nodes)", this.decisionNodeCount
                    + this.activeLeafNodeCount + this.inactiveLeafNodeCount),
                    new Measurement("tree size (leaves)", this.activeLeafNodeCount
                    + this.inactiveLeafNodeCount),
                    new Measurement("active learning leaves",
                    this.activeLeafNodeCount),
                    new Measurement("tree depth", measureTreeDepth()),
                    new Measurement("active leaf byte size estimate",
                    this.activeLeafByteSizeEstimate),
                    new Measurement("inactive leaf byte size estimate",
                    this.inactiveLeafByteSizeEstimate),
                    new Measurement("byte size estimate overhead",
                    this.byteSizeEstimateOverheadFraction),
                    new Measurement("maximum prediction paths used",
                    this.maxPredictionPaths)};
    }

    public int measureTreeDepth() {
        if (this.treeRoot != null) {
            return this.treeRoot.subtreeDepth();
        }
        return 0;
    }

    @Override
    public void getModelDescription(StringBuilder out, int indent) {
        this.treeRoot.describeSubtree(this, out, indent);
    }

    @Override
    public boolean isRandomizable() {
        return false;
    }

    /** One-sided Hoeffding bound for the given value range, confidence, and sample weight. */
    public static double computeHoeffdingBound(double range, double confidence,
            double n) {
        return Math.sqrt(((range * range) * Math.log(1.0 / confidence))
                / (2.0 * n));
    }

    protected AttributeClassObserver newNominalClassObserver() {
        AttributeClassObserver nominalClassObserver = (AttributeClassObserver) getPreparedClassOption(this.nominalEstimatorOption);
        return
                (AttributeClassObserver) nominalClassObserver.copy();
    }

    protected AttributeClassObserver newNumericClassObserver() {
        AttributeClassObserver numericClassObserver = (AttributeClassObserver) getPreparedClassOption(this.numericEstimatorOption);
        return (AttributeClassObserver) numericClassObserver.copy();
    }

    /**
     * Evaluates whether the given active leaf should split. A regular leaf
     * (parentIndex != -999) uses the primary Hoeffding-bound test against the
     * second-best suggestion; an option leaf (parentIndex == -999) must beat
     * the best merit of the splits already installed on its path, using the
     * secondary confidence.
     *
     * @param node        the active leaf under evaluation
     * @param parent      its parent split node (null at the root)
     * @param parentIndex branch index in the parent, or -999 for option leaves
     */
    protected void attemptToSplit(ActiveLearningNode node, SplitNode parent,
            int parentIndex) {
        if (!node.observedClassDistributionIsPure()) {
            SplitCriterion splitCriterion = (SplitCriterion) getPreparedClassOption(this.splitCriterionOption);
            AttributeSplitSuggestion[] bestSplitSuggestions = node.getBestSplitSuggestions(splitCriterion, this);
            Arrays.sort(bestSplitSuggestions);
            boolean shouldSplit = false;
            if (parentIndex != -999) {
                if (bestSplitSuggestions.length < 2) {
                    shouldSplit = bestSplitSuggestions.length > 0;
                } else {
                    double hoeffdingBound = computeHoeffdingBound(
                            splitCriterion.getRangeOfMerit(node.getObservedClassDistribution()),
                            this.splitConfidenceOption.getValue(), node.getWeightSeen());
                    AttributeSplitSuggestion bestSuggestion = bestSplitSuggestions[bestSplitSuggestions.length - 1];
                    AttributeSplitSuggestion secondBestSuggestion = bestSplitSuggestions[bestSplitSuggestions.length - 2];
                    if ((bestSuggestion.merit - secondBestSuggestion.merit > hoeffdingBound)
                            || (hoeffdingBound < this.tieThresholdOption.getValue())) {
                        shouldSplit = true;
                    }
                    if ((this.removePoorAttsOption != null)
                            && this.removePoorAttsOption.isSet()) {
                        Set<Integer> poorAtts = new HashSet<Integer>();
                        // scan 1 - add any poor to set
                        for (int i = 0; i < bestSplitSuggestions.length; i++) {
                            if (bestSplitSuggestions[i].splitTest != null) {
                                int[] splitAtts = bestSplitSuggestions[i].splitTest.getAttsTestDependsOn();
                                if (splitAtts.length == 1) {
                                    if (bestSuggestion.merit
                                            - bestSplitSuggestions[i].merit > hoeffdingBound) {
                                        poorAtts.add(new Integer(splitAtts[0]));
                                    }
                                }
                            }
                        }
                        // scan 2 - remove good ones from set
                        for (int i = 0; i < bestSplitSuggestions.length; i++) {
                            if (bestSplitSuggestions[i].splitTest != null) {
                                int[] splitAtts = bestSplitSuggestions[i].splitTest.getAttsTestDependsOn();
                                if (splitAtts.length == 1) {
                                    if (bestSuggestion.merit
                                            - bestSplitSuggestions[i].merit < hoeffdingBound) {
                                        poorAtts.remove(new Integer(
                                                splitAtts[0]));
                                    }
                                }
                            }
                        }
                        for (int poorAtt : poorAtts) {
                            node.disableAttribute(poorAtt);
                        }
                    }
                }
            } else if (bestSplitSuggestions.length > 0) {
                double hoeffdingBound = computeHoeffdingBound(
                        splitCriterion.getRangeOfMerit(node.getObservedClassDistribution()),
                        this.secondarySplitConfidenceOption.getValue(), node.getWeightSeen());
                AttributeSplitSuggestion bestSuggestion = bestSplitSuggestions[bestSplitSuggestions.length - 1];
                // in option case, scan back through existing options to
                // find best
                SplitNode current = parent;
                double bestPreviousMerit = Double.NEGATIVE_INFINITY;
                double[] preDist = node.getObservedClassDistribution();
                while (true) {
                    double merit = current.computeMeritOfExistingSplit(
                            splitCriterion, preDist);
                    if (merit > bestPreviousMerit) {
                        bestPreviousMerit = merit;
                    }
                    if (current.optionCount != -999) {
                        break;
                    }
                    current = current.parent;
                }
                if (bestSuggestion.merit - bestPreviousMerit > hoeffdingBound) {
                    shouldSplit = true;
                }
            }
            if (shouldSplit) {
                AttributeSplitSuggestion splitDecision = bestSplitSuggestions[bestSplitSuggestions.length - 1];
                if (splitDecision.splitTest == null) {
                    // preprune - null wins
                    if (parentIndex != -999) {
                        deactivateLearningNode(node, parent, parentIndex);
                    }
                } else {
                    SplitNode newSplit = new SplitNode(splitDecision.splitTest,
                            node.getObservedClassDistribution());
                    newSplit.parent = parent;
                    // add option procedure
                    SplitNode optionHead = parent;
                    if (parent != null) {
                        while (optionHead.optionCount == -999) {
                            optionHead = optionHead.parent;
                        }
                    }
                    if ((parentIndex == -999) && (parent != null)) {
                        // adding a new option
                        newSplit.optionCount = -999;
                        optionHead.updateOptionCountBelow(1, this);
                        if (optionHead.parent != null) {
                            optionHead.parent.updateOptionCount(optionHead,
                                    this);
                        }
                        addToOptionTable(splitDecision, optionHead.parent);
                    } else {
                        // adding a regular leaf
                        if (optionHead == null) {
                            newSplit.optionCount = 1;
                        } else {
                            newSplit.optionCount = optionHead.optionCount;
                        }
                    }
                    int numOptions = 1;
                    if (optionHead != null) {
                        numOptions = optionHead.optionCount;
                    }
                    if (numOptions < this.maxOptionPathsOption.getValue()) {
                        newSplit.nextOption = node; // preserve leaf
                        // disable attribute just used
                        int[] splitAtts = splitDecision.splitTest.getAttsTestDependsOn();
                        for (int i : splitAtts) {
                            node.disableAttribute(i);
                        }
                    } else {
                        this.activeLeafNodeCount--;
                    }
                    for (int i = 0; i < splitDecision.numSplits(); i++) {
                        Node newChild = newLearningNode(splitDecision.resultingClassDistributionFromSplit(i));
                        newSplit.setChild(i, newChild);
                    }
                    this.decisionNodeCount++;
                    this.activeLeafNodeCount += splitDecision.numSplits();
                    if (parent == null) {
                        this.treeRoot = newSplit;
                    } else {
                        if (parentIndex != -999) {
                            parent.setChild(parentIndex, newSplit);
                        } else {
                            parent.nextOption = newSplit;
                        }
                    }
                }
                // manage memory
                enforceTrackerLimit();
            }
        }
    }

    /**
     * Appends a CSV row (instances seen, depth, attribute, split value) to the
     * dump file configured via -f; no-op when no dump file is set.
     */
    private void addToOptionTable(AttributeSplitSuggestion bestSuggestion,
            SplitNode parent) {
        File dumpFile = this.dumpFileOption.getFile();
        PrintStream immediateResultStream = null;
        if (dumpFile != null) {
            try {
                if (dumpFile.exists()) {
                    immediateResultStream = new PrintStream(
                            new FileOutputStream(dumpFile, true), true);
                } else {
                    immediateResultStream = new PrintStream(
                            new FileOutputStream(dumpFile), true);
                }
            } catch (Exception ex) {
                throw new RuntimeException("Unable to open dump file: " + dumpFile, ex);
            }
            int splitAtt = bestSuggestion.splitTest.getAttsTestDependsOn()[0];
            double splitVal = -1.0;
            if (bestSuggestion.splitTest instanceof NumericAttributeBinaryTest) {
                NumericAttributeBinaryTest test = (NumericAttributeBinaryTest) bestSuggestion.splitTest;
                splitVal = test.getSplitValue();
            }
            int treeDepth = 0;
            while (parent != null) {
                parent = parent.parent;
                treeDepth++;
            }
            immediateResultStream.println(this.trainingWeightSeenByModel + ","
                    + treeDepth + "," + splitAtt + "," + splitVal);
immediateResultStream.flush(); immediateResultStream.close(); } } public void enforceTrackerLimit() { if ((this.inactiveLeafNodeCount > 0) || ((this.activeLeafNodeCount * this.activeLeafByteSizeEstimate + this.inactiveLeafNodeCount * this.inactiveLeafByteSizeEstimate) * this.byteSizeEstimateOverheadFraction > this.maxByteSizeOption.getValue())) { FoundNode[] learningNodes = findLearningNodes(); Arrays.sort(learningNodes, new Comparator<FoundNode>() { public int compare(FoundNode fn1, FoundNode fn2) { if (HoeffdingOptionTree.this.memoryStrategyOption.getValue() == 0) { // strategy 1 - every leaf treated equal return Double.compare(fn1.node.calculatePromise(), fn2.node.calculatePromise()); } else if (HoeffdingOptionTree.this.memoryStrategyOption.getValue() == 1) { // strategy 2 - internal leaves penalised double p1 = fn1.node.calculatePromise(); if (fn1.parentBranch == -999) { p1 /= fn1.parent.getHeadOptionCount(); } double p2 = fn2.node.calculatePromise(); if (fn2.parentBranch == -999) { p1 /= fn2.parent.getHeadOptionCount(); } return Double.compare(p1, p2); } else { // strategy 3 - all true leaves beat internal leaves if (fn1.parentBranch == -999) { if (fn2.parentBranch == -999) { return Double.compare(fn1.node.calculatePromise(), fn2.node.calculatePromise()); } return -1; // fn1 < fn2 } if (fn2.parentBranch == -999) { return 1; // fn1 > fn2 } return Double.compare(fn1.node.calculatePromise(), fn2.node.calculatePromise()); } } }); int maxActive = 0; while (maxActive < learningNodes.length) { maxActive++; if ((maxActive * this.activeLeafByteSizeEstimate + (learningNodes.length - maxActive) * this.inactiveLeafByteSizeEstimate) * this.byteSizeEstimateOverheadFraction > this.maxByteSizeOption.getValue()) { maxActive--; break; } } int cutoff = learningNodes.length - maxActive; for (int i = 0; i < cutoff; i++) { if (learningNodes[i].node instanceof ActiveLearningNode) { deactivateLearningNode( (ActiveLearningNode) learningNodes[i].node, learningNodes[i].parent, 
learningNodes[i].parentBranch); } } for (int i = cutoff; i < learningNodes.length; i++) { if (learningNodes[i].node instanceof InactiveLearningNode) { activateLearningNode( (InactiveLearningNode) learningNodes[i].node, learningNodes[i].parent, learningNodes[i].parentBranch); } } } } public void estimateModelByteSizes() { FoundNode[] learningNodes = findLearningNodes(); long totalActiveSize = 0; long totalInactiveSize = 0; for (FoundNode foundNode : learningNodes) { if (foundNode.node instanceof ActiveLearningNode) { totalActiveSize += SizeOf.fullSizeOf(foundNode.node); } else { totalInactiveSize += SizeOf.fullSizeOf(foundNode.node); } } if (totalActiveSize > 0) { this.activeLeafByteSizeEstimate = (double) totalActiveSize / this.activeLeafNodeCount; } if (totalInactiveSize > 0) { this.inactiveLeafByteSizeEstimate = (double) totalInactiveSize / this.inactiveLeafNodeCount; } int actualModelSize = this.measureByteSize(); double estimatedModelSize = (this.activeLeafNodeCount * this.activeLeafByteSizeEstimate + this.inactiveLeafNodeCount * this.inactiveLeafByteSizeEstimate); this.byteSizeEstimateOverheadFraction = actualModelSize / estimatedModelSize; if (actualModelSize > this.maxByteSizeOption.getValue()) { enforceTrackerLimit(); } } public void deactivateAllLeaves() { FoundNode[] learningNodes = findLearningNodes(); for (int i = 0; i < learningNodes.length; i++) { if (learningNodes[i].node instanceof ActiveLearningNode) { deactivateLearningNode( (ActiveLearningNode) learningNodes[i].node, learningNodes[i].parent, learningNodes[i].parentBranch); } } } protected void deactivateLearningNode(ActiveLearningNode toDeactivate, SplitNode parent, int parentBranch) { Node newLeaf = new InactiveLearningNode(toDeactivate.getObservedClassDistribution()); if (parent == null) { this.treeRoot = newLeaf; } else { if (parentBranch != -999) { parent.setChild(parentBranch, newLeaf); } else { parent.nextOption = newLeaf; } } this.activeLeafNodeCount--; this.inactiveLeafNodeCount++; } 
protected void activateLearningNode(InactiveLearningNode toActivate, SplitNode parent, int parentBranch) { Node newLeaf = newLearningNode(toActivate.getObservedClassDistribution()); if (parent == null) { this.treeRoot = newLeaf; } else { if (parentBranch != -999) { parent.setChild(parentBranch, newLeaf); } else { parent.nextOption = newLeaf; } } this.activeLeafNodeCount++; this.inactiveLeafNodeCount--; } protected FoundNode[] findLearningNodes() { List<FoundNode> foundList = new LinkedList<FoundNode>(); findLearningNodes(this.treeRoot, null, -1, foundList); return foundList.toArray(new FoundNode[foundList.size()]); } protected void findLearningNodes(Node node, SplitNode parent, int parentBranch, List<FoundNode> found) { if (node != null) { if (node instanceof LearningNode) { found.add(new FoundNode(node, parent, parentBranch)); } if (node instanceof SplitNode) { SplitNode splitNode = (SplitNode) node; for (int i = 0; i < splitNode.numChildren(); i++) { findLearningNodes(splitNode.getChild(i), splitNode, i, found); } findLearningNodes(splitNode.nextOption, splitNode, -999, found); } } } public MultiChoiceOption leafpredictionOption = new MultiChoiceOption( "leafprediction", 'l', "Leaf prediction to use.", new String[]{ "MC", "NB", "NBAdaptive"}, new String[]{ "Majority class", "Naive Bayes", "Naive Bayes Adaptive"}, 2); public IntOption nbThresholdOption = new IntOption( "nbThreshold", 'q', "The number of instances a leaf should observe before permitting Naive Bayes.", 0, 0, Integer.MAX_VALUE); public static class LearningNodeNB extends ActiveLearningNode { private static final long serialVersionUID = 1L; public LearningNodeNB(double[] initialClassObservations) { super(initialClassObservations); } @Override public double[] getClassVotes(Instance inst, HoeffdingOptionTree hot) { if (getWeightSeen() >= hot.nbThresholdOption.getValue()) { return NaiveBayes.doNaiveBayesPrediction(inst, this.observedClassDistribution, this.attributeObservers); } return 
super.getClassVotes(inst, hot); } @Override public void disableAttribute(int attIndex) { // should not disable poor atts - they are used in NB calc } } public static class LearningNodeNBAdaptive extends LearningNodeNB { private static final long serialVersionUID = 1L; protected double mcCorrectWeight = 0.0; protected double nbCorrectWeight = 0.0; public LearningNodeNBAdaptive(double[] initialClassObservations) { super(initialClassObservations); } @Override public void learnFromInstance(Instance inst, HoeffdingOptionTree hot) { int trueClass = (int) inst.classValue(); if (this.observedClassDistribution.maxIndex() == trueClass) { this.mcCorrectWeight += inst.weight(); } if (Utils.maxIndex(NaiveBayes.doNaiveBayesPrediction(inst, this.observedClassDistribution, this.attributeObservers)) == trueClass) { this.nbCorrectWeight += inst.weight(); } super.learnFromInstance(inst, hot); } @Override public double[] getClassVotes(Instance inst, HoeffdingOptionTree ht) { if (this.mcCorrectWeight > this.nbCorrectWeight) { return this.observedClassDistribution.getArrayCopy(); } return NaiveBayes.doNaiveBayesPrediction(inst, this.observedClassDistribution, this.attributeObservers); } } protected LearningNode newLearningNode() { return newLearningNode(new double[0]); } protected LearningNode newLearningNode(double[] initialClassObservations) { LearningNode ret; int predictionOption = this.leafpredictionOption.getChosenIndex(); if (predictionOption == 0) { //MC ret = new ActiveLearningNode(initialClassObservations); } else if (predictionOption == 1) { //NB ret = new LearningNodeNB(initialClassObservations); } else { //NBAdaptive ret = new LearningNodeNBAdaptive(initialClassObservations); } if (ret == null) { System.out.println("error"); } return ret; } }
Java
/* * DecisionStump.java * Copyright (C) 2007 University of Waikato, Hamilton, New Zealand * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.classifiers.trees; import moa.classifiers.AbstractClassifier; import moa.classifiers.core.attributeclassobservers.AttributeClassObserver; import moa.classifiers.core.AttributeSplitSuggestion; import moa.classifiers.core.attributeclassobservers.GaussianNumericAttributeClassObserver; import moa.classifiers.core.attributeclassobservers.NominalAttributeClassObserver; import moa.classifiers.core.splitcriteria.SplitCriterion; import moa.core.AutoExpandVector; import moa.core.DoubleVector; import moa.core.Measurement; import moa.options.ClassOption; import moa.options.FlagOption; import moa.options.IntOption; import weka.core.Instance; /** * Decision trees of one level.<br /> * * Parameters:</p> * <ul> * <li>-g : The number of instances to observe between model changes</li> * <li>-b : Only allow binary splits</li> * <li>-c : Split criterion to use. 
Example : InfoGainSplitCriterion</li> * <li>-r : Seed for random behaviour of the classifier</li> * </ul> * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision: 7 $ */ public class DecisionStump extends AbstractClassifier { private static final long serialVersionUID = 1L; @Override public String getPurposeString() { return "Decision trees of one level."; } public IntOption gracePeriodOption = new IntOption("gracePeriod", 'g', "The number of instances to observe between model changes.", 1000, 0, Integer.MAX_VALUE); public FlagOption binarySplitsOption = new FlagOption("binarySplits", 'b', "Only allow binary splits."); public ClassOption splitCriterionOption = new ClassOption("splitCriterion", 'c', "Split criterion to use.", SplitCriterion.class, "InfoGainSplitCriterion"); protected AttributeSplitSuggestion bestSplit; protected DoubleVector observedClassDistribution; protected AutoExpandVector<AttributeClassObserver> attributeObservers; protected double weightSeenAtLastSplit; @Override public void resetLearningImpl() { this.bestSplit = null; this.observedClassDistribution = new DoubleVector(); this.attributeObservers = new AutoExpandVector<AttributeClassObserver>(); this.weightSeenAtLastSplit = 0.0; } @Override protected Measurement[] getModelMeasurementsImpl() { return null; } @Override public void getModelDescription(StringBuilder out, int indent) { // TODO Auto-generated method stub } @Override public void trainOnInstanceImpl(Instance inst) { this.observedClassDistribution.addToValue((int) inst.classValue(), inst.weight()); for (int i = 0; i < inst.numAttributes() - 1; i++) { int instAttIndex = modelAttIndexToInstanceAttIndex(i, inst); AttributeClassObserver obs = this.attributeObservers.get(i); if (obs == null) { obs = inst.attribute(instAttIndex).isNominal() ? 
newNominalClassObserver() : newNumericClassObserver(); this.attributeObservers.set(i, obs); } obs.observeAttributeClass(inst.value(instAttIndex), (int) inst.classValue(), inst.weight()); } if (this.trainingWeightSeenByModel - this.weightSeenAtLastSplit >= this.gracePeriodOption.getValue()) { this.bestSplit = findBestSplit((SplitCriterion) getPreparedClassOption(this.splitCriterionOption)); this.weightSeenAtLastSplit = this.trainingWeightSeenByModel; } } @Override public double[] getVotesForInstance(Instance inst) { if (this.bestSplit != null) { int branch = this.bestSplit.splitTest.branchForInstance(inst); if (branch >= 0) { return this.bestSplit.resultingClassDistributionFromSplit(branch); } } return this.observedClassDistribution.getArrayCopy(); } @Override public boolean isRandomizable() { return false; } protected AttributeClassObserver newNominalClassObserver() { return new NominalAttributeClassObserver(); } protected AttributeClassObserver newNumericClassObserver() { return new GaussianNumericAttributeClassObserver(); } protected AttributeSplitSuggestion findBestSplit(SplitCriterion criterion) { AttributeSplitSuggestion bestFound = null; double bestMerit = Double.NEGATIVE_INFINITY; double[] preSplitDist = this.observedClassDistribution.getArrayCopy(); for (int i = 0; i < this.attributeObservers.size(); i++) { AttributeClassObserver obs = this.attributeObservers.get(i); if (obs != null) { AttributeSplitSuggestion suggestion = obs.getBestEvaluatedSplitSuggestion(criterion, preSplitDist, i, this.binarySplitsOption.isSet()); if (suggestion.merit > bestMerit) { bestMerit = suggestion.merit; bestFound = suggestion; } } } return bestFound; } }
Java
/* * LimAttHoeffdingTree.java * Copyright (C) 2010 University of Waikato, Hamilton, New Zealand * @author Albert Bifet (abifet at cs dot waikato dot ac dot nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.classifiers.trees; import moa.classifiers.bayes.NaiveBayes; import moa.classifiers.core.attributeclassobservers.AttributeClassObserver; import weka.core.Instance; import weka.core.Utils; /** * Hoeffding decision trees with a restricted number of attributes for data * streams. LimAttClassifier is the stacking method that can be used with these * decision trees. For more information see,<br/> <br/> Albert Bifet, Eibe * Frank, Geoffrey Holmes, Bernhard Pfahringer: Accurate Ensembles for Data * Streams: Combining Restricted Hoeffding Trees using Stacking. 
Journal of Machine Learning Research - Proceedings Track 13: 225-240 (2010)
 * <!-- technical-bibtex-start --> BibTeX:
 * <pre>
 * &#64;article{BifetFHP10,
 *    author    = {Albert Bifet and
 *                 Eibe Frank and
 *                 Geoffrey Holmes and
 *                 Bernhard Pfahringer},
 *    title     = {Accurate Ensembles for Data Streams: Combining Restricted
 *                 Hoeffding Trees using Stacking},
 *    journal   = {Journal of Machine Learning Research - Proceedings Track},
 *    volume    = {13},
 *    year      = {2010},
 *    pages     = {225-240}
 * }
 * </pre>
 * <p/>
 * <!-- technical-bibtex-end -->
 *
 * @author Albert Bifet (abifet at cs dot waikato dot ac dot nz)
 * @version $Revision: 7 $
 */
public class LimAttHoeffdingTree extends HoeffdingTree {

    private static final long serialVersionUID = 1L;

    @Override
    public String getPurposeString() {
        return "Hoeffding decision trees with a restricted number of attributes for data streams.";
    }

    // The subset of attribute indices this tree is allowed to learn from;
    // set externally (e.g. by the LimAttClassifier stacking ensemble).
    protected int[] listAttributes;

    public void setlistAttributes(int[] list) {
        this.listAttributes = list;
    }

    /**
     * Active learning node that only updates attribute observers for the
     * restricted attribute list, instead of all attributes.
     */
    public static class LimAttLearningNode extends ActiveLearningNode {

        private static final long serialVersionUID = 1L;

        // NOTE(review): declared but never read in this class — appears to be
        // a leftover; kept for serialization/subclass compatibility.
        protected double weightSeenAtLastSplitEvaluation;

        // Node-local copy of the restricted attribute list (lazily pulled
        // from the owning tree on the first learnFromInstance call).
        protected int[] listAttributes;

        protected int numAttributes;

        public LimAttLearningNode(double[] initialClassObservations) {
            super(initialClassObservations);
        }

        public void setlistAttributes(int[] list) {
            this.listAttributes = list;
            this.numAttributes = list.length;
        }

        /**
         * Like the superclass version, but iterates only over the restricted
         * attribute list; deliberately does NOT call super, which would
         * observe every attribute.
         */
        @Override
        public void learnFromInstance(Instance inst, HoeffdingTree ht) {
            this.observedClassDistribution.addToValue((int) inst.classValue(),
                    inst.weight());
            if (this.listAttributes == null) {
                setlistAttributes(((LimAttHoeffdingTree) ht).listAttributes);
            }
            for (int j = 0; j < this.numAttributes; j++) {
                int i = this.listAttributes[j];
                int instAttIndex = modelAttIndexToInstanceAttIndex(i, inst);
                AttributeClassObserver obs = this.attributeObservers.get(i);
                if (obs == null) {
                    // Lazily create the right observer kind for the attribute.
                    obs = inst.attribute(instAttIndex).isNominal() ? ht.newNominalClassObserver() : ht.newNumericClassObserver();
                    this.attributeObservers.set(i, obs);
                }
                obs.observeAttributeClass(inst.value(instAttIndex),
                        (int) inst.classValue(), inst.weight());
            }
        }
    }

    public LimAttHoeffdingTree() {
        // Poor-attribute removal would conflict with the externally imposed
        // attribute restriction, so the option is disabled entirely.
        this.removePoorAttsOption = null;
    }

    /**
     * Restricted-attribute leaf that predicts with Naive Bayes once enough
     * weight has been seen, falling back to the majority class before that.
     */
    public static class LearningNodeNB extends LimAttLearningNode {

        private static final long serialVersionUID = 1L;

        public LearningNodeNB(double[] initialClassObservations) {
            super(initialClassObservations);
        }

        @Override
        public double[] getClassVotes(Instance inst, HoeffdingTree ht) {
            if (getWeightSeen() >= ht.nbThresholdOption.getValue()) {
                return NaiveBayes.doNaiveBayesPrediction(inst,
                        this.observedClassDistribution,
                        this.attributeObservers);
            }
            return super.getClassVotes(inst, ht);
        }

        @Override
        public void disableAttribute(int attIndex) {
            // should not disable poor atts - they are used in NB calc
        }
    }

    /**
     * Restricted-attribute leaf that tracks whether majority-class or Naive
     * Bayes has been more accurate so far and votes with whichever leads.
     */
    public static class LearningNodeNBAdaptive extends LearningNodeNB {

        private static final long serialVersionUID = 1L;

        // Cumulative weight of instances the majority-class vote got right.
        protected double mcCorrectWeight = 0.0;

        // Cumulative weight of instances the Naive Bayes vote got right.
        protected double nbCorrectWeight = 0.0;

        public LearningNodeNBAdaptive(double[] initialClassObservations) {
            super(initialClassObservations);
        }

        @Override
        public void learnFromInstance(Instance inst, HoeffdingTree ht) {
            // Score both predictors on this instance before learning from it.
            int trueClass = (int) inst.classValue();
            if (this.observedClassDistribution.maxIndex() == trueClass) {
                this.mcCorrectWeight += inst.weight();
            }
            if (Utils.maxIndex(NaiveBayes.doNaiveBayesPrediction(inst,
                    this.observedClassDistribution, this.attributeObservers)) == trueClass) {
                this.nbCorrectWeight += inst.weight();
            }
            super.learnFromInstance(inst, ht);
        }

        @Override
        public double[] getClassVotes(Instance inst, HoeffdingTree ht) {
            if (this.mcCorrectWeight > this.nbCorrectWeight) {
                return this.observedClassDistribution.getArrayCopy();
            }
            double ret[] = NaiveBayes.doNaiveBayesPrediction(inst,
                    this.observedClassDistribution, this.attributeObservers);
            // Scale the NB distribution by the total observed weight —
            // presumably so votes from different trees are weighted by how
            // much data each leaf has seen when combined by the stacking
            // ensemble (differs from the plain HoeffdingTree leaf on purpose).
            for (int i = 0; i < ret.length; i++) {
                ret[i] *= this.observedClassDistribution.sumOfValues();
            }
            return ret;
        }
    }

    /**
     * Creates the leaf kind selected by leafpredictionOption
     * (0 = majority class, 1 = NB, otherwise NB-adaptive), all of which
     * honour the restricted attribute list.
     */
    @Override
    protected LearningNode newLearningNode(double[] initialClassObservations) {
        LearningNode ret;
        int predictionOption = this.leafpredictionOption.getChosenIndex();
        if (predictionOption == 0) { //MC
            ret = new LimAttLearningNode(initialClassObservations);
        } else if (predictionOption == 1) { //NB
            ret = new LearningNodeNB(initialClassObservations);
        } else { //NBAdaptive
            ret = new LearningNodeNBAdaptive(initialClassObservations);
        }
        return ret;
    }

    @Override
    public boolean isRandomizable() {
        // Randomizable: the ensemble assigns attribute subsets per seed.
        return true;
    }
}
Java
/* * ORTO.java * Copyright (C) Jožef Stefan Institute, Ljubljana * @author Aljaž Osojnik * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * */ /* Based on the FIMTDD implementation by Katie de Lange, E. Almeida, J. Gama. See FIMTDD.java. * * Contact: aljaz.osojnik@ijs.si */ package moa.classifiers.trees; import java.util.Stack; import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Set; import moa.AbstractMOAObject; import moa.classifiers.AbstractClassifier; import moa.classifiers.Regressor; import moa.classifiers.core.AttributeSplitSuggestion; import moa.classifiers.core.attributeclassobservers.AttributeClassObserver; import moa.classifiers.core.attributeclassobservers.FIMTDDNumericAttributeClassObserver; import moa.classifiers.core.attributeclassobservers.NullAttributeClassObserver; import moa.classifiers.core.conditionaltests.InstanceConditionalTest; import moa.classifiers.core.splitcriteria.VarianceReductionSplitCriterion; // import moa.classifiers.core.splitcriteria.SDRSplitCriterion; import moa.classifiers.core.splitcriteria.SplitCriterion; import moa.core.DoubleVector; import moa.core.AutoExpandVector; import moa.core.Measurement; import moa.core.SizeOf; import moa.core.StringUtils; import moa.options.*; import weka.core.Instance; // import weka.core.Utils; /* * Implementation of ORTO, option tree for data streams. 
*/ public class ORTO extends AbstractClassifier implements Regressor{ private static final long serialVersionUID = 1L; //============================== INTERNALS ===============================// protected Node treeRoot; private int leafNodeCount = 0; private int innerNodeCount = 0; private int optionNodeCount = 0; private int numTrees = 1; protected int maxDepth = 0; protected double inactiveLeafByteSizeEstimate; protected double activeLeafByteSizeEstimate; protected double byteSizeEstimateOverheadFraction; // Store the lowest node (lowest level) in the tree that requires adaptation protected ArrayList<InnerNode> nodesToAdapt = new ArrayList<InnerNode>(); protected boolean Adaptable = true; protected double initLearnRate = 0.1; protected double learnRateDecay = 0.001; public int maxID = 0; private double learnTime = 0.0; private double predictTime = 0.0; //============================ END INTERNALS =============================// //============================= SET OPTIONS ==============================// public FloatOption PageHinckleyAlphaOption = new FloatOption( "PageHinckleyAlpha", 'a', "The alpha value to use in the Page Hinckley change detection tests.", 0.005, 0.0, 1.0); public IntOption PageHinckleyThresholdOption = new IntOption( "PageHinckleyThreshold", 'h', "The threshold value to be used in the Page Hinckley change detection tests.", 50, 0, Integer.MAX_VALUE); public FloatOption AlternateTreeFadingFactorOption = new FloatOption( "AlternateTreeFadingFactor", 'f', "The fading factor to use when deciding if an alternate tree should replace an original.", 0.995, 0.0, 1.0); public IntOption AlternateTreeTMinOption = new IntOption( "AlternateTreeTMin", 'y', "The Tmin value to use when deciding if an alternate tree should replace an original.", 150, 0, Integer.MAX_VALUE); public IntOption AlternateTreeTimeOption = new IntOption( "AlternateTreeTime", 'u', "The 'time' (in terms of number of instances) value to use when deciding if an alternate tree should be 
discarded.", 1500, 0, Integer.MAX_VALUE); public FloatOption LearningRatioOption = new FloatOption( "LearningRatio", 'w', "Learning ratio to use for training the Perceptrons in the leaves.", 0.01, 0.0, 1.0); public FlagOption LearningRatioDecayOrConstOption = new FlagOption( "LearningRatioDecayOrConst", 'j', "learning Ratio Decay or const parameter."); public IntOption MaxTreesOption = new IntOption( "MaxTrees", 'm', "The maximum number of trees contained in the option tree.", 10, 1, Integer.MAX_VALUE); public IntOption MaxOptionLevelOption = new IntOption( "MaxOptionLevel", 'l', "The maximal depth at which option nodes can be created.", 10, 0, Integer.MAX_VALUE); public FloatOption OptionDecayFactorOption = new FloatOption( "OptionDecayFactor", 'd', "The option decay factor that determines how many options can be selected at a given level.", 0.9, 0.0, 1.0); public ClassOption splitCriterionOption = new ClassOption( "splitCriterion", 's', "Split criterion to use.", VarianceReductionSplitCriterion.class, "VarianceReductionSplitCriterion"); public ClassOption numericEstimatorOption = new ClassOption( "numericEstimator", 'n', "Numeric estimator to use.", FIMTDDNumericAttributeClassObserver.class, "FIMTDDNumericAttributeClassObserver"); public IntOption gracePeriodOption = new IntOption( "gracePeriod", 'g', "The number of instances a leaf should observe between split attempts.", 200, 0, Integer.MAX_VALUE); public FloatOption splitConfidenceOption = new FloatOption( "splitConfidence", 'c', "The allowable error in split decision, values closer to 0 will take longer to decide.", 0.0000001, 0.0, 1.0); public FloatOption tieThresholdOption = new FloatOption( "tieThreshold", 't', "Threshold below which a split will be forced to break ties.", 0.05, 0.0, 1.0); public FlagOption removePoorAttsOption = new FlagOption( "removePoorAtts", 'p', "Disable poor attributes."); public MultiChoiceOption OptionNodeAggregationOption = new MultiChoiceOption( "OptionNodeAggregation", 'o', 
"The aggregation method used to combine predictions in option nodes.", new String[]{"average", "bestTree"}, new String[]{"Average", "Best tree"}, 0); public FloatOption OptionFadingFactorOption = new FloatOption( "OptionFadingFactor", 'q', "The fading factor used for comparing subtrees of an option node.", 0.9995, 0.0, 1.0); //============================= END OPTIONS ==============================// //=============================== CLASSES ================================// public abstract static class Node extends AbstractMOAObject /*implements AdaptationCompatibleNode*/ { private static final long serialVersionUID = 1L; public int ID; protected InnerNode parent; protected Node alternateTree; protected boolean Alternate = false; protected boolean Adaptable = true; public Node(int id) { this.ID = id; } @Override public void getDescription(StringBuilder sb, int indent) { } public int calcByteSize() { return (int) SizeOf.fullSizeOf(this); } public boolean isLeaf() { return true; } public void calculateDetph(ORTO tree) { int level = this.getLevel(); if (level > tree.maxDepth) { tree.maxDepth = level; } } public int getLevel() { Node target = (Node) this.getParent(); while (target instanceof OptionNode) { target = (Node) target.getParent(); } if (target == null) { if (!Alternate) { // Actual tree root return 0; } else { // Root of alternate tree return alternateTree.getLevel(); } } else { return target.getLevel() + 1; } } /** * Set the parent node */ public void setParent(InnerNode parent) { this.parent = parent; } /** * Return the parent node */ public InnerNode getParent() { return parent; } public void setChild(int parentBranch, Node node) { } public int getChildIndex(Node child) { return 0; } public int getNumSubtrees() { return 1; } public double[] processInstance(Instance inst, ORTO tree) { // The returned values represent (by index): // 0: the prediction of the node // 1: the faded MSE // 2: examples seen // 3: the back propagated PH error return new double[] 
{0.0, 0.0, 0.0, 0.0}; } public double[] getPrediction(Instance inst, ORTO tree) { // The returned values represent (by index): // 0: the prediction of the node // 1: the faded MSE // 2: examples seen return new double[] {0.0, 0.0, 0.0}; } public void setAdaptable(boolean value) { Adaptable = value; } public void setAlternate(boolean value) { Alternate = value; } } public abstract static class InnerNode extends Node { private static final long serialVersionUID = 1L; protected AutoExpandVector<Node> children = new AutoExpandVector<Node>(); protected double PHmT = 0; protected double PHMT = Double.MAX_VALUE; // Keep track of the statistics for loss error calculations protected DoubleVector lossStatistics = new DoubleVector(); protected int weightSeen = 0; protected int previousWeight = 0; public InnerNode(int id) { super(id); } public int numChildren() { return this.children.size(); } public Node getChild(int index) { return this.children.get(index); } public int getChildIndex(Node child) { return this.children.indexOf(child); } public void setChild(int index, Node child) { this.children.set(index, child); } public void setAlternateTree(Node tree) { this.alternateTree = tree; } public Node getAlternateTree() { return this.alternateTree; } public int calcByteSize() { return (int) SizeOf.fullSizeOf(this) + (int) SizeOf.fullSizeOf(children); } public void calculateDetph(ORTO tree) { if (this.getLevel() > tree.maxDepth) { tree.maxDepth = this.getLevel(); } for (Node child : children) { child.calculateDetph(tree); } } /** * Check to see if the tree needs updating */ public boolean PageHinckleyTest(double error, double threshold) { // Update the cumulative mT sum PHmT += error; // Update the minimum mT value if the new mT is // smaller than the current minimum if(PHmT < PHMT) { PHMT = PHmT; } // Return true if the cumulative value - the current minimum is // greater than the current threshold (in which case we should adapt) return PHmT - PHMT > threshold; } public void 
setAdaptable(boolean value) { Adaptable = value; for (Node child : children) { child.setAdaptable(value); } } public void setAlternate(boolean value) { Alternate = value; for (Node child : children) { child.setAlternate(value); } } } public static class SplitNode extends InnerNode { private static final long serialVersionUID = 1L; protected InstanceConditionalTest splitTest; public void setChild(int index, Node child) { if ((this.splitTest.maxBranches() >= 0) && (index >= this.splitTest.maxBranches())) { throw new IndexOutOfBoundsException(); } this.children.set(index, child); } public SplitNode(InstanceConditionalTest splitTest, int id) { super(id); this.splitTest = splitTest; } public int instanceChildIndex(Instance inst) { return this.splitTest.branchForInstance(inst); } @Override public boolean isLeaf() { return false; } public int getNumSubtrees() { int num = 1; for (Node child : children) { num += child.getNumSubtrees(); } num -= children.size(); return num; } public double[] processInstance(Instance inst, ORTO tree) { int branch = splitTest.branchForInstance(inst); Node child = children.get(branch); if (child == null) { tree.maxID++; child = new ActiveLearningNode(tree.maxID); this.setChild(branch, child); child.setParent(this); } double[] processed = child.processInstance(inst, tree); weightSeen++; // Convert any nominal attributes to numeric ones??? 
// If no model exists yet, begin with an empty leaf node (the root)
            // Take the current example and traverse it through the tree to a leaf
            if (Adaptable) {
                if (this.alternateTree == null) {
                    // Retrieve the error for the found leaf node
                    // currentNode.learnFromInstance(inst, this);
                    double PHerror = processed[3] - tree.PageHinckleyAlphaOption.getValue();

                    // Back-propagate the error through all the parent nodes
                    if (PageHinckleyTest(PHerror, tree.PageHinckleyThresholdOption.getValue())) {
                        // A deeper node already flagged for adaptation is superseded by this one.
                        if (tree.nodesToAdapt.contains(child)) {
                            tree.nodesToAdapt.remove(child);
                        }
                        tree.nodesToAdapt.add(this);
                    }
                } else if (this.alternateTree != null) {
                    // If an alternate tree already exists, check if the current tree should be replaced with it,
                    // or if the alternate tree should be discarded.
                    // this.alternateTree.checkRoot();
                    double[] processedAlt = this.alternateTree.processInstance(inst, tree);

                    // Update the loss statistics for the alternate tree
                    double qAlt = processedAlt[1];
                    double qOrg = processed[1];

                    // Compute the Qi statistics (log loss ratio; positive means the alternate is better)
                    double Qi = Math.log(qOrg / qAlt);
                    lossStatistics.addToValue(0,1);
                    lossStatistics.addToValue(1,Qi);
                    double QiAverage = lossStatistics.getValue(1) / lossStatistics.getValue(0);

                    if (weightSeen - previousWeight >= tree.AlternateTreeTMinOption.getValue()) {
                        // Update the weight at which a decision was tested for
                        previousWeight = weightSeen;

                        // If appropriate, replace the current tree with the alternate tree
                        if (Qi > 0) {
                            // Replace the main FIMT-DD tree at a subtree
                            alternateTree.setAdaptable(true);
                            alternateTree.Alternate = false;
                            if (parent != null) {
                                parent.setChild(parent.getChildIndex(this), alternateTree);
                                alternateTree.setParent(parent);
                                tree.numTrees = tree.numTrees - this.getNumSubtrees() + alternateTree.getNumSubtrees();
                                alternateTree.alternateTree = null;
                            } else {
                                // Or occasionally at the root of the tree
                                tree.numTrees = tree.numTrees - this.getNumSubtrees() + alternateTree.getNumSubtrees();
                                tree.treeRoot = alternateTree;
                                alternateTree.alternateTree = null;
                            }
                            // The swap may have pushed the tree count over the budget.
                            tree.removeExcessTrees();
                        }
                        // Otherwise, check if the alternate tree should be discarded
                        else if (QiAverage < lossStatistics.getValue(2) && lossStatistics.getValue(0) >= (10 * tree.AlternateTreeTMinOption.getValue()) || weightSeen >= tree.AlternateTreeTimeOption.getValue()) {
                            // tree.nodesToAdapt.remove(tree.nodesToAdapt.indexOf(this));
                            this.alternateTree = null;
                            setAdaptable(true);
                        }
                        lossStatistics.setValue(2, QiAverage);
                    }
                }
            }
            return processed;
        }

        /** Route the instance to the matching child (growing a leaf if needed) and return its prediction. */
        public double[] getPrediction(Instance inst, ORTO tree) {
            int branch = splitTest.branchForInstance(inst);
            Node child = children.get(branch);
            if (child == null) {
                tree.maxID++;
                child = new ActiveLearningNode(tree.maxID);
                this.setChild(branch, child);
                child.setParent(this);
            }
            return child.getPrediction(inst, tree);
        }
    }

    /**
     * Option node: an instance is sent to ALL children; their predictions are
     * either aggregated or taken from the best-performing subtree.
     */
    public static class OptionNode extends InnerNode {

        private static final long serialVersionUID = 1L;

        // Per-child faded sum of squared losses and faded instance counts,
        // used to rank the option subtrees (see getFFRatio / directionForBestTree).
        protected double[] optionFFSSL;
        protected double[] optionFFSeen;

        // protected double[] optionBaseFFSSL;

        public OptionNode(int id) {
            super(id);
        }

        /** Reset the per-child fading-factor statistics (call after (re)building children). */
        public void resetFF() {
            this.optionFFSSL = new double[this.children.size()];
            this.optionFFSeen = new double[this.children.size()];
            // this.optionBaseFFSSL = new double[this.children.size()];
            for (int i = 0; i < this.children.size(); i++) {
                this.optionFFSSL[i] = 0.0;
                this.optionFFSeen[i] = 0.0;
                // this.optionBaseFFSSL[i] = 0.0;
            }
        }

        @Override
        public boolean isLeaf() {
            return false;
        }

        // Each child of an option node is a full tree of its own.
        public int getNumSubtrees() {
            int num = 0;
            for (Node child : children) {
                num += child.getNumSubtrees();
            }
            return num;
        }

        /** Index of the child with the lowest faded mean squared loss. */
        public int directionForBestTree() {
            int d = 0;
            double tmp = 0.0, min = Double.MAX_VALUE;
            for (int i = 0; i < children.size(); i++) {
                tmp = optionFFSSL[i] / optionFFSeen[i];
                if (tmp < min) {
                    min = tmp;
                    d = i;
                }
            }
            return d;
        }

        public double[] getPrediction(Instance inst, ORTO tree) {
            double[][] predictions = new double[this.children.size()][];
            // Aggregation mode 1 = "best tree"; any other mode aggregates all children.
            if (tree.OptionNodeAggregationOption.getChosenIndex() != 1) {
                int i = 0;
                for (i = 0; i < this.children.size(); i++) {
predictions[i] = this.getChild(i).getPrediction(inst, tree);
                }
                return aggregate(predictions, tree);
            } else {
                int d = directionForBestTree();
                return this.getChild(d).getPrediction(inst, tree);
            }
        }

        public double[] processInstance(Instance inst, ORTO tree) {
            double[][] processed = new double[this.numChildren()][];
            int i = 0;
            for (i = 0; i < this.numChildren(); i++) {
                processed[i] = this.getChild(i).processInstance(inst, tree);
                // All the children get to see the instance
            }

            double[] prediction;
            if (tree.OptionNodeAggregationOption.getChosenIndex() != 1) {
                prediction = aggregate(processed, tree);
            } else {
                prediction = processed[directionForBestTree()];
            }

            if (Adaptable) {
                if (this.alternateTree == null) {
                    // Retrieve the error for the found leaf node
                    double PHerror = inst.classValue() - prediction[0];

                    // Back-propagate the error through all the parent nodes
                    if (PageHinckleyTest(PHerror, tree.PageHinckleyThresholdOption.getValue())) {
                        // NOTE(review): removing from tree.nodesToAdapt inside an enhanced-for
                        // over the same list risks ConcurrentModificationException — verify.
                        for (Node node : tree.nodesToAdapt) {
                            if (children.contains(node)) {
                                tree.nodesToAdapt.remove(node);
                            }
                        }
                        tree.nodesToAdapt.add(this);
                    }
                } else {
                    // If an alternate tree already exists, check if the current tree should be replaced with it,
                    // or if the alternate tree should be discarded.
                    double[] predictionAlt = this.alternateTree.processInstance(inst, tree);

                    // Update the loss statistics for the alternate tree
                    double qOrg = prediction[1];
                    double qAlt = predictionAlt[1];

                    // Compute the Qi statistics
                    double Qi = Math.log(qOrg / qAlt);
                    lossStatistics.addToValue(0,1);
                    lossStatistics.addToValue(1,Qi);
                    double QiAverage = lossStatistics.getValue(1) / lossStatistics.getValue(0);

                    if (weightSeen - previousWeight >= tree.AlternateTreeTMinOption.getValue()) {
                        // Update the weight at which a decision was tested for
                        previousWeight = weightSeen;

                        // If appropriate, replace the current tree with the alternate tree
                        if (Qi > 0) {
                            alternateTree.setAdaptable(true);
                            alternateTree.Alternate = false;
                            if (parent != null) {
                                // Replace the main tree at a subtree
                                parent.setChild(parent.getChildIndex(this), alternateTree);
                                tree.numTrees = tree.numTrees - this.getNumSubtrees() + alternateTree.getNumSubtrees();
                                alternateTree.setParent(parent);
                                this.alternateTree = null;
                            } else {
                                // Or occasionally at the root of the tree
                                tree.treeRoot = this.alternateTree;
                                tree.numTrees = tree.numTrees - this.getNumSubtrees() + alternateTree.getNumSubtrees();
                                tree.Adaptable = true;
                                this.alternateTree = null;
                            }
                            tree.removeExcessTrees();
                        }
                        // Otherwise, check if the alternate tree should be discarded
                        else if (QiAverage < lossStatistics.getValue(2) && lossStatistics.getValue(0) >= (10 * tree.AlternateTreeTMinOption.getValue()) || weightSeen >= tree.AlternateTreeTimeOption.getValue()) {
                            // tree.nodesToAdapt.remove(tree.nodesToAdapt.indexOf(this)); ?
                            this.alternateTree = null;
                            setAdaptable(true);
                        }
                        lossStatistics.setValue(2, QiAverage);
                    }
                }

                // Update the per-child faded loss statistics once past the warm-up period.
                double sqLoss;
                if (weightSeen + 1 > tree.gracePeriodOption.getValue() + 50) {
                    for (i = 0; i < this.children.size(); i++) {
                        sqLoss = Math.pow(processed[i][0] - inst.classValue(), 2);
                        optionFFSSL[i] = optionFFSSL[i] * tree.OptionFadingFactorOption.getValue() + sqLoss;
                        optionFFSeen[i] = optionFFSeen[i] * tree.OptionFadingFactorOption.getValue() + 1;
                    }
                }
            }
            weightSeen++;
            return prediction;
        }

        // Combine the per-child prediction vectors; mode 0 averages them component-wise.
        private double[] aggregate(double[][] predictions, ORTO tree) {
            if (tree.OptionNodeAggregationOption.getChosenIndex() == 0) { // Average
                double[] average = new double[predictions[0].length];
                for (int i = 0; i < predictions[0].length; i++) {
                    average[i] = 0.0;
                }
                for (int i = 0; i < predictions[0].length; i++) {
                    for (int j = 0; j < predictions.length; j++) {
                        average[i] += predictions[j][i];
                    }
                    average[i] = average[i] / predictions.length;
                }
                return average;
            } else {
                assert false : tree.OptionNodeAggregationOption.getChosenLabel();
                return new double[] {0.0};
            }
        }

        /** Faded mean squared loss of the given child subtree. */
        public double getFFRatio(int childIndex) {
            return optionFFSSL[childIndex] / optionFFSeen[childIndex];
        }
    }

    /**
     * Learning leaf: maintains a perceptron, per-attribute observers and the
     * statistics needed to evaluate candidate splits.
     */
    public static class ActiveLearningNode extends Node {

        private static final long serialVersionUID = 1L;

        // Create a Perceptron model that carries out the actual learning in each node
        public ORTOPerceptron learningModel = new ORTOPerceptron();

        // The statistics for this node:
        // Sum of y values
        // Sum of squared y values
        protected DoubleVector nodeStatistics = new DoubleVector();
        protected DoubleVector splitRatioStatistics = new DoubleVector();

        // The error values for the Page Hinckley test
        // PHmT = the cumulative sum of the errors
        // PHMT = the minimum error value seen so far
        protected double PHmT = 0;
        protected double PHMT = Double.MAX_VALUE;

        protected int examplesSeenAtLastSplitEvaluation;
        protected int examplesSeen = 0;

        protected AutoExpandVector<AttributeClassObserver> attributeObservers = new
AutoExpandVector<AttributeClassObserver>();

        public ActiveLearningNode(int id) {
            super(id);
            this.learningModel = new ORTOPerceptron();
        }

        @Override
        public int calcByteSize() {
            return super.calcByteSize() + (int) (SizeOf.fullSizeOf(this.attributeObservers)) + (int) (SizeOf.fullSizeOf(this.learningModel));
        }

        /**
         * Return the best split suggestions for this node using the given split criteria
         */
        public AttributeSplitSuggestion[] getBestSplitSuggestions(SplitCriterion criterion, ORTO tree) {
            List<AttributeSplitSuggestion> bestSuggestions = new LinkedList<AttributeSplitSuggestion>();

            // Set the nodeStatistics up as the preSplitDistribution, rather than the observedClassDistribution
            double[] nodeSplitDist = this.nodeStatistics.getArrayCopy();

            for (int i = 0; i < this.attributeObservers.size(); i++) {
                AttributeClassObserver obs = this.attributeObservers.get(i);
                if (obs != null) {
                    // AT THIS STAGE NON-NUMERIC ATTRIBUTES ARE IGNORED
                    AttributeSplitSuggestion bestSuggestion = null;
                    if (obs instanceof FIMTDDNumericAttributeClassObserver) {
                        bestSuggestion = obs.getBestEvaluatedSplitSuggestion(criterion, nodeSplitDist, i, true /*ht.binarySplitsOption.isSet()*/);
                    }
                    if (bestSuggestion != null) {
                        bestSuggestions.add(bestSuggestion);
                    }
                }
            }
            return bestSuggestions.toArray(new AttributeSplitSuggestion[bestSuggestions.size()]);
        }

        /** Stop observing an attribute that was judged a poor split candidate. */
        public void disableAttribute(int attIndex) {
            this.attributeObservers.set(attIndex, new NullAttributeClassObserver());
        }

        /**
         * Normalized absolute error fed to the Page-Hinckley test.
         * NOTE(review): the variance term divides (sumY * sumY) by nodeStatistics.getValue(1)
         * instead of the example count, and "mean" is computed as sumY^2 / sumY; also index 1
         * is never written in processInstance, so getValue(1) stays 0 — all three look
         * suspect compared to the sd formula in processInstance. Verify against FIMT-DD.
         */
        public double getPHError(Instance inst) {
            double sd = Math.sqrt((nodeStatistics.getValue(2) - ((nodeStatistics.getValue(1) * nodeStatistics.getValue(1))/nodeStatistics.getValue(1)))/examplesSeen);
            double mean = nodeStatistics.getValue(2) / nodeStatistics.getValue(1);

            // AbsErr(inst) - (SumAbsErr + AbsErr(inst)) / (N + 1) || SumAbsErr only contains errors for the first N examples and not the last one
            return Math.abs( (inst.classValue() - learningModel.prediction(inst)) / sd ) - ((nodeStatistics.getValue(3) + Math.abs(((inst.classValue()-mean)/sd)
                - ((learningModel.prediction(inst)-mean)/sd))) / (nodeStatistics.getValue(1)+1));
        }

        /**
         * Returns the squared error, for use in determining if an alternate tree is performing better than an original
         * tree, or if the alternate tree should be deleted
         */
        public double getSquaredError() {
            return nodeStatistics.getValue(4);
        }

        /**
         * Return the error for a given instance
         */
        public double getError(Instance inst) {
            return inst.classValue() - learningModel.prediction(inst);
        }

        public double[] processInstance(Instance inst, ORTO tree) {
            // Predict first, then learn from the instance (test-then-train).
            double prediction = getPrediction(inst, tree)[0];
            examplesSeen++;

            // Update the statistics for this node
            // number of instances passing through the node
            nodeStatistics.addToValue(0, 1);
            // sum of y values
            // NOTE(review): there is no addToValue(1, inst.classValue()) call, so index 1
            // (the y-sum read below and in getPHError) is never updated — verify.
            // sum of squared y values
            nodeStatistics.addToValue(2, inst.classValue() * inst.classValue());
            // sum of absolute errors
            // Normalize values prior to calculating absolute error
            double sd = Math.sqrt((nodeStatistics.getValue(2) - ((nodeStatistics.getValue(1) * nodeStatistics.getValue(1))/examplesSeen))/examplesSeen);
            double error = this.getError(inst);
            nodeStatistics.addToValue(3, Math.abs(error / sd));
            // sum of squared errors (faded rather than accumulated)
            // nodeStatistics.addToValue(4, error * error);
            nodeStatistics.setValue(4, nodeStatistics.getValue(4) * tree.AlternateTreeFadingFactorOption.getValue() + error * error);

            double ph = getPHError(inst);
            learningModel.trainOnInstanceImpl(inst, tree);

            // Feed the instance to every numeric attribute observer, creating them on demand.
            for (int i = 0; i < inst.numAttributes() - 1; i++) {
                int instAttIndex = modelAttIndexToInstanceAttIndex(i, inst);
                AttributeClassObserver obs = this.attributeObservers.get(i);
                if (obs == null) {
                    // At this stage all nominal attributes are ignored
                    if (inst.attribute(instAttIndex).isNumeric()) {
                        obs = tree.newNumericClassObserver();
                        this.attributeObservers.set(i, obs);
                    }
                }
                if (obs != null) {
                    ((FIMTDDNumericAttributeClassObserver) obs).observeAttributeClass(inst.value(instAttIndex), inst.classValue(), inst.weight());
                }
            }

            // If it has seen Nmin examples since it was last tested for
// splitting, attempt a split of this node
            if (examplesSeen - examplesSeenAtLastSplitEvaluation >= tree.gracePeriodOption.getValue()) {

                // Set the split criterion to use to the SDR split criterion as described by Ikonomovska et al.
                SplitCriterion splitCriterion = (SplitCriterion) tree.getPreparedClassOption(tree.splitCriterionOption);

                // Using this criterion, find the best split per attribute and rank the results
                AttributeSplitSuggestion[] bestSplitSuggestions = getBestSplitSuggestions(splitCriterion, tree);
                List<AttributeSplitSuggestion> acceptedSplits = new LinkedList<AttributeSplitSuggestion>();
                Arrays.sort(bestSplitSuggestions);

                // Declare a variable to determine the number of splits to be performed
                int numSplits = 0;

                // If only one split was returned, use it
                if (bestSplitSuggestions.length == 1) {
                    numSplits = 1;
                    acceptedSplits.add(bestSplitSuggestions[0]);
                } else if (bestSplitSuggestions.length > 1) {
                    // Otherwise, consider which of the splits proposed may be worth trying

                    // Determine the Hoeffding bound value, used to select how many instances should be used to make a test decision
                    // to feel reasonably confident that the test chosen by this sample is the same as what would be chosen using infinite examples
                    double hoeffdingBound = computeHoeffdingBound(1, tree.splitConfidenceOption.getValue(), examplesSeen);

                    // Determine the top two ranked splitting suggestions
                    AttributeSplitSuggestion bestSuggestion = bestSplitSuggestions[bestSplitSuggestions.length - 1];
                    AttributeSplitSuggestion secondBestSuggestion = bestSplitSuggestions[bestSplitSuggestions.length - 2];
                    // assert false : bestSuggestion.merit;

                    // If the upper bound of the sample mean for the ratio of SDR(best suggestion) to SDR(second best suggestion),
                    // as determined using the Hoeffding bound, is less than 1, then the true mean is also less than 1, and thus at this
                    // particular moment of observation the bestSuggestion is indeed the best split option with confidence 1-delta, and
                    // splitting should occur.
                    // Alternatively, if two or more splits are very similar or identical in terms of their splits, then a threshold limit
                    // (default 0.05) is applied to the Hoeffding bound; if the Hoeffding bound is smaller than this limit then the two
                    // competing attributes are equally good, and the split will be made on the one with the higher SDR value.
                    if (secondBestSuggestion.merit / bestSuggestion.merit < 1 - hoeffdingBound) {
                        numSplits = 1;
                        acceptedSplits.add(bestSuggestion);
                    } else if (tree.numTrees < tree.MaxTreesOption.getValue() && getLevel() <= tree.MaxOptionLevelOption.getValue()) {
                        // Ambiguous best split and room in the tree budget: accept every
                        // suggestion close enough to the best as a candidate option branch.
                        for (AttributeSplitSuggestion suggestion : bestSplitSuggestions) {
                            if (suggestion.merit / bestSuggestion.merit >= 1 - hoeffdingBound) {
                                numSplits++;
                                acceptedSplits.add(suggestion);
                            }
                        }
                    } else if (hoeffdingBound < tree.tieThresholdOption.getValue()) {
                        // NOTE(review): on a tie this adds bestSplitSuggestions[0], the LOWEST-ranked
                        // suggestion after Arrays.sort, although the comment above says the higher
                        // SDR value should win — verify.
                        numSplits = 1;
                        acceptedSplits.add(bestSplitSuggestions[0]);
                    } else {
                        // If the splitting criterion was not met, initiate pruning of the E-BST structures in each attribute observer
                        for (int i = 0; i < attributeObservers.size(); i++) {
                            AttributeClassObserver obs = attributeObservers.get(i);
                            if (obs != null) {
                                ((FIMTDDNumericAttributeClassObserver) obs).removeBadSplits(splitCriterion, secondBestSuggestion.merit / bestSuggestion.merit, bestSuggestion.merit, hoeffdingBound);
                            }
                        }
                    }

                    // If the user has selected this option, it is also possible to remove poor attributes at this stage
                    if ((tree.removePoorAttsOption != null) && tree.removePoorAttsOption.isSet()) {
                        Set<Integer> poorAtts = new HashSet<Integer>();
                        // scan 1 - add any poor to set
                        for (int i = 0; i < bestSplitSuggestions.length; i++) {
                            if (bestSplitSuggestions[i].splitTest != null) {
                                int[] splitAtts = bestSplitSuggestions[i].splitTest.getAttsTestDependsOn();
                                if (splitAtts.length == 1) {
                                    if (((bestSuggestion.merit / secondBestSuggestion.merit) + hoeffdingBound) < 1) {
                                        poorAtts.add(new Integer(splitAtts[0]));
                                    }
                                }
                            }
                        }
                        // scan 2 - remove good ones from set
                        for (int i = 0; i < bestSplitSuggestions.length; i++) {
                            if (bestSplitSuggestions[i].splitTest != null) {
                                int[] splitAtts = bestSplitSuggestions[i].splitTest.getAttsTestDependsOn();
                                if (splitAtts.length == 1) {
                                    // NOTE(review): this is the same condition as scan 1, so every attribute
                                    // added is immediately removed again; one of the two conditions looks
                                    // inverted — verify against the Hoeffding tree implementation.
                                    if (((bestSuggestion.merit / secondBestSuggestion.merit) + hoeffdingBound) < 1) {
                                        poorAtts.remove(new Integer(splitAtts[0]));
                                    }
                                }
                            }
                        }
                        for (int poorAtt : poorAtts) {
                            this.disableAttribute(poorAtt);
                        }
                    }
                }

                // assert numSplits == 0 : numSplits;

                // If the splitting criterion were met, split the current node using the chosen attribute test, and
                // make two new branches leading to (empty) leaves
                if (numSplits > 0) {
                    double optionFactor = numSplits * Math.pow(tree.OptionDecayFactorOption.getValue(), (double) getLevel());

                    // Deactivate this node if the best split was to do nothing
                    if (numSplits == 1 || optionFactor < 2.0 || tree.MaxTreesOption.getValue() - tree.numTrees <= 1) {
                        // Plain split node: one fresh learning leaf per branch of the chosen test.
                        AttributeSplitSuggestion splitDecision = acceptedSplits.get(0);
                        tree.maxID++;
                        SplitNode newSplit = new SplitNode(splitDecision.splitTest, tree.maxID);
                        newSplit.Adaptable = Adaptable;
                        for (int i = 0; i < splitDecision.numSplits(); i++) {
                            tree.maxID++;
                            ActiveLearningNode newChild = new ActiveLearningNode(tree.maxID);
                            newChild.setParent(newSplit);
                            newChild.Adaptable = Adaptable;
                            newSplit.setChild(i, newChild);
                        }
                        tree.leafNodeCount--;
                        tree.innerNodeCount++;
                        tree.leafNodeCount += splitDecision.numSplits();
                        if (parent == null) {
                            tree.treeRoot = newSplit;
                        } else {
                            parent.setChild(parent.getChildIndex(this), newSplit);
                            newSplit.setParent(parent);
                        }
                    } else {
                        // Option node: grow one split-node subtree per accepted suggestion,
                        // bounded by the option factor and the global tree budget.
                        tree.maxID++;
                        OptionNode optionNode = new OptionNode(tree.maxID);
                        optionNode.Adaptable = Adaptable;
                        tree.leafNodeCount--;
                        int j = 0;

                        for (AttributeSplitSuggestion splitDecision : acceptedSplits) {
                            if (j > optionFactor || tree.MaxTreesOption.getValue() - tree.numTrees <= 0) {
                                break;
                            }
                            tree.maxID++;
                            SplitNode newSplit = new SplitNode(splitDecision.splitTest, tree.maxID);
                            newSplit.Adaptable = Adaptable;
                            for (int i = 0; i < splitDecision.numSplits(); i++) {
                                tree.maxID++;
                                ActiveLearningNode newChild = new ActiveLearningNode(tree.maxID);
                                newChild.setParent(newSplit);
                                newSplit.setChild(i, newChild);
                                newChild.Adaptable = Adaptable;
                            }
                            tree.leafNodeCount += splitDecision.numSplits();
                            tree.innerNodeCount++;
                            tree.numTrees++;
                            newSplit.setParent(optionNode);
                            optionNode.setChild(j, newSplit);
                            j++;
                        }

                        tree.innerNodeCount++;
                        tree.optionNodeCount++;
                        if (parent == null) {
                            tree.treeRoot = optionNode;
                        } else {
                            parent.setChild(parent.getChildIndex(this), optionNode);
                            optionNode.setParent(parent);
                        }
                        optionNode.resetFF();
                    }
                }

                // Take note of how many instances were seen when this split evaluation was made, so we know when to perform the next split evaluation
                examplesSeenAtLastSplitEvaluation = examplesSeen;
            }

            // {prediction, faded squared error, examples seen, Page-Hinckley error}
            return new double[] {prediction, nodeStatistics.getValue(4), examplesSeen, ph};
        }

        public double[] getPrediction(Instance inst, ORTO tree) {
            return new double[] {this.learningModel.prediction(inst), this.nodeStatistics.getValue(4), this.examplesSeen};
        }
    }

    /**
     * A Perceptron classifier modified to conform to the specifications of Ikonomovska et al.
*/
    public static class ORTOPerceptron extends AbstractMOAObject {

        private static final long serialVersionUID = 1L;

        // The Perception weights (one per attribute; the last slot is the bias term)
        protected double[] weightAttribute;

        // Statistics used for error calculations (per-attribute sums and sums of squares)
        protected DoubleVector attributeStatistics = new DoubleVector();
        protected DoubleVector squaredAttributeStatistics = new DoubleVector();

        // The number of instances contributing to this model
        protected int instancesSeen = 0;

        // If the model should be reset or not
        protected boolean reset;

        @Override
        public void getDescription(StringBuilder sb, int indent) {
            //TODO Auto-generated method stub
        }

        // Copy constructor; note it shares the source's weight array reference.
        public ORTOPerceptron(ORTOPerceptron copy) {
            this.weightAttribute = copy.getWeights();
        }

        public ORTOPerceptron() {
            this.reset = true;
        }

        public void setWeights(double[] w) {
            this.weightAttribute = w;
        }

        public double[] getWeights() {
            return this.weightAttribute;
        }

        /**
         * A method to reset the model
         */
        public void resetLearningImpl() {
            this.reset = true;
        }

        /**
         * Update the model using the provided instance
         */
        public void trainOnInstanceImpl(Instance inst, ORTO ft) {
            // Initialize Perceptron if necessary
            if (this.reset == true) {
                this.reset = false;
                this.weightAttribute = new double[inst.numAttributes()];
                this.instancesSeen = 0;
                this.attributeStatistics = new DoubleVector();
                this.squaredAttributeStatistics = new DoubleVector();
                // Random initial weights in [-1, 1)
                for (int j = 0; j < inst.numAttributes(); j++) {
                    weightAttribute[j] = 2 * ft.classifierRandom.nextDouble() - 1;
                }
            }

            // Update attribute statistics
            instancesSeen++;
            for(int j = 0; j < inst.numAttributes() -1; j++) {
                attributeStatistics.addToValue(j, inst.value(j));
                squaredAttributeStatistics.addToValue(j, inst.value(j)*inst.value(j));
            }

            // Update weights: either a constant learning rate or a decaying one.
            double learningRatio = 0.0;
            if(ft.LearningRatioDecayOrConstOption.isSet()){
                learningRatio = ft.LearningRatioOption.getValue();
            } else {
                learningRatio = ft.initLearnRate / (1 + instancesSeen * ft.learnRateDecay);
            }
            // double learningRatio = ft.learningRatioOption.getValue();

            double actualClass = inst.classValue();
            double predictedClass = this.prediction(inst);

            // SET DELTA TO ACTUAL - PREDICTED, NOT PREDICTED - ACTUAL AS SAID IN PAPER
            double delta = actualClass - predictedClass;

            for (int j = 0; j < inst.numAttributes() - 1; j++) {
                if (inst.attribute(j).isNumeric()) {
                    // Update weights. Ensure attribute values are normalized first
                    double sd = Math.sqrt((squaredAttributeStatistics.getValue(j) - ((attributeStatistics.getValue(j) * attributeStatistics.getValue(j))/instancesSeen))/instancesSeen);
                    double instanceValue = 0;
                    // Limit found in implementation by Ikonomovska et al (2011)
                    if (sd > 0.0000001) {
                        instanceValue = (inst.value(j) - (attributeStatistics.getValue(j)/instancesSeen))/(3*sd);
                    }
                    this.weightAttribute[j] += learningRatio * delta * instanceValue;
                }
            }
            // Bias weight update
            this.weightAttribute[inst.numAttributes() - 1] += learningRatio * delta;
        }

        /**
         * Output the prediction made by this perceptron on the given instance.
         * NOTE(review): training normalizes attribute values before the weight update,
         * while prediction multiplies the weights by the RAW attribute values —
         * verify this asymmetry against the reference implementation.
         */
        public double prediction(Instance inst) {
            double prediction = 0;
            if (this.reset == false) {
                for (int j = 0; j < inst.numAttributes() - 1; j++) {
                    if(inst.attribute(j).isNumeric()) {
                        prediction += this.weightAttribute[j] * inst.value(j);
                    }
                }
                prediction += this.weightAttribute[inst.numAttributes() - 1];
            }
            // Return prediction to 3dp
            return (double)Math.round(prediction * 1000) / 1000;
        }
    }

    //============================= END CLASSES ==============================//

    //=============================== METHODS ================================//

    @Override
    public String getPurposeString() {
        return "Implementation of the ORTO tree as described by Ikonomovska et al.";
    }

    // For the moment at least, force the split criterion to be SDRSplitCriterion and the
    // numeric estimator to be FIMTLDDNumericAttributeClassObserver
    public ORTO() {
        // numericEstimatorOption = new ClassOption("numericEstimator",
        //     'n', "Numeric estimator to use.", FIMTDDNumericAttributeClassObserver.class,
        //     "FIMTDDNumericAttributeClassObserver");
        splitCriterionOption = new
ClassOption("splitCriterion", 's', "Split criterion to use.", VarianceReductionSplitCriterion.class, "VarianceReductionSplitCriterion");
    }

    @Override
    public void resetLearningImpl() {
        // Drop the model and all bookkeeping counters/timers.
        this.treeRoot = null;
        this.numTrees = 1;
        this.innerNodeCount = 0;
        this.leafNodeCount = 0;
        this.optionNodeCount = 0;
        this.maxID = 0;
        this.learnTime = 0.0;
        this.predictTime = 0.0;
    }

    public boolean isRandomizable() {
        // The per-leaf perceptrons are initialized with random weights.
        return true;
    }

    // Lazily create the root learning leaf on first use.
    protected void checkRoot() {
        if (treeRoot == null) {
            maxID++;
            treeRoot = new ActiveLearningNode(maxID);
            leafNodeCount = 1;
        }
    }

    @Override
    protected Measurement[] getModelMeasurementsImpl() {
        // if (this.treeRoot != null) {
        //     this.treeRoot.calculateDetph(this);
        // }
        return new Measurement[]{
                new Measurement("number of subtrees", this.numTrees),
                new Measurement("tree size (nodes)", this.leafNodeCount + this.innerNodeCount),
                new Measurement("tree size (leaves)", this.leafNodeCount),
                new Measurement("number of option nodes", this.optionNodeCount),
                // new Measurement("tree depth", this.maxDepth),
                // new Measurement("option count", this.countOptions()),
                // new Measurement("learning time", learnTime),
                // new Measurement("prediction time", predictTime),
                /*new Measurement("tree depth", measureTreeDepth()),
                new Measurement("active leaf byte size estimate", this.activeLeafByteSizeEstimate),
                new Measurement("inactive leaf byte size estimate", this.inactiveLeafByteSizeEstimate),
                new Measurement("byte size estimate overhead", this.byteSizeEstimateOverheadFraction),
                new Measurement("maximum prediction paths used", this.maxPredictionPaths) */
        };
    }

    public int calcByteSize() {
        int size = (int) SizeOf.sizeOf(this);
        if (this.treeRoot != null) {
            size += this.treeRoot.calcByteSize();
        }
        return size;
    }

    @Override
    public void getModelDescription(StringBuilder out, int indent) {
    }

    @Override
    public double[] getVotesForInstance(Instance inst) {
        if (this.treeRoot != null) {
            double start = System.nanoTime();
            double[] out = {this.treeRoot.getPrediction(inst, this)[0]};
            predictTime += System.nanoTime() - start;
            return out;
        }
        // No model yet: abstain.
        return new double[0];
    }

    //================= TRAIN and TEST ================//

    /**
     * Method for updating (training) the model using a new instance
     */
    @Override
    public void trainOnInstanceImpl(Instance inst) {
        double start = System.nanoTime();
        checkRoot();
        treeRoot.processInstance(inst, this);

        // Start an alternate subtree at every node the Page-Hinckley test flagged
        // during processInstance.
        for (InnerNode node : nodesToAdapt) {
            if (node.Adaptable) {
                maxID++;
                node.alternateTree = new ActiveLearningNode(maxID);
                node.alternateTree.Adaptable = false;
                node.alternateTree.Alternate = true;
                node.alternateTree.alternateTree = node;
                node.setAdaptable(false);

                // Reset the node statistics
                node.lossStatistics.setValue(0,0);
                node.lossStatistics.setValue(1,0);
                node.lossStatistics.setValue(2,0);
                node.PHmT = 0;
                node.PHMT = Double.MAX_VALUE;
                // TODO Does this reset too?
                node.weightSeen = 0;
                node.previousWeight = 0;
            }
        }
        nodesToAdapt = new ArrayList<InnerNode>();
        learnTime += System.nanoTime() - start;
    }

    protected AttributeClassObserver newNumericClassObserver() {
        AttributeClassObserver numericClassObserver = (AttributeClassObserver) getPreparedClassOption(numericEstimatorOption);
        // FIXME fix this
        // AttributeClassObserver observer = new FIMTDDNumericAttributeClassObserver();
        return (AttributeClassObserver) numericClassObserver;
    }

    /** One-sided Hoeffding bound for a variable with the given range. */
    public static double computeHoeffdingBound(double range, double confidence, double n) {
        return Math.sqrt(((range * range) * Math.log(1.0 / confidence)) / (2.0 * n));
    }

    /**
     * Depth-first search for the option-node child with the highest faded mean
     * squared loss, i.e. the best candidate for pruning.
     * Assumes FF ratios are non-negative (the initial best is Double.MIN_VALUE,
     * the smallest POSITIVE double); returns null if no option children exist.
     */
    protected Node findWorstOption() {
        Stack<Node> stack = new Stack<Node>();
        stack.add(this.treeRoot);
        double ratio = Double.MIN_VALUE;
        Node out = null;

        while (!stack.empty()) {
            Node node = stack.pop();
            if (node.parent instanceof OptionNode) {
                OptionNode myParent = (OptionNode) node.parent;
                int myIndex = myParent.getChildIndex(node);
                double myRatio = myParent.getFFRatio(myIndex);
                if (myRatio > ratio) {
                    ratio = myRatio;
                    out = node;
                }
            }
            if (node instanceof InnerNode) {
                for (Node child : ((InnerNode) node).children) {
                    stack.add(child);
                }
            }
        }
        return out;
    }
/**
 * Prunes option subtrees until the number of subtrees is within the
 * user-configured maximum. Each iteration removes the worst option branch
 * (highest fading-factor ratio, per {@link #findWorstOption()}).
 *
 * <p>If the option node would be left with a single child, that child is
 * spliced directly into the option node's parent; otherwise the option
 * node's child list and per-child statistics arrays are compacted.</p>
 */
protected void removeExcessTrees() {
    while (numTrees > MaxTreesOption.getValue()) {
        Node option = findWorstOption();
        OptionNode parent = (OptionNode) option.parent;
        int index = parent.getChildIndex(option);
        if (parent.children.size() == 2) {
            // Only one sibling remains: replace the option node with it.
            // NOTE(review): if the option node is the tree root,
            // parent.parent is null and this NPEs — TODO confirm an
            // option node can never be the root.
            parent.children.remove(index);
            for (Node chld : parent.children) {
                chld.parent = parent.parent;
                parent.parent.setChild(parent.parent.getChildIndex(parent), chld);
            }
        } else {
            // Compact children and the parallel statistics arrays,
            // skipping the pruned option branch.
            AutoExpandVector<Node> children = new AutoExpandVector<Node>();
            double[] optionFFSSL = new double[parent.children.size() - 1];
            double[] optionFFSeen = new double[parent.children.size() - 1];
            int seen = 0;
            // BUG FIX: the original loop stopped at size()-1, so whenever
            // the pruned branch was not the last child, the last sibling
            // was silently dropped from the tree; it also wrote the
            // statistics at the source index instead of the compacted
            // index, leaving a stale zero slot at the pruned position.
            // Iterate over ALL children and shift destination indices by
            // one once the pruned branch has been passed.
            for (int i = 0; i < parent.children.size(); i++) {
                if (parent.getChild(i) != option) {
                    children.add(parent.getChild(i));
                    optionFFSSL[i - seen] = parent.optionFFSSL[i];
                    optionFFSeen[i - seen] = parent.optionFFSeen[i];
                } else {
                    seen = 1;
                }
            }
            parent.children = children;
            parent.optionFFSSL = optionFFSSL;
            parent.optionFFSeen = optionFFSeen;
            assert parent.children.size() == parent.optionFFSSL.length;
        }
        // TODO(review): optionNodeCount is not decremented when an option
        // node collapses in the two-child branch above — verify whether
        // the counter is meant to track live option nodes.
        numTrees--;
    }
}
}
Java
/* * HoeffdingTree.java * Copyright (C) 2007 University of Waikato, Hamilton, New Zealand * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.classifiers.trees; import java.util.Arrays; import java.util.Comparator; import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Set; import moa.AbstractMOAObject; import moa.classifiers.AbstractClassifier; import moa.classifiers.bayes.NaiveBayes; import moa.classifiers.core.attributeclassobservers.AttributeClassObserver; import moa.classifiers.core.AttributeSplitSuggestion; import moa.classifiers.core.attributeclassobservers.DiscreteAttributeClassObserver; import moa.classifiers.core.conditionaltests.InstanceConditionalTest; import moa.classifiers.core.attributeclassobservers.NullAttributeClassObserver; import moa.classifiers.core.attributeclassobservers.NumericAttributeClassObserver; import moa.classifiers.core.splitcriteria.SplitCriterion; import moa.core.AutoExpandVector; import moa.core.DoubleVector; import moa.core.Measurement; import moa.core.StringUtils; import moa.options.ClassOption; import moa.options.FlagOption; import moa.options.FloatOption; import moa.options.IntOption; import moa.core.SizeOf; import moa.options.*; import weka.core.Instance; import weka.core.Utils; /** * Hoeffding Tree or VFDT. 
* * A Hoeffding tree is an incremental, anytime decision tree induction algorithm * that is capable of learning from massive data streams, assuming that the * distribution generating examples does not change over time. Hoeffding trees * exploit the fact that a small sample can often be enough to choose an optimal * splitting attribute. This idea is supported mathematically by the Hoeffding * bound, which quantifies the number of observations (in our case, examples) * needed to estimate some statistics within a prescribed precision (in our * case, the goodness of an attribute).</p> <p>A theoretically appealing feature * of Hoeffding Trees not shared by other incremental decision tree learners is * that it has sound guarantees of performance. Using the Hoeffding bound one * can show that its output is asymptotically nearly identical to that of a * non-incremental learner using infinitely many examples. See for details:</p> * * <p>G. Hulten, L. Spencer, and P. Domingos. Mining time-changing data streams. * In KDD’01, pages 97–106, San Francisco, CA, 2001. ACM Press.</p> * * <p>Parameters:</p> <ul> <li> -m : Maximum memory consumed by the tree</li> * <li> -n : Numeric estimator to use : <ul> <li>Gaussian approximation * evaluating 10 splitpoints</li> <li>Gaussian approximation evaluating 100 * splitpoints</li> <li>Greenwald-Khanna quantile summary with 10 tuples</li> * <li>Greenwald-Khanna quantile summary with 100 tuples</li> * <li>Greenwald-Khanna quantile summary with 1000 tuples</li> <li>VFML method * with 10 bins</li> <li>VFML method with 100 bins</li> <li>VFML method with * 1000 bins</li> <li>Exhaustive binary tree</li> </ul> </li> <li> -e : How many * instances between memory consumption checks</li> <li> -g : The number of * instances a leaf should observe between split attempts</li> <li> -s : Split * criterion to use. 
Example : InfoGainSplitCriterion</li> <li> -c : The
 * allowable error in split decision, values closer to 0 will take longer to
 * decide</li> <li> -t : Threshold below which a split will be forced to break
 * ties</li> <li> -b : Only allow binary splits</li> <li> -z : Stop growing as
 * soon as memory limit is hit</li> <li> -r : Disable poor attributes</li> <li>
 * -p : Disable pre-pruning</li>
 * <li> -l : Leaf prediction to use: MajorityClass (MC), Naive Bayes (NB) or NaiveBayes
 * adaptive (NBAdaptive).</li>
 * <li> -q : The number of instances a leaf should observe before
 * permitting Naive Bayes</li>
 * </ul>
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public class HoeffdingTree extends AbstractClassifier {

    private static final long serialVersionUID = 1L;

    @Override
    public String getPurposeString() {
        return "Hoeffding Tree or VFDT.";
    }

    /** Memory budget for the whole tree, in bytes (default 32 MB). */
    public IntOption maxByteSizeOption = new IntOption("maxByteSize", 'm',
            "Maximum memory consumed by the tree.", 33554432, 0,
            Integer.MAX_VALUE);

    /*
     * public MultiChoiceOption numericEstimatorOption = new MultiChoiceOption(
     * "numericEstimator", 'n', "Numeric estimator to use.", new String[]{
     * "GAUSS10", "GAUSS100", "GK10", "GK100", "GK1000", "VFML10", "VFML100",
     * "VFML1000", "BINTREE"}, new String[]{ "Gaussian approximation evaluating
     * 10 splitpoints", "Gaussian approximation evaluating 100 splitpoints",
     * "Greenwald-Khanna quantile summary with 10 tuples", "Greenwald-Khanna
     * quantile summary with 100 tuples", "Greenwald-Khanna quantile summary
     * with 1000 tuples", "VFML method with 10 bins", "VFML method with 100
     * bins", "VFML method with 1000 bins", "Exhaustive binary tree"}, 0);
     */
    public ClassOption numericEstimatorOption = new ClassOption("numericEstimator",
            'n', "Numeric estimator to use.", NumericAttributeClassObserver.class,
            "GaussianNumericAttributeClassObserver");

    public ClassOption nominalEstimatorOption = new ClassOption("nominalEstimator",
            'd', "Nominal estimator to use.", DiscreteAttributeClassObserver.class,
            "NominalAttributeClassObserver");

    public IntOption memoryEstimatePeriodOption = new IntOption(
            "memoryEstimatePeriod", 'e',
            "How many instances between memory consumption checks.", 1000000,
            0, Integer.MAX_VALUE);

    /** Instances a leaf must accumulate between consecutive split attempts. */
    public IntOption gracePeriodOption = new IntOption(
            "gracePeriod", 'g',
            "The number of instances a leaf should observe between split attempts.",
            200, 0, Integer.MAX_VALUE);

    public ClassOption splitCriterionOption = new ClassOption("splitCriterion",
            's', "Split criterion to use.", SplitCriterion.class,
            "InfoGainSplitCriterion");

    /** The delta of the Hoeffding bound: allowable split-decision error. */
    public FloatOption splitConfidenceOption = new FloatOption(
            "splitConfidence", 'c',
            "The allowable error in split decision, values closer to 0 will take longer to decide.",
            0.0000001, 0.0, 1.0);

    public FloatOption tieThresholdOption = new FloatOption("tieThreshold",
            't', "Threshold below which a split will be forced to break ties.",
            0.05, 0.0, 1.0);

    public FlagOption binarySplitsOption = new FlagOption("binarySplits", 'b',
            "Only allow binary splits.");

    public FlagOption stopMemManagementOption = new FlagOption(
            "stopMemManagement", 'z',
            "Stop growing as soon as memory limit is hit.");

    public FlagOption removePoorAttsOption = new FlagOption("removePoorAtts",
            'r', "Disable poor attributes.");

    public FlagOption noPrePruneOption = new FlagOption("noPrePrune", 'p',
            "Disable pre-pruning.");

    /**
     * Result of routing an instance down the tree: the leaf reached plus the
     * split-node parent and the branch index taken to get there (parent is
     * null / branch -1 for the root).
     */
    public static class FoundNode {

        public Node node;

        public SplitNode parent;

        public int parentBranch;

        public FoundNode(Node node, SplitNode parent, int parentBranch) {
            this.node = node;
            this.parent = parent;
            this.parentBranch = parentBranch;
        }
    }

    /**
     * Base tree node: holds the class distribution observed at this node.
     * Acts as a leaf by default; SplitNode overrides the routing behaviour.
     */
    public static class Node extends AbstractMOAObject {

        private static final long serialVersionUID = 1L;

        // Per-class weight totals observed at this node.
        protected DoubleVector observedClassDistribution;

        public Node(double[] classObservations) {
            this.observedClassDistribution = new DoubleVector(classObservations);
        }

        public int calcByteSize() {
            return (int) (SizeOf.sizeOf(this) + SizeOf.fullSizeOf(this.observedClassDistribution));
        }

        public int calcByteSizeIncludingSubtree() {
            return calcByteSize();
        }

        public boolean isLeaf() {
            return true;
        }

        /** A plain node is itself the leaf: just wrap it with its context. */
        public FoundNode filterInstanceToLeaf(Instance inst, SplitNode parent,
                int parentBranch) {
            return new FoundNode(this, parent, parentBranch);
        }

        public double[] getObservedClassDistribution() {
            return this.observedClassDistribution.getArrayCopy();
        }

        /** Default vote: the raw observed class distribution (majority class). */
        public double[] getClassVotes(Instance inst, HoeffdingTree ht) {
            return this.observedClassDistribution.getArrayCopy();
        }

        /** True when at most one class has non-zero weight here. */
        public boolean observedClassDistributionIsPure() {
            return this.observedClassDistribution.numNonZeroEntries() < 2;
        }

        public void describeSubtree(HoeffdingTree ht, StringBuilder out,
                int indent) {
            StringUtils.appendIndented(out, indent, "Leaf ");
            out.append(ht.getClassNameString());
            out.append(" = ");
            out.append(ht.getClassLabelString(this.observedClassDistribution.maxIndex()));
            out.append(" weights: ");
            this.observedClassDistribution.getSingleLineDescription(out,
                    ht.treeRoot.observedClassDistribution.numValues());
            StringUtils.appendNewline(out);
        }

        public int subtreeDepth() {
            return 0;
        }

        /**
         * Weight of misclassified instances at this node (total minus the
         * majority class) — leaves with high promise are kept active longest
         * by the memory manager.
         */
        public double calculatePromise() {
            double totalSeen = this.observedClassDistribution.sumOfValues();
            return totalSeen > 0.0 ? (totalSeen
                    - this.observedClassDistribution.getValue(this.observedClassDistribution.maxIndex()))
                    : 0.0;
        }

        @Override
        public void getDescription(StringBuilder sb, int indent) {
            // NOTE(review): passes a null tree to describeSubtree, which
            // dereferences ht — this path would NPE if ever called; confirm.
            describeSubtree(null, sb, indent);
        }
    }

    /** Internal decision node: routes instances by an attribute test. */
    public static class SplitNode extends Node {

        private static final long serialVersionUID = 1L;

        protected InstanceConditionalTest splitTest;

        protected AutoExpandVector<Node> children; // = new AutoExpandVector<Node>();

        @Override
        public int calcByteSize() {
            return super.calcByteSize()
                    + (int) (SizeOf.sizeOf(this.children) + SizeOf.fullSizeOf(this.splitTest));
        }

        @Override
        public int calcByteSizeIncludingSubtree() {
            int byteSize = calcByteSize();
            for (Node child : this.children) {
                if (child != null) {
                    byteSize += child.calcByteSizeIncludingSubtree();
                }
            }
            return byteSize;
        }

        public SplitNode(InstanceConditionalTest splitTest,
                double[] classObservations, int size) {
            super(classObservations);
            this.splitTest = splitTest;
            this.children = new AutoExpandVector<Node>(size);
        }

        public SplitNode(InstanceConditionalTest splitTest,
                double[] classObservations) {
            super(classObservations);
            this.splitTest = splitTest;
            this.children = new AutoExpandVector<Node>();
        }

        public int numChildren() {
            return this.children.size();
        }

        public void setChild(int index, Node child) {
            if ((this.splitTest.maxBranches() >= 0)
                    && (index >= this.splitTest.maxBranches())) {
                throw new IndexOutOfBoundsException();
            }
            this.children.set(index, child);
        }

        public Node getChild(int index) {
            return this.children.get(index);
        }

        public int instanceChildIndex(Instance inst) {
            return this.splitTest.branchForInstance(inst);
        }

        @Override
        public boolean isLeaf() {
            return false;
        }

        /**
         * Recursively follows the branch chosen by the split test. May return
         * a FoundNode with a null node (an unmaterialised branch) or, when
         * the test cannot route the instance (branch &lt; 0), this split node
         * itself so the caller can still obtain votes.
         */
        @Override
        public FoundNode filterInstanceToLeaf(Instance inst, SplitNode parent,
                int parentBranch) {
            int childIndex = instanceChildIndex(inst);
            if (childIndex >= 0) {
                Node child = getChild(childIndex);
                if (child != null) {
                    return child.filterInstanceToLeaf(inst, this, childIndex);
                }
                return new FoundNode(null, this, childIndex);
            }
            return new FoundNode(this, parent, parentBranch);
        }

        @Override
        public void describeSubtree(HoeffdingTree ht, StringBuilder out,
                int indent) {
            for (int branch = 0; branch < numChildren(); branch++) {
                Node child = getChild(branch);
                if (child != null) {
                    StringUtils.appendIndented(out, indent, "if ");
                    out.append(this.splitTest.describeConditionForBranch(branch,
                            ht.getModelContext()));
                    out.append(": ");
                    StringUtils.appendNewline(out);
                    child.describeSubtree(ht, out, indent + 2);
                }
            }
        }

        @Override
        public int subtreeDepth() {
            int maxChildDepth = 0;
            for (Node child : this.children) {
                if (child != null) {
                    int depth = child.subtreeDepth();
                    if (depth > maxChildDepth) {
                        maxChildDepth = depth;
                    }
                }
            }
            return maxChildDepth + 1;
        }
    }

    /** A leaf that can learn from instances. */
    public static abstract class LearningNode extends Node {

        private static final long serialVersionUID = 1L;

        public LearningNode(double[] initialClassObservations) {
            super(initialClassObservations);
        }

        public abstract void learnFromInstance(Instance inst, HoeffdingTree ht);
    }

    /**
     * Deactivated leaf: only updates its class counts, keeps no attribute
     * observers and never attempts to split (used to save memory).
     */
    public static class InactiveLearningNode extends LearningNode {

        private static final long serialVersionUID = 1L;

        public InactiveLearningNode(double[] initialClassObservations) {
            super(initialClassObservations);
        }

        @Override
        public void learnFromInstance(Instance inst, HoeffdingTree ht) {
            this.observedClassDistribution.addToValue((int) inst.classValue(),
                    inst.weight());
        }
    }

    /**
     * Active leaf: maintains per-attribute observers so it can evaluate
     * candidate splits.
     */
    public static class ActiveLearningNode extends LearningNode {

        private static final long serialVersionUID = 1L;

        // Weight seen when a split was last attempted; used with the grace
        // period to throttle split evaluations.
        protected double weightSeenAtLastSplitEvaluation;

        protected AutoExpandVector<AttributeClassObserver> attributeObservers = new AutoExpandVector<AttributeClassObserver>();

        protected boolean isInitialized;

        public ActiveLearningNode(double[] initialClassObservations) {
            super(initialClassObservations);
            this.weightSeenAtLastSplitEvaluation = getWeightSeen();
            this.isInitialized = false;
        }

        @Override
        public int calcByteSize() {
            return super.calcByteSize()
                    + (int) (SizeOf.fullSizeOf(this.attributeObservers));
        }

        /**
         * Updates the class distribution and feeds each attribute value to
         * its (lazily created) observer. The class attribute itself is
         * skipped via the numAttributes()-1 loop bound.
         */
        @Override
        public void learnFromInstance(Instance inst, HoeffdingTree ht) {
            if (this.isInitialized == false) {
                this.attributeObservers = new AutoExpandVector<AttributeClassObserver>(inst.numAttributes());
                this.isInitialized = true;
            }
            this.observedClassDistribution.addToValue((int) inst.classValue(),
                    inst.weight());
            for (int i = 0; i < inst.numAttributes() - 1; i++) {
                int instAttIndex = modelAttIndexToInstanceAttIndex(i, inst);
                AttributeClassObserver obs = this.attributeObservers.get(i);
                if (obs == null) {
                    obs = inst.attribute(instAttIndex).isNominal() ? ht.newNominalClassObserver()
                            : ht.newNumericClassObserver();
                    this.attributeObservers.set(i, obs);
                }
                obs.observeAttributeClass(inst.value(instAttIndex),
                        (int) inst.classValue(), inst.weight());
            }
        }

        public double getWeightSeen() {
            return this.observedClassDistribution.sumOfValues();
        }

        public double getWeightSeenAtLastSplitEvaluation() {
            return this.weightSeenAtLastSplitEvaluation;
        }

        public void setWeightSeenAtLastSplitEvaluation(double weight) {
            this.weightSeenAtLastSplitEvaluation = weight;
        }

        /**
         * Collects the best split suggestion from every attribute observer;
         * unless pre-pruning is disabled, also includes the "do not split"
         * null suggestion so it can compete on merit.
         */
        public AttributeSplitSuggestion[] getBestSplitSuggestions(
                SplitCriterion criterion, HoeffdingTree ht) {
            List<AttributeSplitSuggestion> bestSuggestions = new LinkedList<AttributeSplitSuggestion>();
            double[] preSplitDist = this.observedClassDistribution.getArrayCopy();
            if (!ht.noPrePruneOption.isSet()) {
                // add null split as an option
                bestSuggestions.add(new AttributeSplitSuggestion(null,
                        new double[0][], criterion.getMeritOfSplit(
                        preSplitDist, new double[][]{preSplitDist})));
            }
            for (int i = 0; i < this.attributeObservers.size(); i++) {
                AttributeClassObserver obs = this.attributeObservers.get(i);
                if (obs != null) {
                    AttributeSplitSuggestion bestSuggestion = obs.getBestEvaluatedSplitSuggestion(criterion,
                            preSplitDist, i, ht.binarySplitsOption.isSet());
                    if (bestSuggestion != null) {
                        bestSuggestions.add(bestSuggestion);
                    }
                }
            }
            return bestSuggestions.toArray(new AttributeSplitSuggestion[bestSuggestions.size()]);
        }

        /** Replaces the attribute's observer with a null observer. */
        public void disableAttribute(int attIndex) {
            this.attributeObservers.set(attIndex,
                    new NullAttributeClassObserver());
        }
    }

    protected Node treeRoot;

    protected int decisionNodeCount;

    protected int activeLeafNodeCount;

    protected int inactiveLeafNodeCount;

    protected double inactiveLeafByteSizeEstimate;

    protected double activeLeafByteSizeEstimate;

    // Ratio of measured model size to the leaf-based estimate; corrects the
    // estimate used by the memory manager.
    protected double byteSizeEstimateOverheadFraction;

    protected boolean growthAllowed;

    public int calcByteSize() {
        int size = (int) SizeOf.sizeOf(this);
        if (this.treeRoot != null) {
            size += this.treeRoot.calcByteSizeIncludingSubtree();
        }
        return size;
    }

    @Override
    public int measureByteSize() {
        return calcByteSize();
    }

    @Override
    public void resetLearningImpl() {
        this.treeRoot = null;
        this.decisionNodeCount = 0;
        this.activeLeafNodeCount = 0;
        this.inactiveLeafNodeCount = 0;
        this.inactiveLeafByteSizeEstimate = 0.0;
        this.activeLeafByteSizeEstimate = 0.0;
        this.byteSizeEstimateOverheadFraction = 1.0;
        this.growthAllowed = true;
        // Attribute removal is not supported with NB/NBAdaptive leaves.
        if (this.leafpredictionOption.getChosenIndex() > 0) {
            this.removePoorAttsOption = null;
        }
    }

    /**
     * Routes the instance to a leaf (materialising the branch if needed),
     * trains that leaf, and — once the grace period has elapsed — attempts
     * a split there. Periodically re-estimates model memory usage.
     */
    @Override
    public void trainOnInstanceImpl(Instance inst) {
        if (this.treeRoot == null) {
            this.treeRoot = newLearningNode();
            this.activeLeafNodeCount = 1;
        }
        FoundNode foundNode = this.treeRoot.filterInstanceToLeaf(inst, null, -1);
        Node leafNode = foundNode.node;
        if (leafNode == null) {
            leafNode = newLearningNode();
            foundNode.parent.setChild(foundNode.parentBranch, leafNode);
            this.activeLeafNodeCount++;
        }
        if (leafNode instanceof LearningNode) {
            LearningNode learningNode = (LearningNode) leafNode;
            learningNode.learnFromInstance(inst, this);
            if (this.growthAllowed
                    && (learningNode instanceof ActiveLearningNode)) {
                ActiveLearningNode activeLearningNode = (ActiveLearningNode) learningNode;
                double weightSeen = activeLearningNode.getWeightSeen();
                if (weightSeen
                        - activeLearningNode.getWeightSeenAtLastSplitEvaluation() >= this.gracePeriodOption.getValue()) {
                    attemptToSplit(activeLearningNode, foundNode.parent,
                            foundNode.parentBranch);
                    activeLearningNode.setWeightSeenAtLastSplitEvaluation(weightSeen);
                }
            }
        }
        if (this.trainingWeightSeenByModel
                % this.memoryEstimatePeriodOption.getValue() == 0) {
            estimateModelByteSizes();
        }
    }

    /**
     * Votes of the leaf the instance reaches; falls back to the parent split
     * node's distribution for unmaterialised branches, and to an empty array
     * before any training.
     */
    @Override
    public double[] getVotesForInstance(Instance inst) {
        if (this.treeRoot != null) {
            FoundNode foundNode = this.treeRoot.filterInstanceToLeaf(inst,
                    null, -1);
            Node leafNode = foundNode.node;
            if (leafNode == null) {
                leafNode = foundNode.parent;
            }
            return leafNode.getClassVotes(inst, this);
        }
        return new double[0];
    }

    @Override
    protected Measurement[] getModelMeasurementsImpl() {
        return new Measurement[]{
                    new Measurement("tree size (nodes)", this.decisionNodeCount
                    + this.activeLeafNodeCount + this.inactiveLeafNodeCount),
                    new Measurement("tree size (leaves)", this.activeLeafNodeCount
                    + this.inactiveLeafNodeCount),
                    new Measurement("active learning leaves",
                    this.activeLeafNodeCount),
                    new Measurement("tree depth", measureTreeDepth()),
                    new Measurement("active leaf byte size estimate",
                    this.activeLeafByteSizeEstimate),
                    new Measurement("inactive leaf byte size estimate",
                    this.inactiveLeafByteSizeEstimate),
                    new Measurement("byte size estimate overhead",
                    this.byteSizeEstimateOverheadFraction)};
    }

    public int measureTreeDepth() {
        if (this.treeRoot != null) {
            return this.treeRoot.subtreeDepth();
        }
        return 0;
    }

    @Override
    public void getModelDescription(StringBuilder out, int indent) {
        // NOTE(review): NPEs when called before any training (treeRoot null).
        this.treeRoot.describeSubtree(this, out, indent);
    }

    @Override
    public boolean isRandomizable() {
        return false;
    }

    /** Hoeffding bound for value range, confidence delta and n observations. */
    public static double computeHoeffdingBound(double range, double confidence,
            double n) {
        return Math.sqrt(((range * range) * Math.log(1.0 / confidence))
                / (2.0 * n));
    }

    //Procedure added for Hoeffding Adaptive Trees (ADWIN)
    protected SplitNode newSplitNode(InstanceConditionalTest splitTest,
            double[] classObservations, int size) {
        return new SplitNode(splitTest, classObservations, size);
    }

    protected SplitNode newSplitNode(InstanceConditionalTest splitTest,
            double[] classObservations) {
        return new SplitNode(splitTest, classObservations);
    }

    /** Fresh copy of the configured nominal-attribute observer. */
    protected AttributeClassObserver newNominalClassObserver() {
        AttributeClassObserver nominalClassObserver = (AttributeClassObserver) getPreparedClassOption(this.nominalEstimatorOption);
        return (AttributeClassObserver) nominalClassObserver.copy();
    }

    /** Fresh copy of the configured numeric-attribute observer. */
    protected AttributeClassObserver newNumericClassObserver() {
        AttributeClassObserver numericClassObserver = (AttributeClassObserver) getPreparedClassOption(this.numericEstimatorOption);
        return (AttributeClassObserver) numericClassObserver.copy();
    }

    /**
     * Core VFDT split decision: splits when the merit gap between the two
     * best suggestions exceeds the Hoeffding bound, or the bound has shrunk
     * below the tie threshold. A winning null suggestion pre-prunes
     * (deactivates) the leaf instead. Optionally disables attributes whose
     * merit trails the best by more than the bound.
     */
    protected void attemptToSplit(ActiveLearningNode node, SplitNode parent,
            int parentIndex) {
        if (!node.observedClassDistributionIsPure()) {
            SplitCriterion splitCriterion = (SplitCriterion) getPreparedClassOption(this.splitCriterionOption);
            AttributeSplitSuggestion[] bestSplitSuggestions = node.getBestSplitSuggestions(splitCriterion, this);
            Arrays.sort(bestSplitSuggestions);
            boolean shouldSplit = false;
            if (bestSplitSuggestions.length < 2) {
                shouldSplit = bestSplitSuggestions.length > 0;
            } else {
                double hoeffdingBound = computeHoeffdingBound(splitCriterion.getRangeOfMerit(node.getObservedClassDistribution()),
                        this.splitConfidenceOption.getValue(), node.getWeightSeen());
                AttributeSplitSuggestion bestSuggestion = bestSplitSuggestions[bestSplitSuggestions.length - 1];
                AttributeSplitSuggestion secondBestSuggestion = bestSplitSuggestions[bestSplitSuggestions.length - 2];
                if ((bestSuggestion.merit - secondBestSuggestion.merit > hoeffdingBound)
                        || (hoeffdingBound < this.tieThresholdOption.getValue())) {
                    shouldSplit = true;
                }
                // }
                if ((this.removePoorAttsOption != null)
                        && this.removePoorAttsOption.isSet()) {
                    Set<Integer> poorAtts = new HashSet<Integer>();
                    // scan 1 - add any poor to set
                    for (int i = 0; i < bestSplitSuggestions.length; i++) {
                        if (bestSplitSuggestions[i].splitTest != null) {
                            int[] splitAtts = bestSplitSuggestions[i].splitTest.getAttsTestDependsOn();
                            if (splitAtts.length == 1) {
                                if (bestSuggestion.merit
                                        - bestSplitSuggestions[i].merit > hoeffdingBound) {
                                    poorAtts.add(new Integer(splitAtts[0]));
                                }
                            }
                        }
                    }
                    // scan 2 - remove good ones from set
                    for (int i = 0; i < bestSplitSuggestions.length; i++) {
                        if (bestSplitSuggestions[i].splitTest != null) {
                            int[] splitAtts = bestSplitSuggestions[i].splitTest.getAttsTestDependsOn();
                            if (splitAtts.length == 1) {
                                if (bestSuggestion.merit
                                        - bestSplitSuggestions[i].merit < hoeffdingBound) {
                                    poorAtts.remove(new Integer(splitAtts[0]));
                                }
                            }
                        }
                    }
                    for (int poorAtt : poorAtts) {
                        node.disableAttribute(poorAtt);
                    }
                }
            }
            if (shouldSplit) {
                AttributeSplitSuggestion splitDecision = bestSplitSuggestions[bestSplitSuggestions.length - 1];
                if (splitDecision.splitTest == null) {
                    // preprune - null wins
                    deactivateLearningNode(node, parent, parentIndex);
                } else {
                    SplitNode newSplit = newSplitNode(splitDecision.splitTest,
                            node.getObservedClassDistribution(), splitDecision.numSplits());
                    for (int i = 0; i < splitDecision.numSplits(); i++) {
                        Node newChild = newLearningNode(splitDecision.resultingClassDistributionFromSplit(i));
                        newSplit.setChild(i, newChild);
                    }
                    this.activeLeafNodeCount--;
                    this.decisionNodeCount++;
                    this.activeLeafNodeCount += splitDecision.numSplits();
                    if (parent == null) {
                        this.treeRoot = newSplit;
                    } else {
                        parent.setChild(parentIndex, newSplit);
                    }
                }
                // manage memory
                enforceTrackerLimit();
            }
        }
    }

    /**
     * Memory manager: when the estimated model size exceeds the budget,
     * deactivates the least promising leaves (or stops growth entirely if
     * the -z option is set) and reactivates promising inactive ones.
     */
    public void enforceTrackerLimit() {
        if ((this.inactiveLeafNodeCount > 0)
                || ((this.activeLeafNodeCount * this.activeLeafByteSizeEstimate + this.inactiveLeafNodeCount
                * this.inactiveLeafByteSizeEstimate)
                * this.byteSizeEstimateOverheadFraction > this.maxByteSizeOption.getValue())) {
            if (this.stopMemManagementOption.isSet()) {
                this.growthAllowed = false;
                return;
            }
            FoundNode[] learningNodes = findLearningNodes();
            // Sort ascending by promise: least promising leaves first.
            Arrays.sort(learningNodes, new Comparator<FoundNode>() {

                @Override
                public int compare(FoundNode fn1, FoundNode fn2) {
                    return Double.compare(fn1.node.calculatePromise(), fn2.node.calculatePromise());
                }
            });
            // Largest number of active leaves that still fits the budget.
            int maxActive = 0;
            while (maxActive < learningNodes.length) {
                maxActive++;
                if ((maxActive * this.activeLeafByteSizeEstimate + (learningNodes.length - maxActive)
                        * this.inactiveLeafByteSizeEstimate)
                        * this.byteSizeEstimateOverheadFraction > this.maxByteSizeOption.getValue()) {
                    maxActive--;
                    break;
                }
            }
            int cutoff = learningNodes.length - maxActive;
            for (int i = 0; i < cutoff; i++) {
                if (learningNodes[i].node instanceof ActiveLearningNode) {
                    deactivateLearningNode(
                            (ActiveLearningNode) learningNodes[i].node,
                            learningNodes[i].parent,
                            learningNodes[i].parentBranch);
                }
            }
            for (int i = cutoff; i < learningNodes.length; i++) {
                if (learningNodes[i].node instanceof InactiveLearningNode) {
                    activateLearningNode(
                            (InactiveLearningNode) learningNodes[i].node,
                            learningNodes[i].parent,
                            learningNodes[i].parentBranch);
                }
            }
        }
    }

    /**
     * Measures actual leaf sizes to refresh the per-leaf byte estimates and
     * the overhead fraction, then re-runs the memory manager if over budget.
     */
    public void estimateModelByteSizes() {
        FoundNode[] learningNodes = findLearningNodes();
        long totalActiveSize = 0;
        long totalInactiveSize = 0;
        for (FoundNode foundNode : learningNodes) {
            if (foundNode.node instanceof ActiveLearningNode) {
                totalActiveSize += SizeOf.fullSizeOf(foundNode.node);
            } else {
                totalInactiveSize += SizeOf.fullSizeOf(foundNode.node);
            }
        }
        if (totalActiveSize > 0) {
            this.activeLeafByteSizeEstimate = (double) totalActiveSize
                    / this.activeLeafNodeCount;
        }
        if (totalInactiveSize > 0) {
            this.inactiveLeafByteSizeEstimate = (double) totalInactiveSize
                    / this.inactiveLeafNodeCount;
        }
        int actualModelSize = this.measureByteSize();
        double estimatedModelSize = (this.activeLeafNodeCount
                * this.activeLeafByteSizeEstimate + this.inactiveLeafNodeCount
                * this.inactiveLeafByteSizeEstimate);
        this.byteSizeEstimateOverheadFraction = actualModelSize
                / estimatedModelSize;
        if (actualModelSize > this.maxByteSizeOption.getValue()) {
            enforceTrackerLimit();
        }
    }

    public void deactivateAllLeaves() {
        FoundNode[] learningNodes = findLearningNodes();
        for (int i = 0; i < learningNodes.length; i++) {
            if (learningNodes[i].node instanceof ActiveLearningNode) {
                deactivateLearningNode(
                        (ActiveLearningNode) learningNodes[i].node,
                        learningNodes[i].parent, learningNodes[i].parentBranch);
            }
        }
    }

    /** Swaps an active leaf for an inactive one (keeps class counts only). */
    protected void deactivateLearningNode(ActiveLearningNode toDeactivate,
            SplitNode parent, int parentBranch) {
        Node newLeaf = new InactiveLearningNode(toDeactivate.getObservedClassDistribution());
        if (parent == null) {
            this.treeRoot = newLeaf;
        } else {
            parent.setChild(parentBranch, newLeaf);
        }
        this.activeLeafNodeCount--;
        this.inactiveLeafNodeCount++;
    }

    /** Swaps an inactive leaf back for a fresh active learning leaf. */
    protected void activateLearningNode(InactiveLearningNode toActivate,
            SplitNode parent, int parentBranch) {
        Node newLeaf = newLearningNode(toActivate.getObservedClassDistribution());
        if (parent == null) {
            this.treeRoot = newLeaf;
        } else {
            parent.setChild(parentBranch, newLeaf);
        }
        this.activeLeafNodeCount++;
        this.inactiveLeafNodeCount--;
    }

    protected FoundNode[] findLearningNodes() {
        List<FoundNode> foundList = new LinkedList<FoundNode>();
        findLearningNodes(this.treeRoot, null, -1, foundList);
        return foundList.toArray(new FoundNode[foundList.size()]);
    }

    /** Recursive collector of all learning leaves with their parent context. */
    protected void findLearningNodes(Node node, SplitNode parent,
            int parentBranch, List<FoundNode> found) {
        if (node != null) {
            if (node instanceof LearningNode) {
                found.add(new FoundNode(node, parent, parentBranch));
            }
            if (node instanceof SplitNode) {
                SplitNode splitNode = (SplitNode) node;
                for (int i = 0; i < splitNode.numChildren(); i++) {
                    findLearningNodes(splitNode.getChild(i), splitNode, i,
                            found);
                }
            }
        }
    }

    public MultiChoiceOption leafpredictionOption = new MultiChoiceOption(
            "leafprediction", 'l', "Leaf prediction to use.", new String[]{
                "MC", "NB", "NBAdaptive"}, new String[]{
                "Majority class",
                "Naive Bayes",
                "Naive Bayes Adaptive"}, 2);

    public IntOption nbThresholdOption = new IntOption(
            "nbThreshold",
            'q',
            "The number of instances a leaf should observe before permitting Naive Bayes.",
            0, 0, Integer.MAX_VALUE);

    /**
     * Leaf that votes with a Naive Bayes model (built from the attribute
     * observers) once enough weight has been seen, otherwise majority class.
     */
    public static class LearningNodeNB extends ActiveLearningNode {

        private static final long serialVersionUID = 1L;

        public LearningNodeNB(double[] initialClassObservations) {
            super(initialClassObservations);
        }

        @Override
        public double[] getClassVotes(Instance inst, HoeffdingTree ht) {
            if (getWeightSeen() >= ht.nbThresholdOption.getValue()) {
                return NaiveBayes.doNaiveBayesPrediction(inst,
                        this.observedClassDistribution,
                        this.attributeObservers);
            }
            return super.getClassVotes(inst, ht);
        }

        @Override
        public void disableAttribute(int attIndex) {
            // should not disable poor atts - they are used in NB calc
        }
    }

    /**
     * Adaptive leaf: tracks how often majority-class vs Naive Bayes
     * prediction would have been correct on the training stream and votes
     * with whichever is currently ahead.
     */
    public static class LearningNodeNBAdaptive extends LearningNodeNB {

        private static final long serialVersionUID = 1L;

        protected double mcCorrectWeight = 0.0;

        protected double nbCorrectWeight = 0.0;

        public LearningNodeNBAdaptive(double[] initialClassObservations) {
            super(initialClassObservations);
        }

        @Override
        public void learnFromInstance(Instance inst, HoeffdingTree ht) {
            // Score both predictors on the instance BEFORE learning from it.
            int trueClass = (int) inst.classValue();
            if (this.observedClassDistribution.maxIndex() == trueClass) {
                this.mcCorrectWeight += inst.weight();
            }
            if (Utils.maxIndex(NaiveBayes.doNaiveBayesPrediction(inst,
                    this.observedClassDistribution, this.attributeObservers)) == trueClass) {
                this.nbCorrectWeight += inst.weight();
            }
            super.learnFromInstance(inst, ht);
        }

        @Override
        public double[] getClassVotes(Instance inst, HoeffdingTree ht) {
            if (this.mcCorrectWeight > this.nbCorrectWeight) {
                return this.observedClassDistribution.getArrayCopy();
            }
            return NaiveBayes.doNaiveBayesPrediction(inst,
                    this.observedClassDistribution, this.attributeObservers);
        }
    }

    protected LearningNode newLearningNode() {
        return newLearningNode(new double[0]);
    }

    /** Leaf factory honouring the -l leaf-prediction option. */
    protected LearningNode newLearningNode(double[] initialClassObservations) {
        LearningNode ret;
        int predictionOption = this.leafpredictionOption.getChosenIndex();
        if (predictionOption == 0) { //MC
            ret = new ActiveLearningNode(initialClassObservations);
        } else if (predictionOption == 1) { //NB
            ret = new LearningNodeNB(initialClassObservations);
        } else { //NBAdaptive
            ret = new LearningNodeNBAdaptive(initialClassObservations);
        }
        return ret;
    }
}
Java
/* * AdaHoeffdingOptionTree.java * Copyright (C) 2008 University of Waikato, Hamilton, New Zealand * @author Albert Bifet (abifet at cs dot waikato dot ac dot nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.classifiers.trees; import moa.classifiers.bayes.NaiveBayes; import weka.core.Instance; import weka.core.Utils; /** * Adaptive decision option tree for streaming data with adaptive Naive * Bayes classification at leaves. * An Adaptive Hoeffding Option Tree is a Hoeffding Option Tree with the * following improvement: each leaf stores an estimation of the current error. * It uses an EWMA estimator with alpha = .2. The weight of each node in the * voting process is proportional to the square of the inverse of the error. 
* <br/><br/>
 * Example:<br/>
 * <code>AdaHoeffdingOptionTree -o 50 </code>
 * Parameters:<ul>
 * <li>Same parameters as <code>HoeffdingOptionTreeNB</code></ul>
 *
 * @author Albert Bifet (abifet at cs dot waikato dot ac dot nz)
 * @version $Revision: 7 $
 */
public class AdaHoeffdingOptionTree extends HoeffdingOptionTree {

    private static final long serialVersionUID = 1L;

    @Override
    public String getPurposeString() {
        return "Adaptive decision option tree for streaming data with adaptive Naive Bayes classification at leaves.";
    }

    /**
     * Leaf that keeps an EWMA (alpha = 0.2) estimate of its own accuracy on
     * the training stream and scales its votes by the square of that error
     * estimate's inverse (implemented by dividing the vote mass by
     * (1 - CorrectWeight)^2 via normalisation).
     */
    public static class AdaLearningNode extends LearningNodeNB {

        private static final long serialVersionUID = 1L;

        // Cumulative weight correctly classified by majority class.
        protected double mcCorrectWeight = 0.0;

        // Cumulative weight correctly classified by Naive Bayes.
        protected double nbCorrectWeight = 0.0;

        // EWMA estimate of this leaf's accuracy in [0, 1].
        protected double CorrectWeight = 0.0;

        // EWMA smoothing factor.
        protected double alpha = 0.2;

        public AdaLearningNode(double[] initialClassObservations) {
            super(initialClassObservations);
        }

        /**
         * Scores both predictors on the instance before learning from it,
         * then updates the accuracy EWMA based on whichever predictor is
         * currently leading. Note the order matters: each correctness
         * counter is incremented before it is compared, so blCorrect
         * reflects the updated tallies.
         */
        @Override
        public void learnFromInstance(Instance inst, HoeffdingOptionTree hot) {
            int trueClass = (int) inst.classValue();
            boolean blCorrect = false;
            if (this.observedClassDistribution.maxIndex() == trueClass) {
                this.mcCorrectWeight += inst.weight();
                if (this.mcCorrectWeight > this.nbCorrectWeight) {
                    blCorrect = true;
                }
            }
            if (Utils.maxIndex(NaiveBayes.doNaiveBayesPrediction(inst,
                    this.observedClassDistribution, this.attributeObservers)) == trueClass) {
                this.nbCorrectWeight += inst.weight();
                if (this.mcCorrectWeight <= this.nbCorrectWeight) {
                    blCorrect = true;
                }
            }
            if (blCorrect == true) {
                this.CorrectWeight += alpha * (1.0 - this.CorrectWeight); //EWMA
            } else {
                this.CorrectWeight -= alpha * this.CorrectWeight; //EWMA
            }
            super.learnFromInstance(inst, hot);
        }

        /**
         * Votes with majority class or Naive Bayes (whichever has been more
         * accurate here), weighted so that low-error leaves contribute more:
         * normalising by distSum * (1 - CorrectWeight)^2 divides the vote
         * mass by the squared error estimate.
         */
        @Override
        public double[] getClassVotes(Instance inst, HoeffdingOptionTree ht) {
            double[] dist;
            if (this.mcCorrectWeight > this.nbCorrectWeight) {
                dist = this.observedClassDistribution.getArrayCopy();
            } else {
                dist = NaiveBayes.doNaiveBayesPrediction(inst,
                        this.observedClassDistribution,
                        this.attributeObservers);
            }
            double distSum = Utils.sum(dist);
            // Guard avoids dividing by zero when the leaf is empty or its
            // estimated accuracy has reached 1.0.
            if (distSum * (1.0 - this.CorrectWeight)
                    * (1.0 - this.CorrectWeight) > 0.0) {
                Utils.normalize(dist, distSum * (1.0 - this.CorrectWeight)
                        * (1.0 - this.CorrectWeight)); //Adding weight
            }
            return dist;
        }
    }

    /** All leaves of this tree are adaptive EWMA leaves. */
    @Override
    protected LearningNode newLearningNode(double[] initialClassObservations) {
        return new AdaLearningNode(initialClassObservations);
    }
}
Java
/*
 *    AbstractClassifier.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.classifiers;

import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;

import moa.core.InstancesHeader;
import moa.core.Measurement;
import moa.core.ObjectRepository;
import moa.core.StringUtils;
import moa.gui.AWTRenderer;
import moa.options.AbstractOptionHandler;
import moa.options.IntOption;
import moa.tasks.TaskMonitor;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Utils;

/**
 * Abstract Classifier. All learners for nominal prediction in
 * MOA extend this class.
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public abstract class AbstractClassifier extends AbstractOptionHandler
        implements Classifier {

    @Override
    public String getPurposeString() {
        return "MOA Classifier: " + getClass().getCanonicalName();
    }

    /** Header of the instances of the data stream */
    protected InstancesHeader modelContext;

    /** Sum of the weights of the instances trained by this model */
    protected double trainingWeightSeenByModel = 0.0;

    /** Random seed used in randomizable learners */
    protected int randomSeed = 1;

    /** Option for randomizable learners to change the random seed */
    public IntOption randomSeedOption;

    /** Random Generator used in randomizable learners */
    public Random classifierRandom;

    /**
     * Creates a classifier and sets up the random seed option
     * if the classifier is randomizable.
     */
    public AbstractClassifier() {
        if (isRandomizable()) {
            this.randomSeedOption = new IntOption("randomSeed", 'r',
                    "Seed for random behaviour of the classifier.", 1);
        }
    }

    /**
     * Pulls the configured seed from the option (if randomizable) and resets
     * learning unless training has already started.
     */
    @Override
    public void prepareForUseImpl(TaskMonitor monitor,
            ObjectRepository repository) {
        if (this.randomSeedOption != null) {
            this.randomSeed = this.randomSeedOption.getValue();
        }
        if (!trainingHasStarted()) {
            resetLearning();
        }
    }

    /**
     * Sets the stream header this model learns from. Rejects headers without
     * a class attribute, and rejects incompatible replacements once training
     * has started (see {@link #contextIsCompatible}).
     */
    @Override
    public void setModelContext(InstancesHeader ih) {
        if ((ih != null) && (ih.classIndex() < 0)) {
            throw new IllegalArgumentException(
                    "Context for a classifier must include a class to learn");
        }
        if (trainingHasStarted()
                && (this.modelContext != null)
                && ((ih == null) || !contextIsCompatible(this.modelContext, ih))) {
            throw new IllegalArgumentException(
                    "New context is not compatible with existing model");
        }
        this.modelContext = ih;
    }

    @Override
    public InstancesHeader getModelContext() {
        return this.modelContext;
    }

    @Override
    public void setRandomSeed(int s) {
        this.randomSeed = s;
        if (this.randomSeedOption != null) {
            // keep option consistent
            this.randomSeedOption.setValue(s);
        }
    }

    /** Training counts as started once any positive weight has been seen. */
    @Override
    public boolean trainingHasStarted() {
        return this.trainingWeightSeenByModel > 0.0;
    }

    @Override
    public double trainingWeightSeenByModel() {
        return this.trainingWeightSeenByModel;
    }

    /**
     * Resets the model to its initial state, reseeding the random generator
     * for randomizable learners before delegating to the subclass.
     */
    @Override
    public void resetLearning() {
        this.trainingWeightSeenByModel = 0.0;
        if (isRandomizable()) {
            this.classifierRandom = new Random(this.randomSeed);
        }
        resetLearningImpl();
    }

    /**
     * Trains on one instance, skipping zero-weight instances and — for
     * supervised learners — instances with a missing class value.
     */
    @Override
    public void trainOnInstance(Instance inst) {
        boolean isTraining = (inst.weight() > 0.0);
        if (this instanceof SemiSupervisedLearner == false
                && inst.classIsMissing() == true) {
            isTraining = false;
        }
        if (isTraining) {
            this.trainingWeightSeenByModel += inst.weight();
            trainOnInstanceImpl(inst);
        }
    }

    /**
     * Collects standard measurements (training weight, serialized size),
     * subclass-specific measurements, and — for ensembles — the average of
     * each sub-model's measurements.
     */
    @Override
    public Measurement[] getModelMeasurements() {
        List<Measurement> measurementList = new LinkedList<Measurement>();
        measurementList.add(new Measurement("model training instances",
                trainingWeightSeenByModel()));
        measurementList.add(new Measurement("model serialized size (bytes)",
                measureByteSize()));
        Measurement[] modelMeasurements = getModelMeasurementsImpl();
        if (modelMeasurements != null) {
            measurementList.addAll(Arrays.asList(modelMeasurements));
        }
        // add average of sub-model measurements
        Classifier[] subModels = getSubClassifiers();
        if ((subModels != null) && (subModels.length > 0)) {
            List<Measurement[]> subMeasurements = new LinkedList<Measurement[]>();
            for (Classifier subModel : subModels) {
                if (subModel != null) {
                    subMeasurements.add(subModel.getModelMeasurements());
                }
            }
            Measurement[] avgMeasurements = Measurement.averageMeasurements(
                    subMeasurements.toArray(new Measurement[subMeasurements.size()][]));
            measurementList.addAll(Arrays.asList(avgMeasurements));
        }
        return measurementList.toArray(new Measurement[measurementList.size()]);
    }

    @Override
    public void getDescription(StringBuilder out, int indent) {
        StringUtils.appendIndented(out, indent, "Model type: ");
        out.append(this.getClass().getName());
        StringUtils.appendNewline(out);
        Measurement.getMeasurementsDescription(getModelMeasurements(), out,
                indent);
        StringUtils.appendNewlineIndented(out, indent, "Model description:");
        StringUtils.appendNewline(out);
        if (trainingHasStarted()) {
            getModelDescription(out, indent);
        } else {
            StringUtils.appendIndented(out, indent,
                    "Model has not been trained.");
        }
    }

    /** Returns null by default; ensembles override to expose sub-models. */
    @Override
    public Classifier[] getSubClassifiers() {
        return null;
    }

    @Override
    public Classifier copy() {
        return (Classifier) super.copy();
    }

    /** An instance is correctly classified when the top vote matches its class. */
    @Override
    public boolean correctlyClassifies(Instance inst) {
        return Utils.maxIndex(getVotesForInstance(inst)) == (int) inst.classValue();
    }

    /**
     * Gets the name of the attribute of the class from the header.
     *
     * @return the string with name of the attribute of the class
     */
    public String getClassNameString() {
        return InstancesHeader.getClassNameString(this.modelContext);
    }

    /**
     * Gets the name of a label of the class from the header.
     *
     * @param classLabelIndex the label index
     * @return the name of the label of the class
     */
    public String getClassLabelString(int classLabelIndex) {
        return InstancesHeader.getClassLabelString(this.modelContext,
                classLabelIndex);
    }

    /**
     * Gets the name of an attribute from the header.
     *
     * @param attIndex the attribute index
     * @return the name of the attribute
     */
    public String getAttributeNameString(int attIndex) {
        return InstancesHeader.getAttributeNameString(this.modelContext,
                attIndex);
    }

    /**
     * Gets the name of a value of an attribute from the header.
     *
     * @param attIndex the attribute index
     * @param valIndex the value of the attribute
     * @return the name of the value of the attribute
     */
    public String getNominalValueString(int attIndex, int valIndex) {
        return InstancesHeader.getNominalValueString(this.modelContext,
                attIndex, valIndex);
    }

    /**
     * Returns if two contexts or headers of instances are compatible.<br><br>
     *
     * Two contexts are compatible if they follow the following rules:<br>
     * Rule 1: num classes can increase but never decrease<br>
     * Rule 2: num attributes can increase but never decrease<br>
     * Rule 3: num nominal attribute values can increase but never decrease<br>
     * Rule 4: attribute types must stay in the same order (although class
     * can move; is always skipped over)<br><br>
     *
     * Attribute names are free to change, but should always still represent
     * the original attributes.
     *
     * @param originalContext the first context to compare
     * @param newContext the second context to compare
     * @return true if the two contexts are compatible.
     */
    public static boolean contextIsCompatible(InstancesHeader originalContext,
            InstancesHeader newContext) {
        if (newContext.numClasses() < originalContext.numClasses()) {
            return false; // rule 1
        }
        if (newContext.numAttributes() < originalContext.numAttributes()) {
            return false; // rule 2
        }
        // Walk both headers in parallel, skipping each header's class
        // attribute so only the predictive attributes are compared (rule 4).
        int oPos = 0;
        int nPos = 0;
        while (oPos < originalContext.numAttributes()) {
            if (oPos == originalContext.classIndex()) {
                oPos++;
                if (!(oPos < originalContext.numAttributes())) {
                    break;
                }
            }
            if (nPos == newContext.classIndex()) {
                nPos++;
            }
            if (originalContext.attribute(oPos).isNominal()) {
                if (!newContext.attribute(nPos).isNominal()) {
                    return false; // rule 4
                }
                if (newContext.attribute(nPos).numValues() < originalContext.attribute(oPos).numValues()) {
                    return false; // rule 3
                }
            } else {
                assert (originalContext.attribute(oPos).isNumeric());
                if (!newContext.attribute(nPos).isNumeric()) {
                    return false; // rule 4
                }
            }
            oPos++;
            nPos++;
        }
        return true; // all checks clear
    }

    /**
     * Returns the AWT Renderer
     *
     * @return the AWT Renderer
     */
    @Override
    public AWTRenderer getAWTRenderer() {
        // TODO should return a default renderer here
        // - or should null be interpreted as the default?
        return null;
    }

    /**
     * Resets this classifier. It must be similar to
     * starting a new classifier from scratch. <br><br>
     *
     * The reason for ...Impl methods: ease programmer burden by not requiring
     * them to remember calls to super in overridden methods.
     * Note that this will produce compiler errors if not overridden.
     */
    public abstract void resetLearningImpl();

    /**
     * Trains this classifier incrementally using the given instance.<br><br>
     *
     * The reason for ...Impl methods: ease programmer burden by not requiring
     * them to remember calls to super in overridden methods.
     * Note that this will produce compiler errors if not overridden.
     *
     * @param inst the instance to be used for training
     */
    public abstract void trainOnInstanceImpl(Instance inst);

    /**
     * Gets the current measurements of this classifier.<br><br>
     *
     * The reason for ...Impl methods: ease programmer burden by not requiring
     * them to remember calls to super in overridden methods.
     * Note that this will produce compiler errors if not overridden.
     *
     * @return an array of measurements to be used in evaluation tasks
     */
    protected abstract Measurement[] getModelMeasurementsImpl();

    /**
     * Returns a string representation of the model.
     *
     * @param out the stringbuilder to add the description
     * @param indent the number of characters to indent
     */
    public abstract void getModelDescription(StringBuilder out, int indent);

    /**
     * Gets the index of the attribute in the instance,
     * given the index of the attribute in the learner.
     * Learner indices skip the class attribute, so indices at or beyond the
     * class position are shifted up by one.
     *
     * @param index the index of the attribute in the learner
     * @param inst the instance
     * @return the index in the instance
     */
    protected static int modelAttIndexToInstanceAttIndex(int index,
            Instance inst) {
        return inst.classIndex() > index ? index : index + 1;
    }

    /**
     * Gets the index of the attribute in a set of instances,
     * given the index of the attribute in the learner.
     *
     * @param index the index of the attribute in the learner
     * @param insts the instances
     * @return the index of the attribute in the instances
     */
    protected static int modelAttIndexToInstanceAttIndex(int index,
            Instances insts) {
        return insts.classIndex() > index ? index : index + 1;
    }
}
Java
/* * AttributeSplitSuggestion.java * Copyright (C) 2007 University of Waikato, Hamilton, New Zealand * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.classifiers.core; import moa.classifiers.core.conditionaltests.InstanceConditionalTest; import moa.AbstractMOAObject; /** * Class for computing attribute split suggestions given a split test. * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision: 7 $ */ public class AttributeSplitSuggestion extends AbstractMOAObject implements Comparable<AttributeSplitSuggestion> { private static final long serialVersionUID = 1L; public InstanceConditionalTest splitTest; public double[][] resultingClassDistributions; public double merit; public AttributeSplitSuggestion(InstanceConditionalTest splitTest, double[][] resultingClassDistributions, double merit) { this.splitTest = splitTest; this.resultingClassDistributions = resultingClassDistributions.clone(); this.merit = merit; } public int numSplits() { return this.resultingClassDistributions.length; } public double[] resultingClassDistributionFromSplit(int splitIndex) { return this.resultingClassDistributions[splitIndex].clone(); } @Override public int compareTo(AttributeSplitSuggestion comp) { return Double.compare(this.merit, comp.merit); } @Override public void getDescription(StringBuilder sb, int indent) { // TODO 
Auto-generated method stub } }
Java
/*
 *    InstanceConditionalBinaryTest.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.classifiers.core.conditionaltests;

/**
 * Base class for two-way (binary) conditional tests used to split nodes in
 * Hoeffding trees. Subclasses decide which of the two branches an instance
 * follows; this class merely fixes the maximum branch count at two.
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public abstract class InstanceConditionalBinaryTest extends InstanceConditionalTest {

    /** A binary test always produces exactly two branches. */
    @Override
    public int maxBranches() {
        return 2;
    }
}
Java
/*
 *    InstanceConditionalTest.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.classifiers.core.conditionaltests;

import moa.AbstractMOAObject;
import moa.core.InstancesHeader;
import weka.core.Instance;

/**
 * Abstract conditional test used to route instances down the branches of a
 * split node in a Hoeffding tree. A test maps an instance to a branch number,
 * or to -1 when the outcome cannot be determined (e.g. a missing value).
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public abstract class InstanceConditionalTest extends AbstractMOAObject {

    /**
     * Returns the number of the branch for an instance, -1 if unknown.
     *
     * @param inst the instance to be used
     * @return the number of the branch for an instance, -1 if unknown.
     */
    public abstract int branchForInstance(Instance inst);

    /**
     * Reports whether this test can route the given instance, i.e. whether
     * {@link #branchForInstance} yields a valid (non-negative) branch.
     *
     * @param inst the instance to check
     * @return true if the branch for the instance is known
     */
    public boolean resultKnownForInstance(Instance inst) {
        final int branch = branchForInstance(inst);
        return branch >= 0;
    }

    /**
     * Gets the number of maximum branches, -1 if unknown.
     *
     * @return the number of maximum branches, -1 if unknown
     */
    public abstract int maxBranches();

    /**
     * Produces a human-readable description of the condition that leads to
     * one branch of this test.
     *
     * @param branch the number of the branch to describe
     * @param context the context or header of the data stream
     * @return the text that describes the condition of the branch
     */
    public abstract String describeConditionForBranch(int branch,
            InstancesHeader context);

    /**
     * Returns an array with the attributes that the test depends on.
     *
     * @return an array with the attributes that the test depends on
     */
    public abstract int[] getAttsTestDependsOn();
}
Java
/* * NominalAttributeBinaryTest.java * Copyright (C) 2007 University of Waikato, Hamilton, New Zealand * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.classifiers.core.conditionaltests; import moa.core.InstancesHeader; import weka.core.Instance; /** * Nominal binary conditional test for instances to use to split nodes in Hoeffding trees. * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision: 7 $ */ public class NominalAttributeBinaryTest extends InstanceConditionalBinaryTest { private static final long serialVersionUID = 1L; protected int attIndex; protected int attValue; public NominalAttributeBinaryTest(int attIndex, int attValue) { this.attIndex = attIndex; this.attValue = attValue; } @Override public int branchForInstance(Instance inst) { int instAttIndex = this.attIndex < inst.classIndex() ? this.attIndex : this.attIndex + 1; return inst.isMissing(instAttIndex) ? -1 : ((int) inst.value(instAttIndex) == this.attValue ? 0 : 1); } @Override public String describeConditionForBranch(int branch, InstancesHeader context) { if ((branch == 0) || (branch == 1)) { return InstancesHeader.getAttributeNameString(context, this.attIndex) + (branch == 0 ? 
" = " : " != ") + InstancesHeader.getNominalValueString(context, this.attIndex, this.attValue); } throw new IndexOutOfBoundsException(); } @Override public void getDescription(StringBuilder sb, int indent) { // TODO Auto-generated method stub } @Override public int[] getAttsTestDependsOn() { return new int[]{this.attIndex}; } }
Java
/* * NumericAttributeBinaryTest.java * Copyright (C) 2007 University of Waikato, Hamilton, New Zealand * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.classifiers.core.conditionaltests; import moa.core.InstancesHeader; import weka.core.Instance; /** * Numeric binary conditional test for instances to use to split nodes in Hoeffding trees. * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision: 7 $ */ public class NumericAttributeBinaryTest extends InstanceConditionalBinaryTest { private static final long serialVersionUID = 1L; protected int attIndex; protected double attValue; protected boolean equalsPassesTest; public NumericAttributeBinaryTest(int attIndex, double attValue, boolean equalsPassesTest) { this.attIndex = attIndex; this.attValue = attValue; this.equalsPassesTest = equalsPassesTest; } @Override public int branchForInstance(Instance inst) { int instAttIndex = this.attIndex < inst.classIndex() ? this.attIndex : this.attIndex + 1; if (inst.isMissing(instAttIndex)) { return -1; } double v = inst.value(instAttIndex); if (v == this.attValue) { return this.equalsPassesTest ? 0 : 1; } return v < this.attValue ? 0 : 1; } @Override public String describeConditionForBranch(int branch, InstancesHeader context) { if ((branch == 0) || (branch == 1)) { char compareChar = branch == 0 ? 
'<' : '>'; int equalsBranch = this.equalsPassesTest ? 0 : 1; return InstancesHeader.getAttributeNameString(context, this.attIndex) + ' ' + compareChar + (branch == equalsBranch ? "= " : " ") + InstancesHeader.getNumericValueString(context, this.attIndex, this.attValue); } throw new IndexOutOfBoundsException(); } @Override public void getDescription(StringBuilder sb, int indent) { // TODO Auto-generated method stub } @Override public int[] getAttsTestDependsOn() { return new int[]{this.attIndex}; } public double getSplitValue() { return this.attValue; } }
Java
/* * NominalAttributeMultiwayTest.java * Copyright (C) 2007 University of Waikato, Hamilton, New Zealand * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.classifiers.core.conditionaltests; import moa.core.InstancesHeader; import weka.core.Instance; /** * Nominal multi way conditional test for instances to use to split nodes in Hoeffding trees. * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision: 7 $ */ public class NominalAttributeMultiwayTest extends InstanceConditionalTest { private static final long serialVersionUID = 1L; protected int attIndex; public NominalAttributeMultiwayTest(int attIndex) { this.attIndex = attIndex; } @Override public int branchForInstance(Instance inst) { int instAttIndex = this.attIndex < inst.classIndex() ? this.attIndex : this.attIndex + 1; return inst.isMissing(instAttIndex) ? 
-1 : (int) inst.value(instAttIndex); } @Override public String describeConditionForBranch(int branch, InstancesHeader context) { return InstancesHeader.getAttributeNameString(context, this.attIndex) + " = " + InstancesHeader.getNominalValueString(context, this.attIndex, branch); } @Override public int maxBranches() { return -1; } @Override public void getDescription(StringBuilder sb, int indent) { // TODO Auto-generated method stub } @Override public int[] getAttsTestDependsOn() { return new int[]{this.attIndex}; } }
Java
/*
 *    SplitCriterion.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.classifiers.core.splitcriteria;

import moa.options.OptionHandler;

/**
 * Interface for computing splitting criteria
 * with respect to distributions of class values.
 * The split criterion is used as a parameter on
 * decision trees and decision stumps.
 * The two split criteria most used are
 * Information Gain and Gini.
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public interface SplitCriterion extends OptionHandler {

    /**
     * Computes the merit of splitting for a given
     * distribution before the split and the resulting
     * per-branch distributions after it.
     *
     * @param preSplitDist the class distribution before the split
     * @param postSplitDists the class distributions after the split, one per branch
     * @return value of the merit of splitting
     */
    public double getMeritOfSplit(double[] preSplitDist,
            double[][] postSplitDists);

    /**
     * Computes the range of splitting merit, used to bound the Hoeffding
     * inequality when deciding whether a split is warranted.
     *
     * @param preSplitDist the class distribution before the split
     * @return value of the range of splitting merit
     */
    public double getRangeOfMerit(double[] preSplitDist);
}
Java
/* * InfoGainSplitCriterion.java * Copyright (C) 2007 University of Waikato, Hamilton, New Zealand * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.classifiers.core.splitcriteria; import moa.core.ObjectRepository; import moa.options.AbstractOptionHandler; import moa.options.FloatOption; import moa.tasks.TaskMonitor; import weka.core.Utils; /** * Class for computing splitting criteria using information gain * with respect to distributions of class values. * The split criterion is used as a parameter on * decision trees and decision stumps. * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision: 7 $ */ public class InfoGainSplitCriterion extends AbstractOptionHandler implements SplitCriterion { private static final long serialVersionUID = 1L; public FloatOption minBranchFracOption = new FloatOption("minBranchFrac", 'f', "Minimum fraction of weight required down at least two branches.", 0.01, 0.0, 0.5); @Override public double getMeritOfSplit(double[] preSplitDist, double[][] postSplitDists) { if (numSubsetsGreaterThanFrac(postSplitDists, this.minBranchFracOption.getValue()) < 2) { return Double.NEGATIVE_INFINITY; } return computeEntropy(preSplitDist) - computeEntropy(postSplitDists); } @Override public double getRangeOfMerit(double[] preSplitDist) { int numClasses = preSplitDist.length > 2 ? 
preSplitDist.length : 2; return Utils.log2(numClasses); } public static double computeEntropy(double[] dist) { double entropy = 0.0; double sum = 0.0; for (double d : dist) { if (d > 0.0) { // TODO: how small can d be before log2 overflows? entropy -= d * Utils.log2(d); sum += d; } } return sum > 0.0 ? (entropy + sum * Utils.log2(sum)) / sum : 0.0; } public static double computeEntropy(double[][] dists) { double totalWeight = 0.0; double[] distWeights = new double[dists.length]; for (int i = 0; i < dists.length; i++) { distWeights[i] = Utils.sum(dists[i]); totalWeight += distWeights[i]; } double entropy = 0.0; for (int i = 0; i < dists.length; i++) { entropy += distWeights[i] * computeEntropy(dists[i]); } return entropy / totalWeight; } public static int numSubsetsGreaterThanFrac(double[][] distributions, double minFrac) { double totalWeight = 0.0; double[] distSums = new double[distributions.length]; for (int i = 0; i < distSums.length; i++) { for (int j = 0; j < distributions[i].length; j++) { distSums[i] += distributions[i][j]; } totalWeight += distSums[i]; } int numGreater = 0; for (double d : distSums) { double frac = d / totalWeight; if (frac > minFrac) { numGreater++; } } return numGreater; } @Override public void getDescription(StringBuilder sb, int indent) { // TODO Auto-generated method stub } @Override protected void prepareForUseImpl(TaskMonitor monitor, ObjectRepository repository) { // TODO Auto-generated method stub } }
Java
/*
 *    SDRSplitCriterion.java
 *    Copyright (C) 2013 University of Porto, Portugal
 *    @author Katie de Lange, E. Almeida, J. Gama
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */

/* Project Knowledge Discovery from Data Streams, FCT LIAAD-INESC TEC,
 *
 * Contact: jgama@fep.up.pt
 */

package moa.classifiers.core.splitcriteria;

/**
 * Standard-deviation-reduction (SDR) split criterion for regression trees.
 *
 * NOTE(review): this static method HIDES (does not override) the parent's
 * {@code computeSD}, which — despite its name — returns the variance. Calls
 * made through the parent class, including the parent's
 * {@code getMeritOfSplit}, are statically bound and will NOT reach this
 * version; confirm intended call sites before relying on it.
 */
public class SDRSplitCriterion extends VarianceReductionSplitCriterion {

    private static final long serialVersionUID = 1L;

    /**
     * Computes the standard deviation from sufficient statistics.
     *
     * @param dist sufficient statistics: [0]=count N, [1]=sum, [2]=sum of squares
     * @return the standard deviation, or 0 for an empty distribution
     */
    public static double computeSD(double[] dist) {
        int N = (int) dist[0];
        // FIX: previously N == 0 produced 0/0 = NaN.
        if (N <= 0) {
            return 0.0;
        }
        double sum = dist[1];
        double sumSq = dist[2];
        double variance = (sumSq - ((sum * sum) / N)) / N;
        // FIX: floating-point cancellation can make the computed variance
        // slightly negative, and sqrt of a negative is NaN — clamp at zero.
        return Math.sqrt(Math.max(variance, 0.0));
    }
}
Java
/*
 *    VarianceReductionSplitCriterion.java
 *    Copyright (C) 2013 University of Porto, Portugal
 *    @author Katie de Lange, E. Almeida, J. Gama
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */

/* Project Knowledge Discovery from Data Streams, FCT LIAAD-INESC TEC,
 *
 * Contact: jgama@fep.up.pt
 */

package moa.classifiers.core.splitcriteria;

import moa.core.ObjectRepository;
import moa.options.AbstractOptionHandler;
import moa.tasks.TaskMonitor;

/**
 * Variance-reduction split criterion for regression trees. Distributions are
 * passed as sufficient statistics per branch: [0]=count, [1]=sum,
 * [2]=sum of squares.
 */
public class VarianceReductionSplitCriterion extends AbstractOptionHandler implements SplitCriterion {

    private static final long serialVersionUID = 1L;

/*    @Override
    public double getMeritOfSplit(double[] preSplitDist, double[][] postSplitDists) {
    	double N = preSplitDist[0];
        double SDR = computeSD(preSplitDist);

     //   System.out.print("postSplitDists.length"+postSplitDists.length+"\n");

        for(int i = 0; i < postSplitDists.length; i++)
        {
        	double Ni = postSplitDists[i][0];
        	SDR -= (Ni/N)*computeSD(postSplitDists[i]);
        }
        return SDR;
    }*/

    /**
     * Variance reduction of the split: pre-split variance minus the
     * count-weighted variances of the branches. The reduction is only
     * computed when EVERY branch holds at least 5 instances; otherwise the
     * merit is 0, rejecting the split.
     */
    @Override
    public double getMeritOfSplit(double[] preSplitDist, double[][] postSplitDists) {
    	double SDR=0.0;
    	double N = preSplitDist[0];
    	int count = 0; 
    	
    	// Count branches with at least 5 instances (dist[0] is the count).
    	for(int i = 0; i < postSplitDists.length; i++)
    	{
    		double Ni = postSplitDists[i][0];
    		if(Ni >=5.0){
    			count = count +1;
    		}
    	}
    	
    	// Only score the split when every branch passed the minimum-count check.
    	if(count == postSplitDists.length){
    		SDR = computeSD(preSplitDist);
    		for(int i = 0; i < postSplitDists.length; i++)
    		{
    			double Ni = postSplitDists[i][0];
    			SDR -= (Ni/N)*computeSD(postSplitDists[i]);
    		}
    	}
        return SDR;
    }

    @Override
    public double getRangeOfMerit(double[] preSplitDist) {
        return 1;
    }

    /**
     * Computes the VARIANCE (not the standard deviation — the sqrt was
     * deliberately commented out below; callers such as getMeritOfSplit rely
     * on this) from sufficient statistics.
     *
     * @param dist sufficient statistics: [0]=count N, [1]=sum, [2]=sum of squares
     * @return the variance; NaN when N is 0 — callers are expected to pass
     *         non-empty distributions
     */
    public static double computeSD(double[] dist) {
    	
        int N = (int)dist[0];
        double sum = dist[1];
        double sumSq = dist[2];
       // return Math.sqrt((sumSq - ((sum * sum)/N))/N);
        return (sumSq - ((sum * sum)/N))/N;
    }

    @Override
    public void getDescription(StringBuilder sb, int indent) {
        // TODO Auto-generated method stub
    }

    @Override
    protected void prepareForUseImpl(TaskMonitor monitor, ObjectRepository repository) {
        // TODO Auto-generated method stub
    }
}
Java
/* * InfoGainSplitCriterionMultilabel.java * Copyright (C) 2012 University of Waikato, Hamilton, New Zealand * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @author Jesse Read (jesse@tsc.uc3m.es) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.classifiers.core.splitcriteria; import weka.core.Utils; /** * Class for computing splitting criteria using information gain with respect to * distributions of class values for Multilabel data. The split criterion is * used as a parameter on decision trees and decision stumps. * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @author Jesse Read (jesse@tsc.uc3m.es) * @version $Revision: 1 $ */ public class InfoGainSplitCriterionMultilabel extends InfoGainSplitCriterion { private static final long serialVersionUID = 1L; public static double computeEntropy(double[] dist) { double entropy = 0.0; double sum = 0.0; for (double d : dist) { sum += d; } if (sum > 0.0) { for (double num : dist) { double d = num / sum; if (d > 0.0) { // TODO: how small can d be before log2 overflows? entropy -= d * Utils.log2(d) + (1 - d) * Utils.log2(1 - d); //Extension to Multilabel } } } return sum > 0.0 ? entropy : 0.0; } }
Java
/* * GiniSplitCriterion.java * Copyright (C) 2007 University of Waikato, Hamilton, New Zealand * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.classifiers.core.splitcriteria; import moa.core.ObjectRepository; import moa.options.AbstractOptionHandler; import moa.tasks.TaskMonitor; import weka.core.Utils; /** * Class for computing splitting criteria using Gini * with respect to distributions of class values. * The split criterion is used as a parameter on * decision trees and decision stumps. 
* * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision: 7 $ */ public class GiniSplitCriterion extends AbstractOptionHandler implements SplitCriterion { private static final long serialVersionUID = 1L; @Override public double getMeritOfSplit(double[] preSplitDist, double[][] postSplitDists) { double totalWeight = 0.0; double[] distWeights = new double[postSplitDists.length]; for (int i = 0; i < postSplitDists.length; i++) { distWeights[i] = Utils.sum(postSplitDists[i]); totalWeight += distWeights[i]; } double gini = 0.0; for (int i = 0; i < postSplitDists.length; i++) { gini += (distWeights[i] / totalWeight) * computeGini(postSplitDists[i], distWeights[i]); } return 1.0 - gini; } @Override public double getRangeOfMerit(double[] preSplitDist) { return 1.0; } public static double computeGini(double[] dist, double distSumOfWeights) { double gini = 1.0; for (int i = 0; i < dist.length; i++) { double relFreq = dist[i] / distSumOfWeights; gini -= relFreq * relFreq; } return gini; } public static double computeGini(double[] dist) { return computeGini(dist, Utils.sum(dist)); } @Override public void getDescription(StringBuilder sb, int indent) { // TODO Auto-generated method stub } @Override protected void prepareForUseImpl(TaskMonitor monitor, ObjectRepository repository) { // TODO Auto-generated method stub } }
Java
/* * GreenwaldKhannaNumericAttributeClassObserver.java * Copyright (C) 2007 University of Waikato, Hamilton, New Zealand * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package moa.classifiers.core.attributeclassobservers; import moa.classifiers.core.AttributeSplitSuggestion; import moa.classifiers.core.conditionaltests.NumericAttributeBinaryTest; import moa.classifiers.core.splitcriteria.SplitCriterion; import weka.core.Utils; import moa.core.AutoExpandVector; import moa.core.DoubleVector; import moa.core.GreenwaldKhannaQuantileSummary; import moa.core.ObjectRepository; import moa.options.AbstractOptionHandler; import moa.options.IntOption; import moa.tasks.TaskMonitor; /** * Class for observing the class data distribution for a numeric attribute using Greenwald and Khanna methodology. * This observer monitors the class distribution of a given attribute. * Used in naive Bayes and decision trees to monitor data statistics on leaves. 
* * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision: 7 $ */ public class GreenwaldKhannaNumericAttributeClassObserver extends AbstractOptionHandler implements NumericAttributeClassObserver { private static final long serialVersionUID = 1L; protected AutoExpandVector<GreenwaldKhannaQuantileSummary> attValDistPerClass = new AutoExpandVector<GreenwaldKhannaQuantileSummary>(); public IntOption numTuplesOption = new IntOption("numTuples", 'n', "The number of tuples.", 10, 1, Integer.MAX_VALUE); @Override public void observeAttributeClass(double attVal, int classVal, double weight) { if (Utils.isMissingValue(attVal)) { } else { GreenwaldKhannaQuantileSummary valDist = this.attValDistPerClass.get(classVal); if (valDist == null) { valDist = new GreenwaldKhannaQuantileSummary(this.numTuplesOption.getValue()); this.attValDistPerClass.set(classVal, valDist); } // TODO: not taking weight into account valDist.insert(attVal); } } @Override public double probabilityOfAttributeValueGivenClass(double attVal, int classVal) { // TODO: NaiveBayes broken until implemented return 0.0; } @Override public AttributeSplitSuggestion getBestEvaluatedSplitSuggestion( SplitCriterion criterion, double[] preSplitDist, int attIndex, boolean binaryOnly) { AttributeSplitSuggestion bestSuggestion = null; for (GreenwaldKhannaQuantileSummary qs : this.attValDistPerClass) { if (qs != null) { double[] cutpoints = qs.getSuggestedCutpoints(); for (double cutpoint : cutpoints) { double[][] postSplitDists = getClassDistsResultingFromBinarySplit(cutpoint); double merit = criterion.getMeritOfSplit(preSplitDist, postSplitDists); if ((bestSuggestion == null) || (merit > bestSuggestion.merit)) { bestSuggestion = new AttributeSplitSuggestion( new NumericAttributeBinaryTest(attIndex, cutpoint, true), postSplitDists, merit); } } } } return bestSuggestion; } // assume all values equal to splitValue go to lhs public double[][] getClassDistsResultingFromBinarySplit(double splitValue) { 
DoubleVector lhsDist = new DoubleVector(); DoubleVector rhsDist = new DoubleVector(); for (int i = 0; i < this.attValDistPerClass.size(); i++) { GreenwaldKhannaQuantileSummary estimator = this.attValDistPerClass.get(i); if (estimator != null) { long countBelow = estimator.getCountBelow(splitValue); lhsDist.addToValue(i, countBelow); rhsDist.addToValue(i, estimator.getTotalCount() - countBelow); } } return new double[][]{lhsDist.getArrayRef(), rhsDist.getArrayRef()}; } @Override public void getDescription(StringBuilder sb, int indent) { // TODO Auto-generated method stub } @Override protected void prepareForUseImpl(TaskMonitor monitor, ObjectRepository repository) { // TODO Auto-generated method stub } @Override public void observeAttributeTarget(double attVal, double target) { throw new UnsupportedOperationException("Not supported yet."); } }
Java
/*
 *    DiscreteAttributeClassObserver.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.classifiers.core.attributeclassobservers;

/**
 * Interface for observing the class data distribution for a discrete (nominal) attribute.
 * This observer monitors the class distribution of a given attribute.
 * Used in naive Bayes and decision trees to monitor data statistics on leaves.
 *
 * <p>Marker interface: it declares no members of its own and exists only to
 * distinguish nominal-attribute observers from numeric ones at the type level.</p>
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public interface DiscreteAttributeClassObserver extends AttributeClassObserver {
}
Java
/*
 *    FIMTDDNumericAttributeClassObserver.java
 *    Copyright (C) 2013 University of Porto, Portugal
 *    @author Katie de Lange, E. Almeida, J. Gama
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */

/* Project Knowledge Discovery from Data Streams, FCT LIAAD-INESC TEC,
 *
 * Contact: jgama@fep.up.pt
 */

package moa.classifiers.core.attributeclassobservers;

import java.io.Serializable;

import moa.classifiers.core.AttributeSplitSuggestion;
import moa.classifiers.core.conditionaltests.NumericAttributeBinaryTest;
import moa.classifiers.core.splitcriteria.SplitCriterion;
import moa.core.DoubleVector;
import moa.core.ObjectRepository;
import moa.tasks.TaskMonitor;

/**
 * Extended Binary Search Tree (E-BST) observer for numeric attributes in
 * FIMT-DD regression trees. Each tree node stores a candidate cut point and
 * the sufficient statistics {count, sum, sumSq} of the target for examples
 * falling to its left (&lt;=) and right (&gt;).
 */
public class FIMTDDNumericAttributeClassObserver extends BinaryTreeNumericAttributeClassObserver
        implements NumericAttributeClassObserver {

    private static final long serialVersionUID = 1L;

    /** One node of the E-BST: a cut point plus left/right target statistics. */
    protected class Node implements Serializable {

        private static final long serialVersionUID = 1L;

        // The split point to use
        public double cut_point;

        // E-BST statistics: index 0 = count, 1 = sum(target), 2 = sum(target^2)
        public DoubleVector leftStatistics = new DoubleVector();

        public DoubleVector rightStatistics = new DoubleVector();

        // Child nodes
        public Node left;

        public Node right;

        // NOTE(review): 'weight' is accepted but ignored — the count is
        // incremented by 1 regardless; preserved to keep behavior identical.
        public Node(double val, double label, double weight) {
            this.cut_point = val;
            this.leftStatistics.addToValue(0, 1);
            this.leftStatistics.addToValue(1, label);
            this.leftStatistics.addToValue(2, label * label);
        }

        /**
         * Inserts a new value into the tree, updating both the sum of values
         * and sum of squared values statistics along the path.
         */
        public void insertValue(double val, double label, double weight) {
            if (val == this.cut_point) {
                // Exact match: values equal to the cut point belong to the left side.
                this.leftStatistics.addToValue(0, 1);
                this.leftStatistics.addToValue(1, label);
                this.leftStatistics.addToValue(2, label * label);
            } else if (val <= this.cut_point) {
                // Smaller value: update the left (<=) statistics and descend
                // left, creating the child if needed.
                this.leftStatistics.addToValue(0, 1);
                this.leftStatistics.addToValue(1, label);
                this.leftStatistics.addToValue(2, label * label);
                if (this.left == null) {
                    this.left = new Node(val, label, weight);
                } else {
                    this.left.insertValue(val, label, weight);
                }
            } else { // val > cut_point
                // Larger value: update the right (>) statistics and descend
                // right, creating the child if needed.
                this.rightStatistics.addToValue(0, 1);
                this.rightStatistics.addToValue(1, label);
                this.rightStatistics.addToValue(2, label * label);
                if (this.right == null) {
                    this.right = new Node(val, label, weight);
                } else {
                    this.right.insertValue(val, label, weight);
                }
            }
        }
    }

    // Root node of the E-BST structure for this attribute
    protected Node root = null;

    // Global accumulators for the FindBestSplit traversal: running statistics
    // of the examples to the left/right of the cut point currently considered.
    double sumTotalLeft;

    double sumTotalRight;

    double sumSqTotalLeft;

    double sumSqTotalRight;

    double countRightTotal;

    double countLeftTotal;

    /**
     * Records an (attribute value, target) observation in the E-BST.
     * NaN attribute values (missing) are ignored.
     */
    public void observeAttributeClass(double attVal, double classVal, double weight) {
        if (Double.isNaN(attVal)) { // Instance.isMissingValue(attVal)
            return;
        }
        if (this.root == null) {
            this.root = new Node(attVal, classVal, weight);
        } else {
            this.root.insertValue(attVal, classVal, weight);
        }
    }

    @Override
    public double probabilityOfAttributeValueGivenClass(double attVal, int classVal) {
        // TODO: NaiveBayes broken until implemented
        return 0.0;
    }

    /**
     * Runs the FindBestSplit in-order traversal over the E-BST and returns
     * the binary split with the highest merit under the criterion.
     */
    @Override
    public AttributeSplitSuggestion getBestEvaluatedSplitSuggestion(SplitCriterion criterion,
            double[] preSplitDist, int attIndex, boolean binaryOnly) {
        // Initialise the traversal accumulators: everything starts on the right.
        sumTotalLeft = 0;
        sumTotalRight = preSplitDist[1];
        sumSqTotalLeft = 0;
        sumSqTotalRight = preSplitDist[2];
        countLeftTotal = 0;
        countRightTotal = preSplitDist[0];

        return searchForBestSplitOption(this.root, null, criterion, attIndex);
    }

    /**
     * Implementation of the FindBestSplit algorithm from E. Ikonomovska et al.
     * In-order traversal: shift this node's left statistics from right to
     * left, evaluate the cut point, recurse, then restore the accumulators.
     */
    protected AttributeSplitSuggestion searchForBestSplitOption(Node currentNode,
            AttributeSplitSuggestion currentBestOption, SplitCriterion criterion, int attIndex) {
        // Stop when the node is null or all examples have moved to the left.
        if (currentNode == null || countRightTotal == 0.0) {
            return currentBestOption;
        }

        if (currentNode.left != null) {
            currentBestOption = searchForBestSplitOption(currentNode.left, currentBestOption, criterion, attIndex);
        }

        // Move this node's left-side statistics across the cut point.
        sumTotalLeft += currentNode.leftStatistics.getValue(1);
        sumTotalRight -= currentNode.leftStatistics.getValue(1);
        sumSqTotalLeft += currentNode.leftStatistics.getValue(2);
        sumSqTotalRight -= currentNode.leftStatistics.getValue(2);
        countLeftTotal += currentNode.leftStatistics.getValue(0);
        countRightTotal -= currentNode.leftStatistics.getValue(0);

        double[][] postSplitDists = new double[][]{
            {countLeftTotal, sumTotalLeft, sumSqTotalLeft},
            {countRightTotal, sumTotalRight, sumSqTotalRight}};
        double[] preSplitDist = new double[]{
            (countLeftTotal + countRightTotal),
            (sumTotalLeft + sumTotalRight),
            (sumSqTotalLeft + sumSqTotalRight)};
        double merit = criterion.getMeritOfSplit(preSplitDist, postSplitDists);

        if ((currentBestOption == null) || (merit > currentBestOption.merit)) {
            currentBestOption = new AttributeSplitSuggestion(
                    new NumericAttributeBinaryTest(attIndex, currentNode.cut_point, true),
                    postSplitDists, merit);
        }

        if (currentNode.right != null) {
            currentBestOption = searchForBestSplitOption(currentNode.right, currentBestOption, criterion, attIndex);
        }

        // Undo the shift so the accumulators are correct for the caller.
        sumTotalLeft -= currentNode.leftStatistics.getValue(1);
        sumTotalRight += currentNode.leftStatistics.getValue(1);
        sumSqTotalLeft -= currentNode.leftStatistics.getValue(2);
        sumSqTotalRight += currentNode.leftStatistics.getValue(2);
        countLeftTotal -= currentNode.leftStatistics.getValue(0);
        countRightTotal += currentNode.leftStatistics.getValue(0);

        return currentBestOption;
    }

    /**
     * A method to remove all nodes in the E-BST in which it and all its
     * children represent 'bad' split points.
     *
     * <p>BUG FIX: the previous implementation assigned {@code currentNode = null}
     * to a local variable, so no node was ever actually detached; the tree was
     * never pruned. Pruned subtrees are now detached by their parent (and the
     * root by this method).</p>
     */
    public void removeBadSplits(SplitCriterion criterion, double lastCheckRatio,
            double lastCheckSDR, double lastCheckE) {
        if (removeBadSplitNodes(criterion, this.root, lastCheckRatio, lastCheckSDR, lastCheckE)) {
            this.root = null; // the entire tree represented bad split points
        }
    }

    /**
     * Recursive method that first checks all of a node's children before
     * deciding if it is 'bad' and may be removed. Returns true when the
     * subtree rooted at {@code currentNode} should be detached by the caller.
     */
    private boolean removeBadSplitNodes(SplitCriterion criterion, Node currentNode,
            double lastCheckRatio, double lastCheckSDR, double lastCheckE) {
        boolean isBad = false;

        if (currentNode == null) {
            return true;
        }

        if (currentNode.left != null) {
            isBad = removeBadSplitNodes(criterion, currentNode.left, lastCheckRatio, lastCheckSDR, lastCheckE);
            if (isBad) {
                currentNode.left = null; // detach the pruned left subtree
            }
        }

        if (currentNode.right != null && isBad) {
            // BUG FIX: previously recursed into currentNode.left again
            // (copy-paste error); the right child was never examined.
            isBad = removeBadSplitNodes(criterion, currentNode.right, lastCheckRatio, lastCheckSDR, lastCheckE);
            if (isBad) {
                currentNode.right = null; // detach the pruned right subtree
            }
        }

        if (isBad) {
            double[][] postSplitDists = new double[][]{
                {currentNode.leftStatistics.getValue(0), currentNode.leftStatistics.getValue(1), currentNode.leftStatistics.getValue(2)},
                {currentNode.rightStatistics.getValue(0), currentNode.rightStatistics.getValue(1), currentNode.rightStatistics.getValue(2)}};
            double[] preSplitDist = new double[]{
                (currentNode.leftStatistics.getValue(0) + currentNode.rightStatistics.getValue(0)),
                (currentNode.leftStatistics.getValue(1) + currentNode.rightStatistics.getValue(1)),
                (currentNode.leftStatistics.getValue(2) + currentNode.rightStatistics.getValue(2))};
            double merit = criterion.getMeritOfSplit(preSplitDist, postSplitDists);

            if ((merit / lastCheckSDR) < (lastCheckRatio - (2 * lastCheckE))) {
                return true; // this node is bad too: caller detaches it
            }
        }
        return false;
    }

    @Override
    public void getDescription(StringBuilder sb, int indent) {
        // Intentionally empty: this observer has no textual description.
    }

    @Override
    protected void prepareForUseImpl(TaskMonitor monitor, ObjectRepository repository) {
        // No preparation required.
    }
}
Java
/*
 *    VFMLNumericAttributeClassObserver.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.classifiers.core.attributeclassobservers;

import weka.core.Utils;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;

import moa.classifiers.core.AttributeSplitSuggestion;
import moa.classifiers.core.conditionaltests.NumericAttributeBinaryTest;
import moa.classifiers.core.splitcriteria.SplitCriterion;
import moa.core.DoubleVector;
import moa.core.ObjectRepository;
import moa.options.AbstractOptionHandler;
import moa.options.IntOption;
import moa.tasks.TaskMonitor;

/**
 * Class for observing the class data distribution for a numeric attribute as in VFML.
 * Used in naive Bayes and decision trees to monitor data statistics on leaves.
 *
 * <p>The attribute's range is summarised by a bounded list of bins; once the
 * bin budget is exhausted, new values are absorbed into existing bins.</p>
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public class VFMLNumericAttributeClassObserver extends AbstractOptionHandler
        implements NumericAttributeClassObserver {

    private static final long serialVersionUID = 1L;

    @Override
    public void observeAttributeTarget(double attVal, double target) {
        throw new UnsupportedOperationException("Not supported yet.");
    }

    /** A histogram bin: value range, per-class weights, and boundary bookkeeping. */
    protected class Bin implements Serializable {

        private static final long serialVersionUID = 1L;

        public double lowerBound, upperBound;

        public DoubleVector classWeights = new DoubleVector();

        // Class and weight of the examples observed exactly at lowerBound;
        // they stay with the old bin when a bin is split.
        public int boundaryClass;

        public double boundaryWeight;
    }

    protected List<Bin> binList = new ArrayList<Bin>();

    public IntOption numBinsOption = new IntOption("numBins", 'n',
            "The number of bins.", 10, 1, Integer.MAX_VALUE);

    /**
     * Records an (attribute value, class, weight) observation, creating,
     * growing, or splitting bins as needed. Missing values are ignored.
     */
    @Override
    public void observeAttributeClass(double attVal, int classVal, double weight) {
        // Guard clause replaces the previous empty if-branch.
        if (Utils.isMissingValue(attVal)) {
            return;
        }
        if (this.binList.size() < 1) {
            // create the first bin
            Bin newBin = new Bin();
            newBin.classWeights.addToValue(classVal, weight);
            newBin.boundaryClass = classVal;
            newBin.boundaryWeight = weight;
            newBin.upperBound = attVal;
            newBin.lowerBound = attVal;
            this.binList.add(newBin);
        } else {
            // find bin containing new example with binary search
            int index = 0;
            boolean found = false;
            int min = 0;
            int max = this.binList.size() - 1;
            while ((min <= max) && !found) {
                int i = (min + max) / 2;
                Bin bin = this.binList.get(i);
                if (((attVal >= bin.lowerBound) && (attVal < bin.upperBound))
                        || ((i == this.binList.size() - 1) && (attVal >= bin.lowerBound) && (attVal <= bin.upperBound))) {
                    found = true;
                    index = i;
                } else if (attVal < bin.lowerBound) {
                    max = i - 1;
                } else {
                    min = i + 1;
                }
            }
            boolean first = false;
            boolean last = false;
            if (!found) {
                // determine if it is before or after the existing range
                Bin bin = this.binList.get(0);
                if (bin.lowerBound > attVal) {
                    // go before the first bin
                    index = 0;
                    first = true;
                } else {
                    // if we haven't found it yet value must be > last bins upperBound
                    index = this.binList.size() - 1;
                    last = true;
                }
            }
            Bin bin = this.binList.get(index); // VLIndex(ct->bins, index);
            if ((bin.lowerBound == attVal)
                    || (this.binList.size() >= this.numBinsOption.getValue())) {
                // if this is the exact same boundary and class as the bin
                // boundary or we aren't adding new bins any more then
                // increment boundary counts
                bin.classWeights.addToValue(classVal, weight);
                if ((bin.boundaryClass == classVal) && (bin.lowerBound == attVal)) {
                    // if it is also the same class then special case it
                    bin.boundaryWeight += weight;
                }
            } else {
                // create a new bin
                Bin newBin = new Bin();
                newBin.classWeights.addToValue(classVal, weight);
                newBin.boundaryWeight = weight;
                newBin.boundaryClass = classVal;
                newBin.upperBound = bin.upperBound;
                newBin.lowerBound = attVal;

                double percent = 0.0;
                // estimate initial counts with a linear interpolation
                if (!((bin.upperBound - bin.lowerBound == 0) || last || first)) {
                    percent = 1.0 - ((attVal - bin.lowerBound) / (bin.upperBound - bin.lowerBound));
                }

                // take out the boundary points, they stay with the old bin
                bin.classWeights.addToValue(bin.boundaryClass, -bin.boundaryWeight);
                DoubleVector weightToShift = new DoubleVector(bin.classWeights);
                weightToShift.scaleValues(percent);
                newBin.classWeights.addValues(weightToShift);
                bin.classWeights.subtractValues(weightToShift);
                // put the boundary examples back in
                bin.classWeights.addToValue(bin.boundaryClass, bin.boundaryWeight);

                // insert the new bin in the right place
                if (last) {
                    bin.upperBound = attVal;
                    newBin.upperBound = attVal;
                    this.binList.add(newBin);
                } else if (first) {
                    newBin.upperBound = bin.lowerBound;
                    this.binList.add(0, newBin);
                } else {
                    newBin.upperBound = bin.upperBound;
                    bin.upperBound = attVal;
                    this.binList.add(index + 1, newBin);
                }
            }
        }
    }

    @Override
    public double probabilityOfAttributeValueGivenClass(double attVal, int classVal) {
        // TODO: NaiveBayes broken until implemented
        return 0.0;
    }

    /**
     * Evaluates the binary split at each bin's upper bound (cumulative left
     * distribution vs. remainder) and returns the highest-merit suggestion.
     */
    @Override
    public AttributeSplitSuggestion getBestEvaluatedSplitSuggestion(
            SplitCriterion criterion, double[] preSplitDist, int attIndex,
            boolean binaryOnly) {
        AttributeSplitSuggestion bestSuggestion = null;
        DoubleVector rightDist = new DoubleVector();
        for (Bin bin : this.binList) {
            rightDist.addValues(bin.classWeights);
        }
        DoubleVector leftDist = new DoubleVector();
        for (Bin bin : this.binList) {
            // Shift this bin's weight from the right side to the left side.
            leftDist.addValues(bin.classWeights);
            rightDist.subtractValues(bin.classWeights);
            double[][] postSplitDists = new double[][]{
                leftDist.getArrayCopy(), rightDist.getArrayCopy()};
            double merit = criterion.getMeritOfSplit(preSplitDist, postSplitDists);
            if ((bestSuggestion == null) || (merit > bestSuggestion.merit)) {
                bestSuggestion = new AttributeSplitSuggestion(
                        new NumericAttributeBinaryTest(attIndex, bin.upperBound, false),
                        postSplitDists, merit);
            }
        }
        return bestSuggestion;
    }

    @Override
    public void getDescription(StringBuilder sb, int indent) {
        // Intentionally empty: this observer has no textual description.
    }

    @Override
    protected void prepareForUseImpl(TaskMonitor monitor, ObjectRepository repository) {
        // No preparation required.
    }
}
Java
/*
 *    BinaryTreeNumericAttributeClassObserverRegression.java
 *    Copyright (C) 2013 University of Porto, Portugal
 *    @author E. Almeida, J. Gama
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package moa.classifiers.core.attributeclassobservers;

import java.io.Serializable;

import moa.classifiers.core.AttributeSplitSuggestion;
import moa.classifiers.core.splitcriteria.SplitCriterion;
import moa.core.ObjectRepository;
import moa.options.AbstractOptionHandler;
import moa.tasks.TaskMonitor;

/**
 * Observer for a numeric attribute in a regression setting, keeping target
 * statistics in a binary search tree keyed on attribute value.
 * Each node stores, for the instances routed through it, the sum of target
 * values, the sum of squared targets and an instance counter, split into a
 * "less than or equal to the cut point" side and a "greater than" side.
 *
 * <p>Learning Adaptive Model Rules from High-Speed Data Streams, ECML 2013,
 * E. Almeida, C. Ferreira, P. Kosina and J. Gama; </p>
 *
 * @author E. Almeida, J. Gama
 * @version $Revision: 2$
 */
public class BinaryTreeNumericAttributeClassObserverRegression
        extends AbstractOptionHandler
        implements NumericAttributeClassObserver {

    public static final long serialVersionUID = 1L;

    /**
     * One node of the statistics tree: the cut point (the first attribute
     * value inserted at this position) plus per-side target statistics and
     * links to the left/right subtrees.
     */
    public class Node implements Serializable {

        private static final long serialVersionUID = 1L;

        // Attribute value this node splits on.
        public double cut_point;

        // Statistics for instances with attribute value <= cut_point:
        // [0] sum of targets, [1] sum of squared targets, [2] count.
        public double[] lessThan;

        // Same layout, for instances with attribute value > cut_point.
        public double[] greaterThan;

        public Node left;

        public Node right;

        /**
         * Creates a node cut at {@code val}, seeded with the first
         * instance's target on the "less than or equal" side.
         */
        public Node(double val, double target) {
            this.cut_point = val;
            this.lessThan = new double[3];
            this.greaterThan = new double[3];
            this.lessThan[0] = target; // The sum of the target attribute values.
            this.lessThan[1] = target * target; // The sum of the squared target attribute values.
            this.lessThan[2] = 1.0; // A counter of the number of instances that have reached the node.
            this.greaterThan[0] = 0.0;
            this.greaterThan[1] = 0.0;
            this.greaterThan[2] = 0.0;
        }

        /**
         * Routes one (attribute value, target) observation down the tree,
         * updating the per-side statistics of every node on the path.
         */
        public void insertValue(double val, double target) {
            if (val == this.cut_point) {
                // Exact match on the cut point: update this node only,
                // without descending further.
                this.lessThan[0] = this.lessThan[0] + target;
                this.lessThan[1] = this.lessThan[1] + (target * target);
                this.lessThan[2] = this.lessThan[2] + 1;
            } else if (val <= this.cut_point) {
                this.lessThan[0] = this.lessThan[0] + target;
                this.lessThan[1] = this.lessThan[1] + (target * target);
                this.lessThan[2] = this.lessThan[2] + 1;
                if (this.left == null) {
                    this.left = new Node(val, target);
                } else {
                    this.left.insertValue(val, target);
                }
            } else {
                this.greaterThan[0] = this.greaterThan[0] + target;
                this.greaterThan[1] = this.greaterThan[1] + (target * target);
                this.greaterThan[2] = this.greaterThan[2] + 1;
                if (this.right == null) {
                    this.right = new Node(val, target);
                } else {
                    this.right.insertValue(val, target);
                }
            }
        }
    }

    // Root of the statistics tree; null until the first observation.
    public Node root1 = null;

    /**
     * Records one observation; missing (NaN) attribute values are ignored.
     */
    public void observeAttributeTarget(double attVal, double target) {
        if (Double.isNaN(attVal)) {
            // Missing attribute value: deliberately skipped.
        } else {
            if (this.root1 == null) {
                this.root1 = new Node(attVal, target);
            } else {
                this.root1.insertValue(attVal, target);
            }
        }
    }

    /**
     * Classification-style observation is not used by this regression
     * observer; intentionally a no-op.
     */
    @Override
    public void observeAttributeClass(double attVal, int classVal,
            double weight) {
    }

    @Override
    public double probabilityOfAttributeValueGivenClass(double attVal,
            int classVal) {
        // TODO: NaiveBayes broken until implemented
        return 0.0;
    }

    @Override
    public AttributeSplitSuggestion getBestEvaluatedSplitSuggestion(
            SplitCriterion criterion, double[] preSplitDist, int attIndex,
            boolean binaryOnly) {
        // NOTE(review): searchForBestSplitOption below is a stub that
        // returns its currentBestOption argument unchanged, so this call
        // always yields null until the search is implemented.
        return searchForBestSplitOption(this.root1, null, null, null, null,
                false, criterion, preSplitDist, attIndex);
    }

    /**
     * Placeholder for the recursive split search over the statistics tree.
     * Currently returns {@code currentBestOption} unchanged.
     */
    protected AttributeSplitSuggestion searchForBestSplitOption(
            Node currentNode, AttributeSplitSuggestion currentBestOption,
            double[] actualParentLeft, double[] parentLeft,
            double[] parentRight, boolean leftChild, SplitCriterion criterion,
            double[] preSplitDist, int attIndex) {
        return currentBestOption;
    }

    @Override
    public void getDescription(StringBuilder sb, int indent) {
        // TODO Auto-generated method stub
    }

    @Override
    protected void prepareForUseImpl(TaskMonitor monitor,
            ObjectRepository repository) {
        // TODO Auto-generated method stub
    }
}
Java
/*
 *    GaussianNumericAttributeClassObserver.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
package moa.classifiers.core.attributeclassobservers;

import moa.core.ObjectRepository;
import moa.tasks.TaskMonitor;
import weka.core.Utils;

import java.util.Set;
import java.util.TreeSet;

import moa.classifiers.core.AttributeSplitSuggestion;
import moa.classifiers.core.conditionaltests.NumericAttributeBinaryTest;
import moa.classifiers.core.splitcriteria.SplitCriterion;
import moa.core.AutoExpandVector;
import moa.core.DoubleVector;
import moa.core.GaussianEstimator;
import moa.options.AbstractOptionHandler;
import moa.options.IntOption;

/**
 * Class for observing the class data distribution for a numeric attribute
 * using gaussian estimators: one Gaussian per class, plus the per-class
 * min/max attribute value actually observed.
 * Used in naive Bayes and decision trees to monitor data statistics on
 * leaves.
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public class GaussianNumericAttributeClassObserver
        extends AbstractOptionHandler
        implements NumericAttributeClassObserver {

    private static final long serialVersionUID = 1L;

    // Smallest attribute value observed so far, per class index.
    protected DoubleVector minValueObservedPerClass = new DoubleVector();

    // Largest attribute value observed so far, per class index.
    protected DoubleVector maxValueObservedPerClass = new DoubleVector();

    // One Gaussian estimator per class, fitted to this attribute's values;
    // entries are created lazily on first observation of a class.
    protected AutoExpandVector<GaussianEstimator> attValDistPerClass = new AutoExpandVector<GaussianEstimator>();

    // Number of equally spaced candidate split points to evaluate.
    public IntOption numBinsOption = new IntOption("numBins", 'n',
            "The number of bins.", 10, 1, Integer.MAX_VALUE);

    /**
     * Folds one weighted observation into the estimator of its class,
     * keeping the per-class min/max up to date. Missing values are ignored.
     */
    @Override
    public void observeAttributeClass(double attVal, int classVal,
            double weight) {
        if (Utils.isMissingValue(attVal)) {
            // Missing attribute value: deliberately skipped.
        } else {
            GaussianEstimator valDist = this.attValDistPerClass.get(classVal);
            if (valDist == null) {
                // First observation for this class: create the estimator
                // and seed min/max with this value.
                valDist = new GaussianEstimator();
                this.attValDistPerClass.set(classVal, valDist);
                this.minValueObservedPerClass.setValue(classVal, attVal);
                this.maxValueObservedPerClass.setValue(classVal, attVal);
            } else {
                if (attVal < this.minValueObservedPerClass.getValue(classVal)) {
                    this.minValueObservedPerClass.setValue(classVal, attVal);
                }
                if (attVal > this.maxValueObservedPerClass.getValue(classVal)) {
                    this.maxValueObservedPerClass.setValue(classVal, attVal);
                }
            }
            valDist.addObservation(attVal, weight);
        }
    }

    /**
     * Returns the Gaussian density of {@code attVal} under the given
     * class's estimator, or 0.0 if that class has never been observed.
     */
    @Override
    public double probabilityOfAttributeValueGivenClass(double attVal,
            int classVal) {
        GaussianEstimator obs = this.attValDistPerClass.get(classVal);
        return obs != null ? obs.probabilityDensity(attVal) : 0.0;
    }

    /**
     * Evaluates every suggested split point and returns the suggestion
     * with the highest merit (null if there are no candidates).
     */
    @Override
    public AttributeSplitSuggestion getBestEvaluatedSplitSuggestion(
            SplitCriterion criterion, double[] preSplitDist, int attIndex,
            boolean binaryOnly) {
        AttributeSplitSuggestion bestSuggestion = null;
        double[] suggestedSplitValues = getSplitPointSuggestions();
        for (double splitValue : suggestedSplitValues) {
            double[][] postSplitDists = getClassDistsResultingFromBinarySplit(splitValue);
            double merit = criterion.getMeritOfSplit(preSplitDist,
                    postSplitDists);
            if ((bestSuggestion == null) || (merit > bestSuggestion.merit)) {
                bestSuggestion = new AttributeSplitSuggestion(
                        new NumericAttributeBinaryTest(attIndex, splitValue,
                        true), postSplitDists, merit);
            }
        }
        return bestSuggestion;
    }

    /**
     * Builds numBins equally spaced candidate split points strictly inside
     * the overall [min, max] range observed across all classes. Returned
     * sorted and de-duplicated (TreeSet); empty if nothing was observed.
     */
    public double[] getSplitPointSuggestions() {
        Set<Double> suggestedSplitValues = new TreeSet<Double>();
        double minValue = Double.POSITIVE_INFINITY;
        double maxValue = Double.NEGATIVE_INFINITY;
        // Overall range = union of the per-class observed ranges.
        for (int i = 0; i < this.attValDistPerClass.size(); i++) {
            GaussianEstimator estimator = this.attValDistPerClass.get(i);
            if (estimator != null) {
                if (this.minValueObservedPerClass.getValue(i) < minValue) {
                    minValue = this.minValueObservedPerClass.getValue(i);
                }
                if (this.maxValueObservedPerClass.getValue(i) > maxValue) {
                    maxValue = this.maxValueObservedPerClass.getValue(i);
                }
            }
        }
        if (minValue < Double.POSITIVE_INFINITY) {
            double range = maxValue - minValue;
            for (int i = 0; i < this.numBinsOption.getValue(); i++) {
                double splitValue = range / (this.numBinsOption.getValue() + 1.0) * (i + 1)
                        + minValue;
                // Exclude the endpoints: splitting there is degenerate.
                if ((splitValue > minValue) && (splitValue < maxValue)) {
                    suggestedSplitValues.add(splitValue);
                }
            }
        }
        double[] suggestions = new double[suggestedSplitValues.size()];
        int i = 0;
        for (double suggestion : suggestedSplitValues) {
            suggestions[i++] = suggestion;
        }
        return suggestions;
    }

    // assume all values equal to splitValue go to lhs
    /**
     * Estimates the per-class weight landing on each side of a binary
     * split at {@code splitValue}, returning {lhsDist, rhsDist}. Classes
     * whose whole observed range lies on one side contribute their entire
     * weight to that side; otherwise the Gaussian estimate is used.
     */
    public double[][] getClassDistsResultingFromBinarySplit(double splitValue) {
        DoubleVector lhsDist = new DoubleVector();
        DoubleVector rhsDist = new DoubleVector();
        for (int i = 0; i < this.attValDistPerClass.size(); i++) {
            GaussianEstimator estimator = this.attValDistPerClass.get(i);
            if (estimator != null) {
                if (splitValue < this.minValueObservedPerClass.getValue(i)) {
                    rhsDist.addToValue(i, estimator.getTotalWeightObserved());
                } else if (splitValue >= this.maxValueObservedPerClass.getValue(i)) {
                    lhsDist.addToValue(i, estimator.getTotalWeightObserved());
                } else {
                    double[] weightDist = estimator.estimatedWeight_LessThan_EqualTo_GreaterThan_Value(splitValue);
                    // Equal-to weight joins the less-than side (lhs).
                    lhsDist.addToValue(i, weightDist[0] + weightDist[1]);
                    rhsDist.addToValue(i, weightDist[2]);
                }
            }
        }
        return new double[][]{lhsDist.getArrayRef(), rhsDist.getArrayRef()};
    }

    @Override
    public void getDescription(StringBuilder sb, int indent) {
        // TODO Auto-generated method stub
    }

    @Override
    protected void prepareForUseImpl(TaskMonitor monitor,
            ObjectRepository repository) {
        // TODO Auto-generated method stub
    }

    /**
     * Regression-style observation is not supported by this
     * classification observer.
     */
    @Override
    public void observeAttributeTarget(double attVal, double target) {
        throw new UnsupportedOperationException("Not supported yet.");
    }
}
Java
/*
 *    NumericAttributeClassObserver.java
 *    Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
 *    @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
package moa.classifiers.core.attributeclassobservers;

/**
 * Marker interface for observers of the class data distribution of a
 * numeric attribute. It adds no members of its own beyond those inherited
 * from {@link AttributeClassObserver}; implementations are selected by
 * this type where a numeric-attribute observer is required.
 * Used in naive Bayes and decision trees to monitor data statistics on
 * leaves.
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public interface NumericAttributeClassObserver extends AttributeClassObserver {
}
Java