repo
stringlengths
1
191
file
stringlengths
23
351
code
stringlengths
0
5.32M
file_length
int64
0
5.32M
avg_line_length
float64
0
2.9k
max_line_length
int64
0
288k
extension_type
stringclasses
1 value
multeval
multeval-master/src/multeval/metrics/SuffStats.java
package multeval.metrics;

/**
 * Container for a metric's sufficient statistics for one data point (e.g. one
 * sentence). Implementations hold whatever counts the metric needs so that
 * corpus-level scores can be computed by summing per-sentence stats.
 *
 * @param <T> the concrete statistics type (self-referential, e.g.
 *            {@code IntStats extends SuffStats<IntStats>})
 */
public abstract class SuffStats<T> {

  /** Accumulates {@code other} into this object's statistics, in place. */
  public abstract void add(T other);

  /** Creates a fresh, zero-valued statistics object of the same concrete type. */
  public abstract SuffStats<T> create();

  // hack around generics by erasure: callers hold heterogeneous
  // SuffStats<?> lists, so we unchecked-cast down to T here. Safe only
  // because stats from the same metric are always added together.
  @SuppressWarnings("unchecked")
  public void add(SuffStats<?> other) {
    add((T) other);
  }
}
280
19.071429
40
java
multeval
multeval-master/src/multeval/metrics/TER.java
package multeval.metrics;

import jannopts.*;

import java.util.*;

import multeval.util.*;
import ter.*;

import com.google.common.base.*;

/**
 * Translation Error Rate (TER) metric, backed by the external {@code ter}
 * library (V0.8.0). Sufficient statistics per sentence are {edits, words};
 * the corpus score is 100 * totalEdits / totalWords (lower is better).
 */
public class TER extends Metric<IntStats> {

  @Option(shortName = "P", longName = "ter.punctuation", usage = "Use punctuation in TER?", defaultValue = "false")
  boolean punctuation;

  @Option(shortName = "b", longName = "ter.beamWidth", usage = "Beam width for TER", defaultValue = "20")
  int beamWidth;

  @Option(shortName = "d", longName = "ter.maxShiftDistance", usage = "Maximum shift distance for TER", defaultValue = "50")
  int maxShiftDistance;

  @Option(shortName = "M", longName = "ter.matchCost", usage = "Match cost for TER", defaultValue = "0.0")
  float matchCost;

  @Option(shortName = "D", longName = "ter.deleteCost", usage = "Delete cost for TER", defaultValue = "1.0")
  float deleteCost;

  @Option(shortName = "B", longName = "ter.substituteCost", usage = "Substitute cost for TER", defaultValue = "1.0")
  float substituteCost;

  @Option(shortName = "I", longName = "ter.insertCost", usage = "Insert cost for TER", defaultValue = "1.0")
  float insertCost;

  @Option(shortName = "T", longName = "ter.shiftCost", usage = "Shift cost for TER", defaultValue = "1.0")
  float shiftCost;

  // cost function and calculator built from the @Option values above
  private TERcost costfunc;
  private TERcalc calc = new TERcalc();
  // saved so threadClone() can re-apply the same configuration
  private Configurator opts = null;

  @Override
  public String getMetricDescription() {
    return "Translation Error Rate (TER) V0.8.0";
  }

  /**
   * Computes per-sentence sufficient statistics: the edit count of the
   * best-matching (fewest-edits) reference and the reference length averaged
   * over all references.
   */
  @Override
  public IntStats stats(String hyp, List<String> refs) {
    double totwords = 0;
    TERalignment bestResult = null;

    // TER makes heinous use of static methods; we must synchronize to prevent
    // disaster when multi-threading at the hypothesis level (as we do in
    // n-best scoring) -- even so, it's better to just thread at the
    // hypothesis level

    // number of words is average over references
    calc.setRefLen(refs);

    /* For each reference, compute the TER */
    for(int i = 0; i < refs.size(); ++i) {
      String ref = refs.get(i);
      TERalignment alignResult = calc.TER(hyp, ref, costfunc);
      // keep the alignment with the fewest edits
      if ((bestResult == null) || (bestResult.numEdits > alignResult.numEdits)) {
        bestResult = alignResult;
      }
      totwords += alignResult.numWords;
    }
    bestResult.numWords = ((double) totwords) / ((double) refs.size());
    // if(!refids.isEmpty()) bestResult.bestRef = bestref;

    // now save the minimal sufficient statistics
    IntStats result = new IntStats(2);
    result.arr[0] = (int) bestResult.numEdits;
    result.arr[1] = (int) bestResult.numWords;
    return result;
  }

  /** Corpus-level TER in percent: 100 * edits / words. */
  @Override
  public double score(IntStats suffStats) {
    Preconditions.checkArgument(suffStats.arr.length == 2, "TER sufficient stats must be of length 2");
    int edits = suffStats.arr[0];
    int words = suffStats.arr[1];
    double score = ((double) edits / (double) words);
    return score * 100;
  }

  @Override
  public String toString() {
    return "TER";
  }

  @Override
  public void configure(Configurator opts) throws ConfigurationException {
    this.opts = opts;
    LibUtil.checkLibrary("ter.TERpara", "TER");
    opts.configure(this);
    configureLibTer();
  }

  /** Pushes the @Option field values into the library's cost/calc objects. */
  private void configureLibTer() {
    costfunc = new TERcost();
    costfunc._delete_cost = deleteCost;
    costfunc._insert_cost = insertCost;
    costfunc._shift_cost = shiftCost;
    costfunc._match_cost = matchCost;
    costfunc._substitute_cost = substituteCost;
    // TERcalc.setNormalize(normalized);
    // TERcalc.setCase(caseon);
    calc.setPunct(punctuation);
    calc.setBeamWidth(beamWidth);
    calc.setShiftDist(maxShiftDistance);
  }

  // TER is an error rate: lower is better
  @Override
  public boolean isBiggerBetter() {
    return false;
  }

  /** Returns an identically-configured clone with its own TERcalc instance. */
  @Override
  public Metric<?> threadClone() {
    TER ter = new TER();
    try {
      opts.configure(ter);
      ter.configureLibTer();
    } catch(ConfigurationException e) {
      // if this should happen, it should have already happened during the
      // initial call to configure, never here
      throw new RuntimeException(e);
    }
    return ter;
  }
}
4,067
28.693431
124
java
multeval
multeval-master/src/multeval/output/AsciiTable.java
package multeval.output;

import java.io.PrintStream;

import multeval.ResultsManager;
import multeval.ResultsManager.Type;

/**
 * Renders a ResultsManager as a fixed-width ASCII table: one column per
 * metric showing "avg (s_sel/s_opt/p)" and one row per system, with the
 * first row treated as the baseline (no p-value).
 */
public class AsciiTable {

  public void write(ResultsManager results, PrintStream out) {
    // header row: "n=<numOptRuns>" then one labeled column per metric
    String[] columns = new String[results.metricNames.length+1];
    columns[0] = String.format("n=%d", results.numOptRuns);
    for(int i=0; i<results.metricNames.length; i++) {
      columns[i+1] = results.metricNames[i] +" (s_sel/s_opt/p)";
    }
    print(out, columns);

    String[] metrics = results.metricNames;
    String[] systems = results.sysNames;
    int sysCount = systems.length;
    for(int iSys=0; iSys<sysCount; iSys++) {
      columns[0] = systems[iSys]; // system name
      for(int iMetric=0; iMetric<metrics.length; iMetric++) {
        double avg = results.get(iMetric, iSys, Type.AVG); // avg metric score
        double sSel = results.get(iMetric, iSys, Type.RESAMPLED_STDDEV_AVG);
        double sTest = results.get(iMetric, iSys, Type.STDDEV);
        if (iSys == 0) {
          // baseline has no p-value; "*" marks stats unavailable with a
          // single optimizer run (see footnotes below)
          if (results.numOptRuns > 1) {
            columns[iMetric+1] = String.format("%2.1f (%.1f/%.1f/-)", avg, sSel, sTest);
          } else {
            columns[iMetric+1] = String.format("%2.1f (%.1f/*/-)", avg, sSel);
          }
        } else {
          // TODO: Just show improvements over baseline?
          double p = results.get(iMetric, iSys, Type.P_VALUE);
          if (results.numOptRuns > 1) {
            columns[iMetric+1] = String.format("%2.1f (%.1f/%.1f/%.2f)", avg, sSel, sTest, p);
          } else {
            columns[iMetric+1] = String.format("%2.1f (%.1f/*/**)", avg, sSel);
          }
        }
      }
      print(out, columns);
    }
    // footnotes explaining the "*" placeholders above
    if (results.numOptRuns < 2) {
      out.println(" * Indicates no estimate of optimizer instability due to single optimizer run. Consider multiple optimizer runs.");
      if (sysCount > 1) {
        out.println(" ** Indicates no p-value due to single optimizer run. Consider multiple optimizer runs.");
      }
    }
    out.flush();
  }

  /** Prints one row: 15-char system column, then 23-char metric columns. */
  private void print(PrintStream out, String[] columns) {
    out.print(String.format("%-15s", columns[0]));
    for(int i=1; i<columns.length; i++) {
      out.print(String.format("%-23s", columns[i])); // 23 not 21 due to metric names
    }
    out.println();
  }
}
2,443
35.477612
142
java
multeval
multeval-master/src/multeval/output/LatexTable.java
package multeval.output;

import java.io.*;
import java.util.*;

import multeval.*;
import multeval.metrics.*;
import multeval.ResultsManager.Type;

/**
 * Renders a ResultsManager as a LaTeX table (optionally wrapped in a
 * complete standalone document). One multirow group per metric, one line
 * per system; the first system is the baseline and gets no p-value.
 */
public class LatexTable {

  /**
   * @param fullDoc when true, emit a complete compilable LaTeX document;
   *                when false, emit only the table environment
   */
  public void write(ResultsManager results, List<Metric<?>> metricList, PrintWriter out, boolean fullDoc) {
    if(fullDoc) {
      out.println("\\documentclass[12pt]{article}");
      out.println("\\usepackage[american]{babel}");
      out.println("\\usepackage{multirow}");
      out.println("\\usepackage{amsmath, amsthm, amssymb}");
      out.println("\\begin{document}");
    }

    out.println("\\begin{table}[htb]");
    out.println("\\begin{center}");
    out.println("\\begin{footnotesize}");
    out.println("\\begin{tabular}{|l|l|l|l|l|l|}");
    out.println("\\hline");
    out.println("\\bf Metric & \\bf System & \\bf Avg & \\bf $\\overline{s}_{\\text{sel}}$ & \\bf $s_{\\text{Test}}$ & \\bf $p$-value \\\\");
    out.println("\\hline");
    // \multicolumn{6}{|l|}{BTEC Zh-En} \\

    String[] metrics = results.metricNames;
    String[] systems = results.sysNames;
    int sysCount = systems.length;
    for(int iMetric = 0; iMetric < metrics.length; iMetric++) {
      String metricName = metrics[iMetric];
      // arrow indicates whether higher or lower scores are better
      String metricArrow = metricList.get(iMetric).isBiggerBetter() ? "$\\uparrow$" : "$\\downarrow$";
      // TODO: Remove this hack
      if(metricName.equals("Length")) {
        metricArrow = "";
      }
      out.println("\\multirow{" + sysCount + "}{*}{" + metricName + " " + metricArrow + "}");
      for(int iSys = 0; iSys < sysCount; iSys++) {
        String sysName = systems[iSys];
        double avg = results.get(iMetric, iSys, Type.AVG);
        double sSel = results.get(iMetric, iSys, Type.RESAMPLED_STDDEV_AVG);
        double sTest = results.get(iMetric, iSys, Type.STDDEV);
        // sTest is NaN when only one optimizer run is available
        String sTestStr = Double.isNaN(sTest) ? "-" : String.format("%.1f", sTest);
        if (iSys == 0) {
          // baseline has no p-value
          out.println(String.format("& %s & %.1f & %.1f & %s & - \\\\", sysName, avg, sSel, sTestStr));
        } else {
          double p = results.get(iMetric, iSys, Type.P_VALUE);
          out.println(String.format("& %s & %.1f & %.1f & %s & %.2f \\\\", sysName, avg, sSel, sTestStr, p));
        }
      }
      out.println("\\hline");
    }

    out.println("\\end{tabular}");
    out.println("\\end{footnotesize}");
    out.println("\\end{center}");
    // out.println("\\vspace{-.2cm}");

    StringBuilder metricDescs = new StringBuilder();
    for(Metric<?> metric : metricList) {
      metricDescs.append(metric.getMetricDescription() + "; ");
    }
    out.println("\\caption{\\label{tab:scores} Metric scores for all systems: "+metricDescs.toString()+". p-values are relative to baseline and indicate whether a difference of this magnitude (between the baseline and the system on that line) is likely to be generated again by some random process (a randomized optimizer). Metric scores are averages over multiple runs. $s_{sel}$ indicates the variance due to test set selection and has nothing to do with optimizer instability.}");
    out.println("\\end{table}");

    if(fullDoc) {
      out.println("\\end{document}");
    }
    out.flush();
  }
}
3,200
41.118421
483
java
multeval
multeval-master/src/multeval/parallel/MetricWorkerPool.java
package multeval.parallel;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;

import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList;

/**
 * A fixed-size pool of worker threads that consume chunks of tasks from a
 * bounded queue. Each worker holds its own {@code ThreadLocals} instance
 * (produced by the supplied factory) so that non-thread-safe metric state
 * never crosses threads. Tasks are batched into chunks to reduce queue
 * contention. Shutdown is signaled with one poison-pill chunk per worker.
 *
 * @param <Task>         the unit of work
 * @param <ThreadLocals> per-worker mutable state
 */
public abstract class MetricWorkerPool<Task, ThreadLocals> {

  private final int threads;
  private final Supplier<ThreadLocals> threadLocalSupplier;
  private final int chunkSize;
  // Sentinel chunk signaling shutdown. BUGFIX: must be compared by
  // IDENTITY (==), not equals() -- any other empty list would compare
  // equal to an empty ImmutableList and shut a worker down prematurely.
  private final List<Task> POISON = ImmutableList.of();
  private List<Task> curChunk;
  private final ArrayList<Thread> workers;
  private final ArrayBlockingQueue<List<Task>> q;

  // usually 100 hypotheses or bootstrap points at a time
  public static final int DEFAULT_CHUNK_SIZE = 100;

  public MetricWorkerPool(int threads, Supplier<ThreadLocals> threadLocals) {
    this(threads, threadLocals, DEFAULT_CHUNK_SIZE);
  }

  // chunk size is used to reduce contention among threads
  public MetricWorkerPool(int threads, Supplier<ThreadLocals> threadLocals, int chunkSize) {
    this.threads = threads;
    this.threadLocalSupplier = threadLocals;
    this.chunkSize = chunkSize;
    this.curChunk = new ArrayList<Task>(chunkSize);
    this.workers = new ArrayList<Thread>(threads);
    // bounded so producers block instead of buffering unbounded work
    this.q = new ArrayBlockingQueue<List<Task>>(threads * 100);
  }

  /** Starts all worker threads; each loops taking chunks until poisoned. */
  public void start() {
    for (int iThread = 0; iThread < threads; iThread++) {
      Thread thread = new Thread() {
        ThreadLocals locals = threadLocalSupplier.get();

        @Override
        public void run() {
          try {
            List<Task> chunk = q.take();
            // identity comparison: POISON is a sentinel object, not a value
            while (chunk != POISON) {
              for(Task task : chunk) {
                doWork(locals, task);
              }
              chunk = q.take();
            }
          } catch (InterruptedException e) {
            // restore the interrupt flag so the owner of this thread can
            // observe the interruption (previously silently swallowed)
            Thread.currentThread().interrupt();
          } catch (Throwable t) {
            // a failed worker means incomplete results; fail fast
            t.printStackTrace();
            System.exit(1);
          }
        }
      };
      workers.add(thread);
      thread.start();
    }
  }

  /** Buffers a task; enqueues the buffer once it reaches chunkSize. */
  public void addTask(Task t) throws InterruptedException {
    if(curChunk.size() == chunkSize) {
      q.put(curChunk);
      curChunk = new ArrayList<Task>(chunkSize);
    }
    curChunk.add(t);
  }

  /** Flushes the partial chunk, poisons every worker, and joins them. */
  public void waitForCompletion() throws InterruptedException {
    if(curChunk.size() > 0) {
      q.put(curChunk);
    }
    for (int iThread = 0; iThread < threads; iThread++) {
      q.put(POISON);
    }
    for (Thread t : workers) {
      t.join();
    }
  }

  /** Processes one task using this worker's private thread-local state. */
  public abstract void doWork(ThreadLocals locals, Task t);
}
2,322
24.25
91
java
multeval
multeval-master/src/multeval/parallel/SynchronizedBufferedReader.java
package multeval.parallel;

import java.io.BufferedReader;
import java.io.IOException;

/**
 * Thin thread-safe facade over a {@link BufferedReader}: readLine() and
 * close() are serialized on this object's monitor so multiple worker
 * threads can pull lines from one shared input.
 */
public class SynchronizedBufferedReader {

  private final BufferedReader in;

  public SynchronizedBufferedReader(BufferedReader in) {
    this.in = in;
  }

  /** Reads the next line under this object's lock; null at end of stream. */
  public synchronized String readLine() throws IOException {
    return this.in.readLine();
  }

  /** Closes the underlying reader under this object's lock. */
  public synchronized void close() throws IOException {
    this.in.close();
  }
}
399
18.047619
59
java
multeval
multeval-master/src/multeval/parallel/SynchronizedPrintStream.java
package multeval.parallel;

import java.io.PrintStream;

/**
 * So here's the use case: You have a compute-heavy task over lines from an
 * ordered file and you'd like to write back the processed lines in order. Let's
 * call each line a "unit" and give it a unit number. This class allows multiple
 * threads to write to the same output file, guaranteeing the proper ordering.
 *
 * @author Jonathan Clark
 */
public class SynchronizedPrintStream {

  private PrintStream out;
  // next unit number allowed to write; guarded by lock
  private int curUnit = 0;
  private Object lock = new Object();

  public SynchronizedPrintStream(PrintStream out) {
    this.out = out;
  }

  /**
   * Blocks until it is unit {@code myUnit}'s turn, then writes the line.
   *
   * BUGFIX: the turn check must be re-evaluated inside the synchronized
   * block as a guarded wait loop. The previous version checked
   * {@code curUnit != myUnit} outside the lock and only then entered
   * synchronized to wait: a finishUnit() notification arriving between the
   * check and the wait() was lost, deadlocking this thread; the unlocked
   * read of curUnit also had no memory-visibility guarantee.
   */
  public void println(int myUnit, String str) throws InterruptedException {
    synchronized (lock) {
      while (curUnit != myUnit) {
        lock.wait();
      }
      out.println(str);
    }
  }

  /**
   * Marks the current unit complete and wakes all waiting writers.
   * (The {@code unit} parameter is informational only; completion always
   * advances curUnit by one, matching the original behavior.)
   */
  public void finishUnit(int unit) {
    synchronized(lock) {
      curUnit += 1;
      lock.notifyAll();
    }
  }

  public void close() {
    out.close();
  }
}
1,268
25.4375
83
java
multeval
multeval-master/src/multeval/significance/BootstrapResampler.java
package multeval.significance;

import java.util.*;

import multeval.metrics.*;
import multeval.parallel.MetricWorkerPool;

import com.google.common.base.*;

/**
 * Draws bootstrap resamples (with replacement) over per-sentence sufficient
 * statistics and scores each resample with every metric, parallelized via
 * MetricWorkerPool with one thread-cloned metric set per worker.
 */
public class BootstrapResampler {

  private final Random random = new Random();
  private final int threads;
  private final List<Metric<?>> masterMetrics;
  private final List<List<SuffStats<?>>> suffStats;
  private int totalDataPoints;

  /** @param suffStats First list corresponds to the metrics, the second
   *          dimension is number of data points (i.e. sentences) and the inner
   *          array is the sufficient statistics for each metric. */
  public BootstrapResampler(int threads, List<Metric<?>> metrics, List<List<SuffStats<?>>> suffStats) {
    Preconditions.checkArgument(metrics.size() > 0, "Must have at least one metric.");
    Preconditions.checkArgument(suffStats.size() > 0, "Must have at least one data point.");
    // TODO: Check for sufficient stats and metric count under each data point
    // being parallel
    this.threads = threads;
    this.masterMetrics = metrics;
    this.suffStats = suffStats;
    this.totalDataPoints = suffStats.get(0).size();
    Preconditions.checkArgument(totalDataPoints > 0, "Need more than zero data points.");
  }

  /** Per-worker state: a reusable sample-index buffer plus thread-cloned
   *  metrics (metric implementations are not assumed thread-safe). */
  private static class Locals {
    public final int[] sampleMembers;
    public final List<Metric<?>> metrics;

    public Locals(List<Metric<?>> masterMetrics, int sampleSize) {
      this.sampleMembers = new int[sampleSize];
      this.metrics = new ArrayList<Metric<?>>(masterMetrics.size());
      for(Metric<?> metric : masterMetrics) {
        metrics.add(metric.threadClone());
      }
    }
  }

  /** Returns a list whose size corresponds to the number of metrics being used
   * for this resampler. Each inner double array contains numSamples metric
   * values, calculated by applying the metric to the sufficient statistics
   * aggregated from each resampling. Each resample has the same size as the
   * original data set (totalDataPoints), drawn with replacement.
   *
   * @param numSamples Number of resampled groups to be drawn.
   * @return one double[numSamples] of scores per metric
   * @throws InterruptedException */
  public List<double[]> resample(int numSamples) throws InterruptedException {
    final List<double[]> metricValues = new ArrayList<double[]>(masterMetrics.size());
    for(int i = 0; i < masterMetrics.size(); i++) {
      metricValues.add(new double[numSamples]);
    }

    MetricWorkerPool<Integer, Locals> workers = new MetricWorkerPool<Integer, Locals>(threads, new Supplier<Locals>() {
      @Override
      public Locals get() {
        return new Locals(masterMetrics, totalDataPoints);
      }
    }) {
      @Override
      public void doWork(Locals locals, Integer iSample) {
        chooseSampleMembers(totalDataPoints, locals.sampleMembers);
        // NOTE: We could dump the sample members for analysis here if we wanted
        for(int iMetric = 0; iMetric < masterMetrics.size(); iMetric++) {
          SuffStats<?> summedStats = sumStats(locals.sampleMembers, iMetric, suffStats);
          Metric<?> metric = locals.metrics.get(iMetric);
          double score = metric.scoreStats(summedStats);
          // each worker writes a distinct iSample slot, so no locking needed
          metricValues.get(iMetric)[iSample] = score;
        }
      }
    };
    workers.start();
    for(int iSample = 0; iSample < numSamples; iSample++) {
      workers.addTask(iSample);
    }
    workers.waitForCompletion();

    return metricValues;
  }

  /** Sums the chosen data points' stats for one metric into a fresh accumulator. */
  private static SuffStats<?> sumStats(int[] sampleMembers, int iMetric, List<List<SuffStats<?>>> ss) {
    SuffStats<?> summedStats = ss.get(iMetric).get(0).create();
    for(int dataIdx : sampleMembers) {
      summedStats.add(ss.get(iMetric).get(dataIdx));
    }
    return summedStats;
  }

  /** Fills sampleMembersOut with uniform random indices (with replacement). */
  private void chooseSampleMembers(int totalPointsAvailable, int[] sampleMembersOut) {
    for(int i = 0; i < sampleMembersOut.length; i++) {
      sampleMembersOut[i] = random.nextInt(totalPointsAvailable);
    }
  }
}
3,951
33.973451
119
java
multeval
multeval-master/src/multeval/significance/StratifiedApproximateRandomizationTest.java
package multeval.significance;

import java.util.List;
import java.util.Random;

import multeval.metrics.Metric;
import multeval.metrics.SuffStats;
import multeval.parallel.MetricWorkerPool;
import multeval.util.SuffStatUtils;

import com.google.common.base.Preconditions;
import com.google.common.base.Supplier;

/**
 * Paired approximate randomization (AR) significance test between two
 * systems, stratified so that shuffling only ever swaps statistics belonging
 * to the same hypothesis (across systems and across optimizer runs).
 */
public class StratifiedApproximateRandomizationTest {

  private static final Random random = new Random();
  private final List<Metric<?>> masterMetrics;
  private final int threads;
  private final List<List<SuffStats<?>>> suffStatsA;
  private final List<List<SuffStats<?>>> suffStatsB;
  private int totalDataPoints;
  private final int numHyps;
  private final int numOptRuns;
  private final boolean debug;

  /** @param suffStats First list corresponds to the metrics, the second
   *          dimension is number of data points (i.e. sentences) and the inner
   *          data structure is the sufficient statistics for each metric.
   *          The number of data points must equal numHyps times numOptRuns */
  public StratifiedApproximateRandomizationTest(int threads, List<Metric<?>> metrics,
      List<List<SuffStats<?>>> suffStatsA, List<List<SuffStats<?>>> suffStatsB,
      int numHyps, int numOptRuns, boolean debug) {

    Preconditions.checkArgument(metrics.size() > 0, "Must have at least one metric.");
    Preconditions.checkArgument(suffStatsA.size() > 0, "Must have at least one data point.");
    // TODO: Check for sufficient stats and metric count under each data
    // point being parallel (same for BootstrapResampler)

    this.threads = threads;
    this.masterMetrics = metrics;
    this.suffStatsA = suffStatsA;
    this.suffStatsB = suffStatsB;
    this.totalDataPoints = suffStatsA.get(0).size();
    this.numHyps = numHyps;
    this.numOptRuns = numOptRuns;
    this.debug = debug;

    Preconditions.checkArgument(suffStatsA.get(0).size() == suffStatsB.get(0).size(),
        "System A and System B must have the same number of data points.");
    Preconditions.checkArgument(totalDataPoints > 0, "Need more than zero data points.");
    Preconditions.checkArgument(totalDataPoints == numHyps * numOptRuns,
        String.format("totalDataPoints (%d) in second list must == numHyps (%d) * numOptRuns (%d)",
            totalDataPoints, numHyps, numOptRuns));
  }

  /**
   * Runs numShuffles random relabelings and returns, per metric, the
   * two-sided p-value: the fraction of shuffles whose |score diff| strictly
   * exceeds the observed |score diff| (with add-one smoothing).
   */
  public double[] getTwoSidedP(int numShuffles) throws InterruptedException {

    // observed absolute score difference per metric (the test statistic)
    final double[] overallDiffs = new double[masterMetrics.size()];
    final double[] scoresA = new double[masterMetrics.size()];
    final double[] scoresB = new double[masterMetrics.size()];
    for(int iMetric = 0; iMetric < masterMetrics.size(); iMetric++) {
      Metric<?> metric = masterMetrics.get(iMetric);
      scoresA[iMetric] = metric.scoreStats(SuffStatUtils.sumStats(suffStatsA.get(iMetric)));
      scoresB[iMetric] = metric.scoreStats(SuffStatUtils.sumStats(suffStatsB.get(iMetric)));
      overallDiffs[iMetric] = Math.abs(scoresA[iMetric] - scoresB[iMetric]);
    }

    final int[] diffsByChance = new int[masterMetrics.size()];

    // threading notes:
    // sufficient stats are immutable in this method
    // summing static using a metric doesn't violate thread safeness for any of our metrics
    MetricWorkerPool<Integer, Shuffling> workers =
        new MetricWorkerPool<Integer, Shuffling>(threads, new Supplier<Shuffling>() {
          @Override
          public Shuffling get() {
            return new Shuffling(numHyps, numOptRuns);
          }
        }) {
          @Override
          public void doWork(Shuffling shuffling, Integer i) {
            shuffling.shuffle();
            for(int iMetric = 0; iMetric < masterMetrics.size(); iMetric++) {
              Metric<?> metric = masterMetrics.get(iMetric);
              double scoreX = metric.scoreStats(sumStats(shuffling, iMetric, suffStatsA, suffStatsB, false));
              double scoreY = metric.scoreStats(sumStats(shuffling, iMetric, suffStatsA, suffStatsB, true));
              double sampleDiff = Math.abs(scoreX - scoreY);
              // the strict > is important: if we want to score the same
              // system against itself, having a zero difference should not
              // be attributed to chance.
              if (sampleDiff > overallDiffs[iMetric]) {
                // NOTE(review): concurrent unsynchronized ++ on a shared
                // int[] -- increments could be lost under contention;
                // confirm whether the pool serializes these updates.
                diffsByChance[iMetric]++;
              }
              if(debug) {
                System.err.println("DIFF metric " + iMetric + ": " + scoreX + " - " + scoreY + " --> "
                    + sampleDiff + " >? " + overallDiffs[iMetric] + "; diffsByChance: " + diffsByChance[iMetric]);
              }
            }
          }
        };
    workers.start();
    for(int i = 0; i < numShuffles; i++) {
      workers.addTask(i);
    }
    workers.waitForCompletion();

    double[] p = new double[masterMetrics.size()];
    for(int iMetric = 0; iMetric < masterMetrics.size(); iMetric++) {
      // +1 applies here, though it only matters for small numbers of
      // shufflings, which we typically never do. it's necessary to ensure
      // the probability of falsely rejecting the null hypothesis is no
      // greater than the rejection level of the test (see william
      // morgan on significance tests)
      p[iMetric] = ((double) diffsByChance[iMetric] + 1.0) / ((double) numShuffles + 1.0);
    }
    return p;
  }

  /** Sums one metric's stats over all rows, each row taken from A or B (and
   *  from a permuted optimizer run) according to the shuffle; invert=true
   *  selects the complementary assignment. */
  private static SuffStats<?> sumStats(Shuffling shuffling, int iMetric,
      List<List<SuffStats<?>>> suffStatsA, List<List<SuffStats<?>>> suffStatsB, boolean invert) {

    SuffStats<?> summedStats = suffStatsA.get(iMetric).get(0).create();
    List<SuffStats<?>> metricStatsA = suffStatsA.get(iMetric);
    List<SuffStats<?>> metricStatsB = suffStatsB.get(iMetric);
    for(int iRow = 0; iRow < metricStatsA.size(); iRow++) {
      SuffStats<?> row = shuffling.at(iRow, metricStatsA, metricStatsB, invert);
      summedStats.add(row);
    }
    return summedStats;
  }

  /** One random relabeling: a per-row A/B swap decision plus a per-hypothesis
   *  permutation of optimizer runs (and its precomputed inverse). */
  static class Shuffling {
    private final boolean[] swap;
    private final int[] optRunPermutation;
    private final int[] optRunPermutationInv;
    private final int optRuns;
    private final int hyps;
    private static final Random rnd = new Random();

    public Shuffling(int hyps, int optRuns) {
      this.swap = new boolean[hyps*optRuns];
      this.optRunPermutation = new int[hyps*optRuns];
      this.optRunPermutationInv = new int[hyps*optRuns];
      this.hyps = hyps;
      this.optRuns = optRuns;
    }

    /** Returns the element for row iRow under this shuffle; with invert=true,
     *  returns the element the complementary system would receive. */
    public <T> T at(int iRow, List<T> a, List<T> b, boolean invert) {
      final boolean shouldSwap;
      final int idx;
      if(invert) {
        idx = optRunPermutationInv[iRow];
        shouldSwap = !swap[iRow];
      } else {
        idx = optRunPermutation[iRow];
        shouldSwap = swap[iRow];
      }
      final List<T> list = shouldSwap ? b : a;
      return list.get(idx);
    }

    // shuffle, stratifying on like hypotheses, but allowing swaps between systems and optimizer runs
    public void shuffle() {
      // decide swaps
      for(int i = 0; i < swap.length; i++) {
        swap[i] = random.nextBoolean();
      }

      // decide how to permute sentences between optimization runs
      // 1) init by storing indices into original array
      for(int i=0; i<optRunPermutation.length; i++) {
        optRunPermutation[i] = i;
      }
      // 2) randomly permute like hypotheses between optimization runs
      // based on Collections.shuffle, but for a primitive array...
      // all permutations are equally likely given a fair source of randomness
      for(int iHyp=0; iHyp<hyps; iHyp++) {
        for (int iRun=optRuns; iRun>1; iRun--) {
          int swapRun1 = iRun-1;
          int swapRun2 = rnd.nextInt(iRun);
          swap(optRunPermutation, iHyp + hyps*swapRun1, iHyp+hyps*swapRun2);
        }
      }
      // now save inverse function
      for(int origIdx=0; origIdx<optRunPermutation.length; origIdx++) {
        int mappedIdx = optRunPermutation[origIdx];
        optRunPermutationInv[mappedIdx] = origIdx;
      }
    }

    /** Swaps arr[i] and arr[j] in place. */
    private void swap(int[] arr, int i, int j) {
      int tmp = arr[i];
      arr[i] = arr[j];
      arr[j] = tmp;
    }
  }
}
7,834
36.668269
137
java
multeval
multeval-master/src/multeval/util/ArrayUtils.java
package multeval.util;

import com.google.common.base.*;

/** Element-wise helpers and conversions for primitive arrays. */
public class ArrayUtils {

  /** In-place element-wise addition: dest[k] += arg[k] for every index.
   *  Both arrays must have the same length. */
  public static void plusEquals(float[] dest, float[] arg) {
    Preconditions.checkArgument(dest.length == arg.length, "Arrays not parallel");
    for(int k = 0, n = dest.length; k < n; k++) {
      dest[k] += arg[k];
    }
  }

  /** In-place element-wise addition: dest[k] += arg[k] for every index.
   *  Both arrays must have the same length. */
  public static void plusEquals(int[] dest, int[] arg) {
    Preconditions.checkArgument(dest.length == arg.length, "Arrays not parallel");
    for(int k = 0, n = dest.length; k < n; k++) {
      dest[k] += arg[k];
    }
  }

  /** Returns a new int[] with each float truncated toward zero. */
  public static int[] toIntArray(float[] suffStats) {
    final int n = suffStats.length;
    int[] converted = new int[n];
    for(int k = 0; k < n; k++) {
      converted[k] = (int) suffStats[k];
    }
    return converted;
  }

  /** Returns a new float[] with each int widened to float. */
  public static float[] toFloatArray(int[] intSuffStats) {
    final int n = intSuffStats.length;
    float[] converted = new float[n];
    for(int k = 0; k < n; k++) {
      converted[k] = (float) intSuffStats[k];
    }
    return converted;
  }

  /** First index whose element == val (exact double compare), or -1. */
  public static int indexOf(double[] arr, double val) {
    int k = 0;
    while (k < arr.length) {
      if (arr[k] == val) {
        return k;
      }
      k++;
    }
    return -1;
  }
}
1,308
22.8
82
java
multeval
multeval-master/src/multeval/util/CollectionUtils.java
package multeval.util;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

import com.google.common.collect.Multiset;
import com.google.common.collect.Multiset.Entry;

/** Assorted collection helpers. */
public class CollectionUtils {

  // rom
  // http://philippeadjiman.com/blog/2010/02/20/a-generic-method-for-sorting-google-collections-multiset-per-entry-count/
  /** Returns the multiset's entries sorted by descending occurrence count. */
  public static <T> List<Entry<T>> sortByCount(Multiset<T> multiset) {
    Comparator<Multiset.Entry<T>> byDescendingCount = new Comparator<Multiset.Entry<T>>() {
      public int compare(Multiset.Entry<T> e1, Multiset.Entry<T> e2) {
        // larger counts first
        return e2.getCount() - e1.getCount();
      }
    };
    List<Entry<T>> entries = new ArrayList<Entry<T>>(multiset.entrySet());
    Collections.sort(entries, byDescendingCount);
    return entries;
  }

  /** First n elements as a sublist view, or the whole list if it is shorter. */
  public static <T> List<T> head(List<T> list, int n) {
    return n <= list.size() ? list.subList(0, n) : list;
  }
}
985
26.388889
120
java
multeval
multeval-master/src/multeval/util/FileUtils.java
package multeval.util;

import java.io.*;

import com.google.common.base.*;

/** File helpers. */
public class FileUtils {

  // File must be ASCII or UTF-8!
  /**
   * Returns the last line of the given file by scanning backwards for the
   * final newline.
   *
   * BUGFIXES vs. previous version: the RandomAccessFile is now closed
   * (try-with-resources; it previously leaked); the scan now seeks to the
   * start position before the first read (the old loop's first readByte()
   * ran at file position 0, so a leading '\n' could truncate the result);
   * empty and single-byte files no longer mis-handle negative positions.
   */
  public static String getLastLine(String file) throws IOException {
    try (RandomAccessFile ra = new RandomAccessFile(new File(file), "r")) {
      long length = ra.length();
      if (length == 0) {
        return "";
      }
      long pos = length - 2; // skip newline at end of file
      // walk backwards until we hit a newline or the start of the file
      while (pos > 0) {
        ra.seek(pos);
        if (((char) ra.readByte()) == '\n') {
          break; // file pointer now sits just past the newline
        }
        pos--;
      }
      if (pos <= 0) {
        ra.seek(0); // no newline found: the last line is the whole file
      }
      int len = (int) (length - ra.getFilePointer());
      byte[] buf = new byte[len];
      ra.readFully(buf);
      return new String(buf, Charsets.UTF_8);
    }
  }

  public static void main(String[] args) throws IOException {
    System.err.println(getLastLine(args[0]));
  }
}
730
24.206897
68
java
multeval
multeval-master/src/multeval/util/LibUtil.java
package multeval.util;

import java.net.*;

/** Helper for verifying optional third-party jars are on the classpath. */
public class LibUtil {

  /**
   * Loads {@code qualifiedName} to confirm the library is present, printing
   * its on-disk location to stderr; throws a RuntimeException (wrapping the
   * ClassNotFoundException) when it cannot be found.
   */
  public static void checkLibrary(String qualifiedName, String libName) {
    try {
      URL location =
          Class.forName(qualifiedName).getProtectionDomain().getCodeSource().getLocation();
      System.err.println("Found library " + libName + " at " + location.toString());
    } catch(ClassNotFoundException e) {
      throw new RuntimeException("Could not find library " + libName, e);
    }
  }
}
522
26.526316
81
java
multeval
multeval-master/src/multeval/util/MathUtils.java
package multeval.util;

import java.util.*;

/** Small numeric helpers: moments, erf, medians, factorials. */
public class MathUtils {

  public static final double SQRT_2 = Math.sqrt(2.0);

  /** val! as a double (exact only while it fits in the mantissa). */
  public static double factorial(int val) {
    double result = 1.0;
    for(int i = 2; i <= val; i++) {
      result *= i;
    }
    return result;
  }

  /** Gamma function for positive integer arguments: (alpha-1)!. */
  public static double gamma(int alpha) {
    return factorial(alpha - 1);
  }

  public static double average(List<Double> observations) {
    double total = 0.0;
    for(double d : observations) {
      total += d;
    }
    return total / observations.size();
  }

  public static double average(double[] data) {
    double total = 0.0;
    for(double d : data) {
      total += d;
    }
    return total / data.length;
  }

  /** Population variance (divides by n).
   *  NOTE(review): inconsistent with variance(double[]), which applies
   *  Bessel's correction -- preserved as-is since callers may depend on it. */
  public static double variance(List<Double> observations) {
    double mean = average(observations);
    double varSum = 0.0;
    for(double d : observations) {
      varSum += (d - mean) * (d - mean);
    }
    return varSum / observations.size();
  }

  /** Sample variance with Bessel's correction (divides by n-1).
   *  Undefined (division by zero) for length-1 input. */
  public static double variance(double[] observations) {
    double mean = average(observations);
    double varSum = 0.0;
    for(double d : observations) {
      varSum += (d - mean) * (d - mean);
    }
    // use bessel's correction
    return varSum / (observations.length - 1);
  }

  // from http://introcs.cs.princeton.edu/21function/ErrorFunction.java.html
  // fractional error in math formula less than 1.2 * 10 ^ -7.
  // although subject to catastrophic cancellation when z in very close to 0
  // from Chebyshev fitting formula for erf(z) from Numerical Recipes, 6.2
  public static double erf(double z) {
    double t = 1.0 / (1.0 + 0.5 * Math.abs(z));

    // use Horner's method
    double ans = 1 - t * Math
        .exp(-z * z - 1.26551223
            + t * (1.00002368
            + t * (0.37409196
            + t * (0.09678418
            + t * (-0.18628806
            + t * (0.27886807
            + t * (-1.13520398
            + t * (1.48851587
            + t * (-0.82215223
            + t * (0.17087277))))))))));
    if (z >= 0) return ans;
    else return -ans;
  }

  /** Minimum of the array; POSITIVE_INFINITY for empty input. */
  public static double min(double[] samples) {
    // BUGFIX: seed with +infinity rather than Double.MAX_VALUE so that
    // arrays containing infinities are handled correctly
    double result = Double.POSITIVE_INFINITY;
    for(double d : samples)
      result = Math.min(result, d);
    return result;
  }

  /** Maximum of the array; NEGATIVE_INFINITY for empty input. */
  public static double max(double[] samples) {
    // BUGFIX: Double.MIN_VALUE is the smallest POSITIVE double, so seeding
    // with it returned a wrong maximum for all-negative arrays; seed with
    // -infinity instead
    double result = Double.NEGATIVE_INFINITY;
    for(double d : samples)
      result = Math.max(result, d);
    return result;
  }

  public static double stddev(double[] observations) {
    return Math.sqrt(variance(observations));
  }

  /** Index of the median AFTER sorting; NOTE: sorts (mutates) {@code arr}
   *  in place. For even lengths, returns the higher of the two midpoints. */
  public static int medianIndexInSorted(double[] arr) {
    // TODO: Better tie breaking?
    Arrays.sort(arr);
    if (arr.length < 2) {
      return 0;
    } else if (arr.length % 2 == 0) {
      // return the higher of the 2 midpoints
      return (arr.length / 2);
    } else {
      // return exactly the midpoint
      return ((arr.length - 1) / 2);
    }
  }

  /** Index of the median value within the ORIGINAL (unsorted) array; works
   *  on a copy, so {@code arr} is not mutated. */
  public static int medianIndex(double[] arr) {
    if (arr.length < 2) {
      return 0;
    } else {
      double[] copy = Arrays.copyOf(arr, arr.length);
      Arrays.sort(copy);
      final int copyIdx;
      if (copy.length % 2 == 0) {
        // return the higher of the 2 midpoints
        copyIdx = (copy.length / 2);
      } else {
        // return exactly the midpoint
        copyIdx = ((copy.length - 1) / 2);
      }
      double median = copy[copyIdx];
      int arrIdx = ArrayUtils.indexOf(arr, median);
      return arrIdx;
    }
  }
}
3,561
25.191176
116
java
multeval
multeval-master/src/multeval/util/StringUtils.java
package multeval.util;

import java.util.regex.*;

/** String normalization helpers. */
public class StringUtils {

  /** One or more consecutive whitespace characters. */
  public static final Pattern WHITESPACE = Pattern.compile("\\s+");

  /** Trims leading/trailing whitespace and collapses each internal
   *  whitespace run to a single space. */
  public static String normalizeWhitespace(String sent) {
    String trimmed = sent.trim();
    Matcher m = WHITESPACE.matcher(trimmed);
    return m.replaceAll(" ");
  }
}
273
18.571429
67
java
multeval
multeval-master/src/multeval/util/SuffStatUtils.java
package multeval.util;

import java.util.*;

import multeval.metrics.*;

import com.google.common.base.*;

/** Aggregation helpers for sufficient statistics. */
public class SuffStatUtils {

  /** Sums every data point's statistics into a fresh zero-valued accumulator
   *  (obtained via the first element's create()); list must be non-empty. */
  public static SuffStats<?> sumStats(List<SuffStats<?>> suffStats) {
    Preconditions.checkArgument(suffStats.size() > 0, "Need more than zero data points.");
    SuffStats<?> total = suffStats.get(0).create();
    for(int i = 0; i < suffStats.size(); i++) {
      total.add(suffStats.get(i));
    }
    return total;
  }
}
473
21.571429
90
java
multeval
multeval-master/src/multeval/util/Triple.java
package multeval.util; public class Triple<S1, S2, S3> { public S1 first; public S2 second; public S3 third; public Triple(S1 first, S2 second, S3 third) { this.first = first; this.second = second; this.third = third; } @Override public int hashCode() { return first.hashCode() ^ second.hashCode() ^ third.hashCode(); } @Override public boolean equals(Object obj) { if(obj instanceof Triple) { Triple<?, ?, ?> t = (Triple<?, ?, ?>) obj; return t.first.equals(first) && t.second.equals(second) && t.third.equals(third); } else { return false; } } }
589
19.344828
84
java
multeval
multeval-master/src/multeval/util/Tuple4.java
package multeval.util; public class Tuple4<S1, S2, S3, S4> { public S1 first; public S2 second; public S3 third; public S4 fourth; public Tuple4(S1 first, S2 second, S3 third, S4 fourth) { this.first = first; this.second = second; this.third = third; this.fourth = fourth; } @Override public int hashCode() { return first.hashCode() ^ second.hashCode() ^ third.hashCode() ^ fourth.hashCode(); } @Override public boolean equals(Object obj) { if(obj instanceof Tuple4) { Tuple4<?, ?, ?, ?> t = (Tuple4<?, ?, ?, ?>) obj; return t.first.equals(first) && t.second.equals(second) && t.third.equals(third) && t.fourth.equals(fourth); } else { return false; } } }
700
21.612903
111
java
tsml-java
tsml-java-master/src/main/java/evaluation/EstimatorResultsAnalysis.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package evaluation; import ResultsProcessing.MatlabController; import evaluation.storage.EstimatorResults; import evaluation.storage.EstimatorResultsCollection; import fileIO.OutFile; import jxl.Workbook; import jxl.WorkbookSettings; import jxl.write.WritableCellFormat; import jxl.write.WritableFont; import jxl.write.WritableSheet; import jxl.write.WritableWorkbook; import statistics.tests.OneSampleTests; import statistics.tests.TwoSampleTests; import utilities.GenericTools; import utilities.InstanceTools; import utilities.StatisticalUtilities; import utilities.generic_storage.Pair; import weka.clusterers.XMeans; import weka.core.Instances; import java.io.File; import java.io.FileNotFoundException; import java.util.*; import java.util.function.Function; /** * * This is a monster of a class, with some bad code and not enough documentation. It's improving over time however. If there are any questions about it, best bet would be to email me (see below). This class is given a much better front end/'api' in MultipleEstimatorEvaluation.java. Users should almost always use that class for their comparative summaries of different estimators. 
The two functions from this class in particular a user would actually use in their code might be: performFullEvaluation(...) and performTestAccEvalOnly(...), the former of which is the the function wrapped by MultipleEstimatorEvaluation Basically, this is a collection of static functions to analyse/handle COMPLETED (i.e no folds missing out of those expected of the specified estimatorXdatasetXfoldXsplit set) sets of results in EstimatorResults format For some reason, the excel workbook writer library i found/used makes xls files (instead of xlsx) and doesn't support recent excel default fonts. Just open it and saveas xlsx if you want to Future work when wanted/needed would be to handle incomplete results (e.g random folds missing), more matlab figures over time, and a MASSIVE refactor to remove the crap code * * @author James Large james.large@uea.ac.uk */ public class EstimatorResultsAnalysis { //actual parameters public static String expRootDirectory; public static boolean buildMatlabDiagrams = false; public static boolean testResultsOnly = false; //declares the type of results being processed i.e. classification or clustering. Used to include/exclude certain // metric like estimate timings for classification. 
public static EstimatorResultsCollection.ResultsType resultsType = EstimatorResultsCollection.ResultsType.CLASSIFICATION; public static PerformanceMetric trainTimeMetric = PerformanceMetric.buildTime; public static PerformanceMetric benchmarkedTrainTimeMetric = PerformanceMetric.buildTimeBenchmarked; public static PerformanceMetric testTimeMetric = PerformanceMetric.totalTestTime; public static PerformanceMetric benchmarkedTestTimeMetric = PerformanceMetric.totalTestTimeBenchmarked; // public static PerformanceMetric testTimeMetric = PerformanceMetric.avgTestPredTime; // public static PerformanceMetric benchmarkedTestTimeMetric = PerformanceMetric.avgTestPredTimeBenchmarked; // public static PerformanceMetric estimateTimeMetric = PerformanceMetric.additionalTimeForEstimate; // public static PerformanceMetric benchmarkedEstimateTimeMetric = PerformanceMetric.additionalTimeForEstimateBenchmarked; // public static PerformanceMetric estimateTimeMetric = PerformanceMetric.fromScratchEstimateTime; // public static PerformanceMetric benchmarkedEstimateTimeMetric = PerformanceMetric.fromScratchEstimateTimeBenchmarked; public static PerformanceMetric memoryMaxMetric = PerformanceMetric.memory; public static List<PerformanceMetric> allComputationalMetrics = Arrays.asList(trainTimeMetric, benchmarkedTrainTimeMetric, testTimeMetric, benchmarkedTestTimeMetric, memoryMaxMetric, PerformanceMetric.totalBuildPlusEstimateTime, PerformanceMetric.totalBuildPlusEstimateTimeBenchmarked, PerformanceMetric.extraTimeForEstimate, PerformanceMetric.extraTimeForEstimateBenchmarked, PerformanceMetric.extraTimeForEstimate, PerformanceMetric.extraTimeForEstimateBenchmarked); //final id's and path suffixes protected static final String matlabFilePath = "src/main/matlab/"; protected static final String pairwiseScatterDiaPath = "dias_PairwiseScatter/"; protected static final String cdDiaFolderName = "dias_CriticalDifference/"; protected static final String pairwiseCDDiaDirName = "pairwise/"; 
protected static final String friedmanCDDiaDirName = "friedman/"; //todo being used for both raw and benchmarked 2019/10/21 //eval_timings... editing folder name before own ana, then resetting. fix. protected static String computationalDiaFolderName = "dias_Timings"; protected static String computationalDiaFolderName_raw = "dias_ComputationalResourcesRAW"; protected static String computationalDiaFolderName_benchmark = "dias_ComputationalResourcesBENCHMARKED"; public static final double FRIEDMANCDDIA_PVAL = 0.05; public static final String testLabel = "TEST"; public static final String trainLabel = "TRAIN"; public static final String trainTestDiffLabel = "TRAINTESTDIFFS"; public static final String estimateLabel = "ESTIMATE"; public static final String clusterGroupingIdentifier = "PostHocXmeansClustering"; public static class EstimatorEvaluation { public String estimatorName; public EstimatorResults[][] testResults; //[dataset][fold] public EstimatorResults[][] trainResults; //[dataset][fold] public EstimatorEvaluation(String name, EstimatorResults[][] testResults, EstimatorResults[][] trainResults) { this.estimatorName = name; this.testResults = testResults; this.trainResults = trainResults; } } /** * THIS IS THE METHOD YOU'D ACTUALLY USE, the public 'actually do stuff' method * * @param outPath a single directory, called expName, will be made in this location, containing the analysis * @param expname this will be the name of the parent folder that is made and will appear on a number of files * @param metrics a list of PerformanceMetrics that effectively are able to summarise a EstimatorResults * object into a single double, the prediction set's score. e.g. accuracy for these predictions * These metrics will also have indications of how comparisons of this metric should be calculated and represented * @param results a EstimatorResultsCollection containing test (and optionally train) results of 1/more estimators on 1/more datasets over 1/more resamples. 
* Estimator and dataset names are retrieved from this object * @param dsetGroupings Optional, a map { grouping name, groupings } of maps { group name, datasets in groups } that describe different subsets of * the data within which to repeat the analysis, e.g one group might be 2class datasets vs multiclass datasets. The analysis would * aid in seeing if one estimator has a competitive advantage over the others within different data characteristics/groupings */ public static void performFullEvaluation( String outPath, String expname, List<PerformanceMetric> metrics, EstimatorResultsCollection results, Map<String, Map<String, String[]>> dsetGroupings) { //hacky housekeeping MultipleEstimatorsPairwiseTest.beQuiet = true; OneSampleTests.beQuiet = true; outPath = outPath.replace("\\", "/"); if (!outPath.endsWith("/")) outPath+="/"; outPath += expname + "/"; new File(outPath).mkdirs(); expRootDirectory = outPath; OutFile bigSummary = new OutFile(outPath + expname + "_BIGglobalSummary.csv"); OutFile smallSummary = new OutFile(outPath + expname + "_SMALLglobalSummary.csv"); //this will collect the clique arrays for each metric as found by pairwise stats, //so that they can later be passed to the cd dia maker ArrayList<String> statCliquesForCDDias = new ArrayList<>(); // START USER DEFINED STATS for (PerformanceMetric metric : metrics) { String[] summary = null; try { summary = eval_metric(outPath, expname, results, metric, dsetGroupings); } catch (Exception fnf) { System.out.println("Something went wrong while writing " + metric + "files, likely later stages of analysis could " + "not find files that should have been made " + "internally in earlier stages of the pipeline, FATAL"); fnf.printStackTrace(); System.exit(0); } bigSummary.writeString(metric.name+":"); bigSummary.writeLine(summary[0]); smallSummary.writeString(metric.name+":"); smallSummary.writeLine(summary[1]); if (summary[2] != null) statCliquesForCDDias.add(summary[2]); } // END USER DEFINED STATS // START 
TIMINGS //timings will attempt to always be summarised if they are present, so handle them here as a special case //and add them onto the list of metrics ArrayList<String[]> compResourceSummaries = new ArrayList<>(); try { String[][] compResourcesSummaryRaw = eval_CompResourcesRAW(outPath, expname, results, null); //dont bother with groupings for timings compResourceSummaries.addAll(Arrays.asList(compResourcesSummaryRaw)); } catch (FileNotFoundException fnf) { System.out.println("Something went wrong while writing RAW timing files, likely " + "later stages of analysis could not find files that should have been made" + "internally in earlier stages of the pipeline, FATAL"); fnf.printStackTrace(); System.exit(0); } catch (Exception ex) { ex.printStackTrace(); System.out.println("Something went wrong while writing RAW timing files. But NOT " + "a filenotfound error. Either timings werent found, some NaN errors occurred," + " etc. Todo look into cases of this as they crop up.\n" + "CONTINUING THE ANALYSIS FOR NOW, but ignoring the RAW timings"); } // TODO proper support for benchmarked timings, link up to the diagram creation code, global summary files, etc // currently standalone try { String[][] compResourcesSummaryBenchmarked = eval_CompResourcesBENCHMARKED(outPath, expname, results, null); //dont bother with groupings for timings compResourceSummaries.addAll(Arrays.asList(compResourcesSummaryBenchmarked)); } catch (FileNotFoundException fnf) { System.out.println("Something went wrong while writing BENCHMARKED timing files, likely " + "later stages of analysis could not find files that should have been made" + "internally in earlier stages of the pipeline, FATAL"); fnf.printStackTrace(); System.exit(0); } catch (Exception ex) { ex.printStackTrace(); System.out.println("Something went wrong while writing BENCHMARKED timing files. But NOT " + "a filenotfound error. Either timings werent found, some NaN errors occurred," + " etc. 
Todo look into cases of this as they crop up.\n" + "CONTINUING THE ANALYSIS FOR NOW, but ignoring the BENCHMARKED timings"); } //end benchmarked //TODO clean all of this timing stuff up, it's jsut another layer of hacky nonsense. //just need a CLEAN break of 'does everything have timings? do all the timing analysis. //does ANYTHING not have timings? do NONE of the timing analysis //using the presence of summaries for train and test timings as an indicator that they are present List<PerformanceMetric> compMetrics = new ArrayList<>(); if (compResourceSummaries != null) { compMetrics.add(PerformanceMetric.buildTime); compMetrics.add(testTimeMetric); compMetrics.add(PerformanceMetric.totalBuildPlusEstimateTime); compMetrics.add(PerformanceMetric.extraTimeForEstimate); compMetrics.add(memoryMaxMetric); compMetrics.add(PerformanceMetric.buildTimeBenchmarked); compMetrics.add(benchmarkedTestTimeMetric); compMetrics.add(PerformanceMetric.totalBuildPlusEstimateTimeBenchmarked); compMetrics.add(PerformanceMetric.extraTimeForEstimateBenchmarked); for (int j = compResourceSummaries.size()-1; j >= 0; j--) { String label = compMetrics.get(j).name; if (compResourceSummaries.get(j) != null) { //present, so add on automatically to the list of metrics for passing around to spreadsheet/image makers etc metrics.add(compMetrics.get(j)); bigSummary.writeString(label + ":"); bigSummary.writeLine(compResourceSummaries.get(j)[0]); smallSummary.writeString(label + ":"); smallSummary.writeLine(compResourceSummaries.get(j)[1]); statCliquesForCDDias.add(compResourceSummaries.get(j)[2]); } else { //not present, ignore, and remove from list of time-specific metrics //to be passed to the timing dia creator compMetrics.remove(j); bigSummary.writeString(label + ": MISSING\n\n"); smallSummary.writeString(label + ": MISSING\n\n"); } } } //END TIMINGS bigSummary.closeFile(); smallSummary.closeFile(); jxl_buildResultsSpreadsheet(outPath, expname, metrics); String[] statCliquesForCDDiasArr = 
statCliquesForCDDias.toArray(new String[] { }); if(buildMatlabDiagrams) { MatlabController proxy = MatlabController.getInstance(); proxy.eval("addpath(genpath('"+matlabFilePath+"'))"); matlab_buildCompResourcesDias(compMetrics); matlab_buildCDDias(expname, statCliquesForCDDiasArr); matlab_buildPairwiseScatterDiagrams(outPath, expname, metrics, results.getDatasetNamesInOutput()); } } /** * Essentially just a wrapper for what eval_metricOnSplit does, in the simple case that we just have a 3d array of test accs and want summaries for it Mostly for legacy classification results not in the classifier results file format */ public static void performTestAccEvalOnly(String outPath, String filename, double[][][] testFolds, String[] cnames, String[] dsets, Map<String, Map<String, String[]>> dsetGroupings) throws FileNotFoundException { eval_metricOnSplit(outPath, filename, null, testLabel, PerformanceMetric.acc, testFolds, cnames, dsets, dsetGroupings); } protected static void writeTableFile_EstimatorDataset(String filename, String tableName, double[][] scores, String[] cnames, String[] dsets) { OutFile out=new OutFile(filename); out.writeLine(tableName + fileHelper_tabulate(scores, cnames, dsets)); out.closeFile(); } protected static void writeRawTableFile_EstimatorDataset(String filename, double[][] scores, String[] cnames) { OutFile out=new OutFile(filename); out.writeLine(fileHelper_tabulateRaw(scores, cnames)); out.closeFile(); } protected static void writeTableFile_EstimatorDatasetFolds(String filename, String tableName, double[][][] scores, String[] cnames, String[] dsets) { OutFile out=new OutFile(filename); out.writeLine(tableName + fileHelper_tabulate(scores, cnames, dsets)); out.closeFile(); } protected static void writeRawTableFile_EstimatorDatasetFolds(String filename, double[][][] scores, String[] cnames) { OutFile out=new OutFile(filename); out.writeLine(fileHelper_tabulateRaw(scores, cnames)); out.closeFile(); } /** * also writes separate win/draw/loss files 
now */ protected static String[] eval_metricOnSplitStatsFile(String outPath, String evalSet, PerformanceMetric metric, double[][][] statPerFold, double[][] statPerDset, double[][] ranks, double[][] stddevsFoldAccs, String[] cnames, String[] dsets) { String splitMetricLabel = evalSet + metric.toString(); StringBuilder shortSummaryStats = new StringBuilder(); shortSummaryStats.append(fileHelper_header(cnames)).append("\n"); shortSummaryStats.append("Avg"+splitMetricLabel+":").append(util_mean(statPerDset)).append("\n"); shortSummaryStats.append("Avg"+splitMetricLabel+"_RANK:").append(util_mean(ranks)).append("\n"); StringBuilder longSummaryStats = new StringBuilder(); longSummaryStats.append(splitMetricLabel).append(fileHelper_header(cnames)).append("\n"); longSummaryStats.append("Avg"+splitMetricLabel+"OverDsets:").append(util_mean(statPerDset)).append("\n"); longSummaryStats.append("Avg"+splitMetricLabel+"RankOverDsets:").append(util_mean(ranks)).append("\n"); longSummaryStats.append("StddevOf"+splitMetricLabel+"OverDsets:").append(util_stddev(statPerDset)).append("\n"); longSummaryStats.append("AvgOfStddevsOf"+splitMetricLabel+"OverDsetFolds:").append(util_mean(stddevsFoldAccs)).append("\n"); longSummaryStats.append("StddevsOf"+splitMetricLabel+"RanksOverDsets:").append(util_stddev(ranks)).append("\n"); String[] wdl = eval_winsDrawsLosses(statPerDset, cnames, dsets); String[] sig01wdl = eval_sigWinsDrawsLosses(0.01, statPerDset, statPerFold, cnames, dsets); String[] sig05wdl = eval_sigWinsDrawsLosses(0.05, statPerDset, statPerFold, cnames, dsets); String wdlDir = outPath+"/WinsDrawsLosses/"; (new File(wdlDir)).mkdir(); OutFile outwdl = null; outwdl = new OutFile(wdlDir + splitMetricLabel + "WinDrawLoss_LIST.csv"); outwdl.writeLine(wdl[1]); outwdl.closeFile(); outwdl = new OutFile(wdlDir + splitMetricLabel + "WinDrawLoss_LIST_Sig01.csv"); outwdl.writeLine(sig01wdl[1]); outwdl.closeFile(); outwdl = new OutFile(wdlDir + splitMetricLabel + 
"WinDrawLoss_LIST_Sig05.csv"); outwdl.writeLine(sig05wdl[1]); outwdl.closeFile(); outwdl = new OutFile(wdlDir + splitMetricLabel + "WinDrawLoss_TABLE.csv"); outwdl.writeLine(wdl[2]); outwdl.closeFile(); outwdl = new OutFile(wdlDir + splitMetricLabel + "WinDrawLoss_TABLE_Sig01.csv"); outwdl.writeLine(sig01wdl[2]); outwdl.closeFile(); outwdl = new OutFile(wdlDir + splitMetricLabel + "WinDrawLoss_TABLE_Sig05.csv"); outwdl.writeLine(sig05wdl[2]); outwdl.closeFile(); String summaryFname = outPath + fileNameBuild_summaryFile(evalSet,metric); OutFile out=new OutFile(summaryFname); out.writeLine(longSummaryStats.toString()); out.writeLine(wdl[0]); out.writeLine("\n"); out.writeLine(sig01wdl[0]); out.writeLine("\n"); out.writeLine(sig05wdl[0]); out.writeLine("\n"); String cliques = ""; String avgsFile = outPath + fileNameBuild_avgsFile(evalSet, metric); try { out.writeLine(MultipleEstimatorsPairwiseTest.runTests(avgsFile).toString()); // out.writeLine(MultipleEstimatorsPairwiseTest.runTests(outPath+filename+"_"+splitMetricLabal+".csv").toString()); cliques = MultipleEstimatorsPairwiseTest.printCliques(); out.writeLine("\n\n" + cliques); } catch (Exception e) { System.err.println("\n\n"); System.err.println("*****".replace("*", "*****")); System.err.println("MultipleEstimatorsPairwiseTest.runTests() failed. Almost certainly this is because there were" + "too many ties/duplicates within one of the pairwise tests and then an index out of bounds error was thrown. " + "This will be fixed at some point. 
The analysis will CARRY ON, and everything that is successfully printed out " + "IS CORRECT, however whatever particular table that test would have been summarised as is missing from your files."); System.err.println("avgs filename = "+avgsFile); e.printStackTrace(); System.err.println("*****".replace("*", "*****")); System.err.println("\n\n"); } out.closeFile(); return new String[] { longSummaryStats.toString(), shortSummaryStats.toString(), cliques }; } protected static String fileNameBuild_cd(String filename, String statistic) { return "cd_"+filename+"_"+statistic+"S"; } protected static String fileNameBuild_pws(String filename, String statistic) { return "pws_"+filename+"_"+statistic+"S"; } protected static String fileNameBuild_pwsInd(String c1, String c2, String statistic) { return "pws_"+c1+"VS"+c2+"_"+statistic+"S"; } protected static String fileNameBuild_avgsFile(String evalSet, PerformanceMetric metric) { return evalSet+metric+"_"+(metric.takeMean?"MEANS":"MEDIANS")+".csv"; } protected static String fileNameBuild_ranksFile(String evalSet, PerformanceMetric metric) { return evalSet+metric+"_RANKS.csv"; } protected static String fileNameBuild_stddevFile(String evalSet, PerformanceMetric metric) { return evalSet+metric+"_STDDEV.csv"; } protected static String fileNameBuild_rawAvgsFile(String evalSet, PerformanceMetric metric) { return evalSet+metric+"_RAW.csv"; } protected static String fileNameBuild_summaryFile(String evalSet, PerformanceMetric metric) { return evalSet+metric+"_SUMMARY.csv"; } protected static String fileNameBuild_wdlFile(String evalSet, PerformanceMetric metric) { return evalSet+metric+"_SUMMARY.csv"; } protected static String[] eval_metricOnSplit(String outPath, String filename, String groupingName, String evalSet, PerformanceMetric metric, double[][][] foldVals, String[] cnames, String[] dsets, Map<String, Map<String, String[]>> dsetGroupings) throws FileNotFoundException { outPath += evalSet + "/"; if (groupingName != null && 
!groupingName.equals("")) outPath += groupingName + "/"; //BEFORE ordering, write the individual folds files eval_perFoldFiles(outPath+evalSet+"FOLD"+metric+"S/", foldVals, cnames, dsets, evalSet); double[][] dsetVals = findAvgsOverFolds(foldVals, metric.takeMean); double[][] stddevsFoldVals = findStddevsOverFolds(foldVals); double[][] ranks = findRanks(dsetVals, metric.maximise); int[] ordering = findOrdering(ranks); //ordering is now an array of value referring to the rank-order of the element at each index //e.g [1, 4, 2, 3] means that the first (in index 0) estimator is best, third is next, then fourth, then second //now order all the info (essentially in parallel arrays) we've collected by the estimator's ranks //such that e.g the data referring to the first estimator is still in index 0, the data referring to //the second estimator is moved to index 1, etc ranks = util_order(ranks, ordering); cnames = util_order(cnames, ordering); foldVals = util_order(foldVals, ordering); dsetVals = util_order(dsetVals, ordering); stddevsFoldVals = util_order(stddevsFoldVals, ordering); if (evalSet.equalsIgnoreCase("TEST") || allComputationalMetrics.contains(metric)) { //qol for cd dia creation, make a copy of all the raw test stat files in a common folder, one for pairwise, one for freidman String cdFolder = expRootDirectory + cdDiaFolderName; (new File(cdFolder)).mkdirs(); OutFile out = new OutFile(cdFolder+"readme.txt"); out.writeLine("remember that nlls are auto-negated now for cd dia ordering\n"); out.writeLine("and that basic notepad wont show the line breaks properly, view (cliques especially) in notepad++"); out.closeFile(); for (String subFolder : new String[] { pairwiseCDDiaDirName, friedmanCDDiaDirName }) { (new File(cdFolder+subFolder+"/")).mkdirs(); String cdName = cdFolder+subFolder+"/"+fileNameBuild_cd(filename,metric.name)+".csv"; //meta hack for qol, negate the nll (sigh...) 
for correct ordering on dia //ALSO now negating the timings, smaller = better if (!metric.maximise) { double[][] negatedDsetVals = new double[dsetVals.length][dsetVals[0].length]; for (int i = 0; i < dsetVals.length; i++) { for (int j = 0; j < dsetVals[i].length; j++) { negatedDsetVals[i][j] = dsetVals[i][j] * -1; } } writeRawTableFile_EstimatorDataset(cdName, negatedDsetVals, cnames); } else { writeRawTableFile_EstimatorDataset(cdName, dsetVals, cnames); } } //end cd dia qol //qol for pairwisescatter dia creation, make a copy of the test stat files String pwsFolder = expRootDirectory + pairwiseScatterDiaPath; (new File(pwsFolder)).mkdirs(); String pwsName = pwsFolder+fileNameBuild_pws(filename,metric.name)+".csv"; writeRawTableFile_EstimatorDataset(pwsName, dsetVals, cnames); //end pairwisescatter qol //qol for timing dia creation, make a copy of the avgs files with headers if (allComputationalMetrics.contains(metric)) { String compDir = expRootDirectory+ computationalDiaFolderName + "/"; (new File(compDir)).mkdirs(); String fname = compDir+fileNameBuild_avgsFile(evalSet,metric); writeTableFile_EstimatorDataset(fname, evalSet+metric, dsetVals, cnames, dsets); } //end timing dia qol } writeTableFile_EstimatorDataset(outPath + fileNameBuild_ranksFile(evalSet, metric), evalSet+metric+"RANKS", ranks, cnames, dsets); writeTableFile_EstimatorDataset(outPath + fileNameBuild_avgsFile(evalSet, metric), evalSet+metric, dsetVals, cnames, dsets); writeRawTableFile_EstimatorDataset(outPath + fileNameBuild_rawAvgsFile(evalSet, metric), dsetVals, cnames); //for matlab stuff writeTableFile_EstimatorDataset(outPath + fileNameBuild_stddevFile(evalSet, metric), evalSet+metric+"STDDEVS", stddevsFoldVals, cnames, dsets); String[] groupingSummary = { "" }; if (dsetGroupings != null && dsetGroupings.size() != 0) groupingSummary = eval_metricDsetGroups(outPath, filename, evalSet, metric, foldVals, cnames, dsets, dsetGroupings); String[] summaryStrings = {}; summaryStrings = 
eval_metricOnSplitStatsFile(outPath, evalSet, metric, foldVals, dsetVals, ranks, stddevsFoldVals, cnames, dsets); //write these even if not actually making the dias this execution, might manually make them later writeCliqueHelperFiles(expRootDirectory + cdDiaFolderName + pairwiseCDDiaDirName, filename, metric, summaryStrings[2]); //this really needs cleaning up at some point... jsut make it a list and stop circlejerking to arrays String[] summaryStrings2 = new String[summaryStrings.length+groupingSummary.length]; int i = 0; for ( ; i < summaryStrings.length; i++) summaryStrings2[i] = summaryStrings[i]; for (int j = 0; j < groupingSummary.length; j++) summaryStrings2[i] = groupingSummary[j]; return summaryStrings2; } public static String[] eval_metricDsetGroups(String outPathBase, String filename, String evalSet, PerformanceMetric metric, double[][][] foldVals, String[] cnames, String[] dsets, Map<String, Map<String, String[]>> dsetGroupings) throws FileNotFoundException { String outPath = expRootDirectory + "DatasetGroupings/"; // String outPath = outPathBase + "DatasetGroupings/"; (new File(outPath)).mkdir(); //for each grouping method for (Map.Entry<String, Map<String, String[]>> dsetGroupingMethodEntry : dsetGroupings.entrySet()) { String groupingMethodName = dsetGroupingMethodEntry.getKey(); String groupingMethodPath = outPath + groupingMethodName + "/"; (new File(groupingMethodPath+metric.name+"/"+evalSet+"/")).mkdirs(); Map<String, String[]> dsetGroupingMethod = dsetGroupingMethodEntry.getValue(); if (groupingMethodName.equals(clusterGroupingIdentifier)) { //if clustering is to be done, build the groups now. 
//can't 'put' these groups back into the dsetGroupings map //since we'd be editing a map that we're currently iterating over //EDIT: actually, jsut move this process outside the for loop as //a preprocess step, if the need ever arises assert(dsetGroupingMethod == null); dsetGroupingMethod = new HashMap<>(); int[] assignments = dsetGroups_clusterDsetResults(StatisticalUtilities.averageFinalDimension(foldVals)); //puts numClusters as final element assert(assignments.length == dsets.length+1); int numClusters = assignments[dsets.length]; String[] clusterNames = new String[numClusters]; String[][] clusterDsets = new String[numClusters][]; //would generally prefer to jsut loop once over the assignments array, but that would //require we already know the size of each cluster and/or wankery with array lists for (int cluster = 0; cluster < numClusters; cluster++) { ArrayList<String> dsetAlist = new ArrayList<>(); for (int dset = 0; dset < dsets.length; dset++) if (assignments[dset] == cluster) dsetAlist.add(dsets[dset]); clusterNames[cluster] = "Cluster " + (cluster+1); clusterDsets[cluster] = dsetAlist.toArray(new String[] { }); dsetGroupingMethod.put(clusterNames[cluster], clusterDsets[cluster]); } //writing all the clusters to one file start here OutFile allDsetsOut = new OutFile(groupingMethodPath+metric.name+"/"+evalSet+"/" + "clusters.csv"); for (int cluster = 0; cluster < numClusters; cluster++) allDsetsOut.writeString(clusterNames[cluster] + ","); allDsetsOut.writeLine(""); //printing variable length 2d array in table form, columns = clusters, rows = dsets int dsetInd = 0; boolean allDone = false; while (!allDone) { allDone = true; for (int cluster = 0; cluster < numClusters; cluster++) { if (dsetInd < clusterDsets[cluster].length) { allDsetsOut.writeString(clusterDsets[cluster][dsetInd]); allDone = false; } allDsetsOut.writeString(","); } allDsetsOut.writeLine(""); dsetInd++; } allDsetsOut.closeFile(); //writing all the clusters to one file end here String 
clusterGroupsPath = groupingMethodPath+metric+"/"+evalSet+"/" + "DsetClustersTxtFiles/"; (new File(clusterGroupsPath)).mkdir(); //writing each individual clsuter file start here for (int cluster = 0; cluster < numClusters; cluster++) { OutFile clusterFile = new OutFile(clusterGroupsPath + clusterNames[cluster] + ".txt"); for (String dset : clusterDsets[cluster]) clusterFile.writeLine(dset); clusterFile.closeFile(); } } int numGroups = dsetGroupingMethod.size(); String[] groupNames = new String[numGroups]; //using maps for this because estimatornames could be in different ordering based on rankings //within each group. ordering of dataset groups temselves is constant though. jsut skips //annoying/bug inducing housekeeping of indices Map<String, double[]> groupWins = new HashMap<>(); //will reflect ties, e.g if 2 estimators tie for first rank, each will get 'half' a win Map<String, double[]> groupAccs = new HashMap<>(); for (int i = 0; i < cnames.length; i++) { groupWins.put(cnames[i], new double[numGroups]); groupAccs.put(cnames[i], new double[numGroups]); } //for each group in this grouping method StringBuilder [] groupSummaryStringBuilders = new StringBuilder[numGroups]; int groupIndex = 0; for (Map.Entry<String, String[]> dsetGroup : dsetGroupingMethod.entrySet()) { String groupName = dsetGroup.getKey(); groupNames[groupIndex] = groupName; // String groupPath = groupingMethodPath + groupName + "/"; // (new File(groupPath)).mkdir(); //perform group analysis String[] groupDsets = dsetGroup.getValue(); double[][][] groupFoldVals = dsetGroups_collectDsetVals(foldVals, dsets, groupDsets); String groupFileName = filename + "-" + groupName + "-"; // String[] groupSummaryFileStrings = eval_metricOnSplit(groupPath+statName+"/", groupFileName, groupName, evalSet, statName, groupFoldVals, cnames, groupDsets, null); String[] groupSummaryFileStrings = eval_metricOnSplit(groupingMethodPath+metric+"/", groupFileName, groupName, evalSet, metric, groupFoldVals, cnames, 
groupDsets, null); //collect the accuracies for the dataset group String[] estimatorNamesLine = groupSummaryFileStrings[1].split("\n")[0].split(","); assert(estimatorNamesLine.length-1 == cnames.length); String[] accLineParts = groupSummaryFileStrings[1].split("\n")[1].split(","); for (int i = 1; i < accLineParts.length; i++) { //i=1 => skip the row fileHelper_header double[] accs = groupAccs.get(estimatorNamesLine[i]); accs[groupIndex] = Double.parseDouble(accLineParts[i]); groupAccs.put(estimatorNamesLine[i], accs); } //collect the wins for the group Scanner ranksFileIn = new Scanner(new File(groupingMethodPath+metric+"/"+evalSet+"/"+groupName+"/"+groupFileName+"_"+evalSet+metric+"RANKS.csv")); estimatorNamesLine = ranksFileIn.nextLine().split(","); double[] winCounts = new double[estimatorNamesLine.length]; while (ranksFileIn.hasNextLine()) { //read the ranks on this dataset String[] ranksStr = ranksFileIn.nextLine().split(","); double[] ranks = new double[ranksStr.length]; ranks[0] = Double.MAX_VALUE; for (int i = 1; i < ranks.length; i++) ranks[i] = Double.parseDouble(ranksStr[i]); //there might be ties, so cant just look for the rank "1" List<Integer> minRanks = util_min(ranks); for (Integer minRank : minRanks) winCounts[minRank] += 1.0 / minRanks.size(); } ranksFileIn.close(); for (int i = 1; i < winCounts.length; i++) { double[] wins = groupWins.get(estimatorNamesLine[i]); wins[groupIndex] = winCounts[i]; groupWins.put(estimatorNamesLine[i], wins); } //build the summary string StringBuilder sb = new StringBuilder("Group: " +groupName + "\n"); sb.append(groupSummaryFileStrings[1]); //when will the hacks ever end? 
String cliques = groupSummaryFileStrings[2]; cliques = cliques.replace("cliques = [", "cliques=,").replace("]", ""); //remove spaces in 'title' before next step cliques = cliques.replace(" ", ",").replace("\n", "\n,"); //make vals comma separated, to line up in csv file sb.append("\n"+cliques); groupSummaryStringBuilders[groupIndex] = sb; groupIndex++; } String groupMethodSummaryFilename = groupingMethodPath + filename + "_" + groupingMethodName + "_" + evalSet + metric + ".csv"; dsetGroups_writeGroupingMethodSummaryFile(groupMethodSummaryFilename, groupSummaryStringBuilders, cnames, groupNames, groupWins, groupAccs); } return new String[] { }; } public static void dsetGroups_writeGroupingMethodSummaryFile(String filename, StringBuilder [] groupSummaryStringBuilders, String[] cnames, String[] groupNames, Map<String, double[]> groupWins, Map<String, double[]> groupAccs) { OutFile groupingMethodSummaryFile = new OutFile(filename); for (StringBuilder groupSummary : groupSummaryStringBuilders) { groupingMethodSummaryFile.writeLine(groupSummary.toString()); groupingMethodSummaryFile.writeLine("\n\n"); } groupingMethodSummaryFile.writeString(dsetGroups_buildAccsTableString(groupAccs, cnames, groupNames)); groupingMethodSummaryFile.writeLine("\n\n"); groupingMethodSummaryFile.writeString(dsetGroups_buildWinsTableString(groupWins, cnames, groupNames)); groupingMethodSummaryFile.closeFile(); } public static String dsetGroups_buildWinsTableString(Map<String, double[]> groupWins, String[] cnames, String[] groupNames) { int numGroups = groupNames.length; StringBuilder sb = new StringBuilder(); sb.append("This table accounts for ties on a dset e.g if 2 estimators share best accuracy " + "that will count as half a win for each").append("\n"); //header row sb.append("NumWinsInGroups:"); for (String cname : cnames) sb.append(","+cname); sb.append(",TotalNumDsetsInGroup").append("\n"); //calc the avgs too double[] groupSums = new double[numGroups], clsfrSums = new 
double[cnames.length]; for (int i = 0; i < numGroups; i++) { sb.append(groupNames[i]); for (int j = 0; j < cnames.length; j++) { double val = groupWins.get(cnames[j])[i]; groupSums[i] += val; clsfrSums[j] += val; sb.append(","+val); } sb.append(","+(groupSums[i])).append("\n"); } //print final row, avg of estimators double globalSum = 0; sb.append("TotalNumWinsForEstimator"); for (int j = 0; j < cnames.length; j++) { globalSum += clsfrSums[j]; sb.append(","+clsfrSums[j]); } sb.append(","+globalSum).append("\n"); return sb.toString(); } public static String dsetGroups_buildAccsTableString(Map<String, double[]> groupAccs, String[] cnames, String[] groupNames) { int numGroups = groupNames.length; StringBuilder sb = new StringBuilder(); //header row sb.append("AvgAccsOnGroups:"); for (String cname : cnames) sb.append(","+cname); sb.append(",Averages").append("\n"); //calc the avgs too double[] groupAvgs = new double[numGroups], clsfrAvgs = new double[cnames.length]; for (int i = 0; i < numGroups; i++) { sb.append(groupNames[i]); for (int j = 0; j < cnames.length; j++) { double val = groupAccs.get(cnames[j])[i]; groupAvgs[i] += val; clsfrAvgs[j] += val; sb.append(","+val); } sb.append(","+(groupAvgs[i]/cnames.length)).append("\n"); } //print final row, avg of estimators double globalAvg = 0; sb.append("Averages"); for (int j = 0; j < cnames.length; j++) { double avg = clsfrAvgs[j]/numGroups; globalAvg += avg; sb.append(","+avg); } globalAvg /= cnames.length; sb.append(","+globalAvg).append("\n"); return sb.toString(); } public static double[][][] dsetGroups_collectDsetVals(double[][][] foldVals, String[] dsets, String[] groupDsets) { //cloning arrays to avoid any potential referencing issues considering we're recursing + doing more stuff after all this grouping shite double[][][] groupFoldVals = new double[foldVals.length][groupDsets.length][foldVals[0][0].length]; for (int groupDsetInd = 0; groupDsetInd < groupDsets.length; ++groupDsetInd) { String dset = 
groupDsets[groupDsetInd]; int globalDsetInd = Arrays.asList(dsets).indexOf(dset); for (int estimator = 0; estimator < foldVals.length; estimator++) { for (int fold = 0; fold < foldVals[estimator][globalDsetInd].length; fold++) { groupFoldVals[estimator][groupDsetInd][fold] = foldVals[estimator][globalDsetInd][fold]; } } } return groupFoldVals; } protected static String[] eval_metric(String outPath, String filename, EstimatorResultsCollection results, PerformanceMetric metric, Map<String, Map<String, String[]>> dsetGroupings) throws Exception { String statName = metric.name; outPath += statName + "/"; new File(outPath).mkdirs(); String[] cnames = results.getEstimatorNamesInOutput(); String[] dsets = results.getDatasetNamesInOutput(); double[][][] testFolds = results.sliceSplit("test").retrieveDoubles(metric.getter)[0]; if (!testResultsOnly) { double[][][] trainFolds = results.sliceSplit("train").retrieveDoubles(metric.getter)[0]; double[][][] trainTestDiffsFolds = findTrainTestDiffs(trainFolds, testFolds); eval_metricOnSplit(outPath, filename, null, trainLabel, metric, trainFolds, cnames, dsets, dsetGroupings); eval_metricOnSplit(outPath, filename, null, trainTestDiffLabel, metric, trainTestDiffsFolds, cnames, dsets, dsetGroupings); } return eval_metricOnSplit(outPath, filename, null, testLabel, metric, testFolds, cnames, dsets, dsetGroupings); } protected static String[/*{train,test}*/][] eval_CompResourcesRAW(String outPath, String filename, EstimatorResultsCollection results, Map<String, Map<String, String[]>> dsetGroupings) throws Exception { String[] cnames = results.getEstimatorNamesInOutput(); String[] dsets = results.getDatasetNamesInOutput(); computationalDiaFolderName = computationalDiaFolderName_raw; PerformanceMetric trainTimeMetric = EstimatorResultsAnalysis.trainTimeMetric; String timingsOutPath = outPath + "TimingsRAW/"; //special case for timings new File(timingsOutPath).mkdirs(); // NOTE: getting train timings from test files intentionally ( train.. 
= ..sliceSplit("test")..), avoids check for whether we're actually loading in // train files in comparison set up. build times should be same in both trainFoldX and testFoldX file anyway double[][][] trainTimes = results.sliceSplit("test").retrieveDoubles(trainTimeMetric.getter)[0]; String[] trainResStr = null; if (trainTimes != null) trainResStr = eval_metricOnSplit(timingsOutPath, filename, null, trainLabel, trainTimeMetric, trainTimes, cnames, dsets, dsetGroupings); double[][][] testTimes = results.sliceSplit("test").retrieveDoubles(testTimeMetric.getter)[0]; String[] testResStr = null; if (testTimes != null) testResStr = eval_metricOnSplit(timingsOutPath, filename, null, testLabel, testTimeMetric, testTimes, cnames, dsets, dsetGroupings); String[] estimateResStr1 = null; String[] estimateResStr2 = null; if (Arrays.asList(results.getSplits()).contains("train") && resultsType != EstimatorResultsCollection.ResultsType.CLUSTERING) { double[][][] estimateTimes1 = results.sliceSplit("train").retrieveDoubles(PerformanceMetric.totalBuildPlusEstimateTime.getter)[0]; if (estimateTimes1 != null) estimateResStr1 = eval_metricOnSplit(timingsOutPath, filename, null, estimateLabel, PerformanceMetric.totalBuildPlusEstimateTime, estimateTimes1, cnames, dsets, dsetGroupings); double[][][] estimateTimes2 = results.sliceSplit("train").retrieveDoubles(PerformanceMetric.extraTimeForEstimate.getter)[0]; if (estimateTimes2 != null) estimateResStr2 = eval_metricOnSplit(timingsOutPath, filename, null, estimateLabel, PerformanceMetric.extraTimeForEstimate, estimateTimes2, cnames, dsets, dsetGroupings); } String memoryOutPath = outPath + "MaxMemory/"; //special case for timings new File(memoryOutPath).mkdirs(); // NOTE: same as before, just getting the memory from the test files double[][][] memoryMax = results.sliceSplit("test").retrieveDoubles(memoryMaxMetric.getter)[0]; String[] memoryResStr = null; if (memoryMax != null) memoryResStr = eval_metricOnSplit(memoryOutPath, filename, null, 
testLabel, memoryMaxMetric, memoryMax, cnames, dsets, dsetGroupings); return new String[][] { trainResStr, testResStr, estimateResStr1, estimateResStr2, memoryResStr }; // return new String[][] { trainResStr, testResStr, estimateResStr }; } protected static String[/*{train,test}*/][] eval_CompResourcesBENCHMARKED(String outPath, String filename, EstimatorResultsCollection results, Map<String, Map<String, String[]>> dsetGroupings) throws Exception { String[] cnames = results.getEstimatorNamesInOutput(); String[] dsets = results.getDatasetNamesInOutput(); computationalDiaFolderName = computationalDiaFolderName_benchmark; PerformanceMetric trainTimeMetric = benchmarkedTrainTimeMetric; outPath += "TimingsBENCHMARKED/"; //special case for timings new File(outPath).mkdirs(); // NOTE: getting train timings from test files intentionally ( train.. = ..sliceSplit("test")..), avoids check for whether we're actually loading in // train files in comparison set up. build times should be same in both trainFoldX and testFoldX file anyway double[][][] benchmarkedTrainTimes = results.sliceSplit("test").retrieveDoubles(trainTimeMetric.getter)[0]; String[] trainResStr = null; if (benchmarkedTrainTimes != null) { trainResStr = eval_metricOnSplit(outPath, filename, null, trainLabel, trainTimeMetric, benchmarkedTrainTimes, cnames, dsets, dsetGroupings); writeTableFile_EstimatorDatasetFolds(outPath + "allTrainBenchmarkTimes.csv", "TrainBenchmarkTimes", benchmarkedTrainTimes, cnames, dsets); } double[][][] benchmarkedTestTimes = results.sliceSplit("test").retrieveDoubles(benchmarkedTestTimeMetric.getter)[0]; String[] testResStr = null; if (benchmarkedTestTimes != null) { testResStr = eval_metricOnSplit(outPath, filename, null, testLabel, benchmarkedTestTimeMetric, benchmarkedTestTimes, cnames, dsets, dsetGroupings); writeTableFile_EstimatorDatasetFolds(outPath + "allTestBenchmarkTimes.csv", "TestBenchmarkTimes", benchmarkedTestTimes, cnames, dsets); } String[] estimateResStr1 = null; 
String[] estimateResStr2 = null; if (Arrays.asList(results.getSplits()).contains("train") && resultsType != EstimatorResultsCollection.ResultsType.CLUSTERING) { double[][][] estimateTimes1 = results.sliceSplit("train").retrieveDoubles(PerformanceMetric.totalBuildPlusEstimateTimeBenchmarked.getter)[0]; if (estimateTimes1 != null) estimateResStr1 = eval_metricOnSplit(outPath, filename, null, estimateLabel, PerformanceMetric.totalBuildPlusEstimateTimeBenchmarked, estimateTimes1, cnames, dsets, dsetGroupings); double[][][] estimateTimes2 = results.sliceSplit("train").retrieveDoubles(PerformanceMetric.extraTimeForEstimateBenchmarked.getter)[0]; if (estimateTimes2 != null) estimateResStr2 = eval_metricOnSplit(outPath, filename, null, estimateLabel, PerformanceMetric.extraTimeForEstimateBenchmarked, estimateTimes2, cnames, dsets, dsetGroupings); } return new String[][] { trainResStr, testResStr, estimateResStr1, estimateResStr2 }; } protected static void writeCliqueHelperFiles(String cdCSVpath, String expname, PerformanceMetric metric, String cliques) { (new File(cdCSVpath)).mkdirs(); //temp workaround, just write the cliques and readin again from matlab for ease of checking/editing for pairwise edge cases OutFile out = new OutFile (cdCSVpath + fileNameBuild_cd(expname, metric.name) + "_cliques.txt"); out.writeString(cliques); out.closeFile(); } /** * this will build all the diagrams it can from the average results files that * exist in the cddia directory, instead of being given a list of stats that it should expect * to find there, carry over from when I made the diagrams manually. 
todo maybe now force it to take * a list of stats to expect as a form of error checking */ protected static void matlab_buildCDDias(String expname, String[] cliques) { MatlabController proxy = MatlabController.getInstance(); proxy.eval("buildDiasInDirectory('"+expRootDirectory+cdDiaFolderName+"/"+friedmanCDDiaDirName+"', 0, "+FRIEDMANCDDIA_PVAL+");"); //friedman proxy.eval("clear"); proxy.eval("buildDiasInDirectory('"+expRootDirectory+cdDiaFolderName+"/"+pairwiseCDDiaDirName+"', 1);"); //pairwise proxy.eval("clear"); } protected static void matlab_buildCompResourcesDias(List<PerformanceMetric> metrics) { MatlabController proxy = MatlabController.getInstance(); for (PerformanceMetric metric : metrics) { String diaFolder = expRootDirectory + "/" + (metric.name.toLowerCase().contains(PerformanceMetric.benchmarkSuffix.toLowerCase()) ? computationalDiaFolderName_benchmark : computationalDiaFolderName_raw) + "/"; String evalSet = metric.defaultSplit; String filenameNoExtension = fileNameBuild_avgsFile(evalSet, metric).replace(".csv", ""); String ylabel = metric.equals(memoryMaxMetric) ? 
"Max Memory (MB)" : "Time, " + evalSet.toLowerCase() + " (ms)"; proxy.eval("compResourcesLinePlot('" + diaFolder + filenameNoExtension + "', '" + evalSet.toLowerCase() + "','" + ylabel + "');"); } } protected static void eval_perFoldFiles(String outPath, double[][][] folds, String[] cnames, String[] dsets, String splitLabel) { new File(outPath).mkdirs(); StringBuilder headers = new StringBuilder("folds:"); for (int f = 0; f < folds[0][0].length; f++) headers.append(","+f); for (int c = 0; c < folds.length; c++) { OutFile out=new OutFile(outPath + cnames[c]+"_"+splitLabel+"FOLDS.csv"); out.writeLine(headers.toString()); for (int d = 0; d < folds[c].length; d++) { out.writeString(dsets[d]); for (int f = 0; f < folds[c][d].length; f++) out.writeString("," + folds[c][d][f]); out.writeLine(""); } out.closeFile(); } writeRawTableFile_EstimatorDatasetFolds(outPath + "TEXASPLOT_"+splitLabel+".csv", folds, cnames); } protected static String fileHelper_tabulate(double[][] res, String[] cnames, String[] dsets) { StringBuilder sb = new StringBuilder(); sb.append(fileHelper_header(cnames)); for (int dset = 0; dset < res[0].length; ++dset) { sb.append("\n").append(dsets[dset]); for (int estimator = 0; estimator < res.length; estimator++) sb.append("," + res[estimator][dset]); } return sb.toString(); } protected static String fileHelper_tabulateRaw(double[][] res, String[] cnames) { StringBuilder sb = new StringBuilder(); sb.append(fileHelper_header(cnames).substring(1)); for (int dset = 0; dset < res[0].length; ++dset) { sb.append("\n").append(res[0][dset]); for (int estimator = 1; estimator < res.length; estimator++) sb.append("," + res[estimator][dset]); } return sb.toString(); } protected static String fileHelper_tabulate(double[][][] res, String[] cnames, String[] dsets) { StringBuilder sb = new StringBuilder(); sb.append(fileHelper_header(cnames)); for (int dset = 0; dset < res[0].length; ++dset) { for (int fold = 0; fold < res[0][0].length; fold++) { 
sb.append("\n").append(dsets[dset]).append("_").append(fold);
            for (int estimator = 0; estimator < res.length; estimator++)
                sb.append("," + res[estimator][dset][fold]);
        }
    }
    return sb.toString();
}

// Tabulates per-fold values as CSV *without* a dataset/row-label column: the header row is the
// estimator names (leading comma stripped via substring(1)), then one row per dataset/fold where
// the first estimator's value doubles as the first column. Used for the "raw" files fed to the
// matlab plotting scripts.
protected static String fileHelper_tabulateRaw(double[][][] res, String[] cnames) {
    StringBuilder sb = new StringBuilder();
    sb.append(fileHelper_header(cnames).substring(1));
    for (int dset = 0; dset < res[0].length; ++dset) {
        for (int fold = 0; fold < res[0][0].length; fold++) {
            sb.append("\n").append(res[0][dset][fold]);
            for (int estimator = 1; estimator < res.length; estimator++)
                sb.append("," + res[estimator][dset][fold]);
        }
    }
    return sb.toString();
}

// Builds the CSV header fragment ",name1,name2,...". The leading comma is intentional so callers
// can prepend a row-label column (raw variants strip it with substring(1)).
protected static String fileHelper_header(String[] names) {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < names.length; i++)
        sb.append(",").append(names[i]);
    return sb.toString();
}

// One CSV cell per estimator: the mean over the second dimension (the 'false' flag is passed
// straight to StatisticalUtilities.mean — presumably "don't drop NaNs"; TODO confirm).
protected static String util_mean(double[][] res) {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < res.length; i++)
        sb.append(",").append(StatisticalUtilities.mean(res[i], false));
    return sb.toString();
}

// One CSV cell per estimator: standard deviation over the second dimension, computed around
// the mean of the same slice.
protected static String util_stddev(double[][] res) {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < res.length; i++)
        sb.append(",").append(StatisticalUtilities.standardDeviation(res[i], false, StatisticalUtilities.mean(res[i], false)));
    return sb.toString();
}

//    protected static double[][][] util_correctTimingsForBenchmarks(double[][][] timings, double[][][] benchmarks) {
//        for (int estimator = 0; estimator < timings.length; estimator++)
//            for (int dset = 0; dset < timings[0].length; dset++)
//                for (int fold = 0; fold < timings[0][0].length; fold++)
//                    timings[estimator][dset][fold] /= benchmarks[estimator][dset][fold];
//
//        return timings;
//    }

// Element-wise train-minus-test differences, shaped [estimator][dataset][fold].
protected static double[][][] findTrainTestDiffs(double[][][] trainFoldAccs, double[][][] testFoldAccs) {
    double[][][] diffs = new double[trainFoldAccs.length][trainFoldAccs[0].length][trainFoldAccs[0][0].length];
    for (int c = 0; c < diffs.length; c++)
        for
(int d = 0; d < diffs[c].length; d++)
            for (int f = 0; f < diffs[c][d].length; f++)
                diffs[c][d][f] = trainFoldAccs[c][d][f] - testFoldAccs[c][d][f];
    return diffs;
}

/**
 * Collapses per-fold values down to [estimator][dataset].
 *
 * todo maybe enum for mode etc
 *
 * @param takeMean if true, will average by taking mean, else will take median
 */
protected static double[][] findAvgsOverFolds(double[][][] foldaccs, boolean takeMean) {
    double[][] accs = new double[foldaccs.length][foldaccs[0].length];
    for (int i = 0; i < accs.length; i++)
        for (int j = 0; j < accs[i].length; j++)
            if (takeMean)
                accs[i][j] = StatisticalUtilities.mean(foldaccs[i][j], false);
            else
                accs[i][j] = StatisticalUtilities.median(foldaccs[i][j]);
    return accs;
}

// Per-estimator/dataset standard deviation over folds, [estimator][dataset].
protected static double[][] findStddevsOverFolds(double[][][] foldaccs) {
    double[][] devs = new double[foldaccs.length][foldaccs[0].length];
    for (int i = 0; i < devs.length; i++)
        for (int j = 0; j < devs[i].length; j++)
            devs[i][j] = StatisticalUtilities.standardDeviation(foldaccs[i][j], false, StatisticalUtilities.mean(foldaccs[i][j], false));
    return devs;
}

// Orders estimators best-first by mean rank: res[estimator] = output position, 0 = lowest
// (best) average rank. Tied estimators receive consecutive positions. Works by repeatedly
// selecting the current minima and knocking them out with a Double.MAX_VALUE sentinel in the
// local avgranks copy (the input r is not modified).
protected static int[] findOrdering(double[][] r) {
    double[] avgranks = new double[r.length];
    for (int i = 0; i < r.length; i++)
        avgranks[i] = StatisticalUtilities.mean(r[i], false);

    int[] res = new int[avgranks.length];
    int i = 0;
    while (i < res.length) {
        ArrayList<Integer> mins = util_min(avgranks);
        for (int j = 0; j < mins.size(); j++) {
            res[mins.get(j)] = i++;
            avgranks[mins.get(j)] = Double.MAX_VALUE;
        }
    }
    return res;
}

// As findOrdering, but worst-first: highest average rank gets position 0.
protected static int[] findReverseOrdering(double[][] r) {
    double[] avgranks = new double[r.length];
    for (int i = 0; i < r.length; i++)
        avgranks[i] = StatisticalUtilities.mean(r[i], false);

    int[] res = new int[avgranks.length];
    int i = 0;
    while (i < res.length) {
        ArrayList<Integer> maxs = util_max(avgranks);
        for (int j = 0; j < maxs.size(); j++) {
            res[maxs.get(j)] = i++;
            avgranks[maxs.get(j)] = -Double.MAX_VALUE;
        }
    }
    return res;
}

// Indices of the minimum value in d, allowing ties.
// NOTE(review): min is initialised to d.length+1, so any value >= d.length+1 can never be
// selected and an all-large input returns null (NPE at callers). This is deliberate for the
// rank arrays it is called on (values <= number of estimators, and eval_metricDsetGroups
// relies on ranks[0] = Double.MAX_VALUE being skipped) — do not "fix" without auditing callers.
protected static ArrayList<Integer> util_min(double[] d) {
    double min = d.length+1;
    ArrayList<Integer> minIndices = null;
    for (int c = 0; c < d.length; c++) {
        if(d[c] < min){
            min = d[c];
            minIndices = new ArrayList<>();
            minIndices.add(c);
        }else if(d[c] == min){
            minIndices.add(c);
        }
    }
    return minIndices;
}

// Indices of the maximum value in d, allowing ties.
// NOTE(review): max starts at -1, so strictly-negative values can never be selected and an
// all-negative input returns null — fine for the (positive) rank data it is used on, but
// would silently misbehave on general data.
protected static ArrayList<Integer> util_max(double[] d) {
    double max = -1;
    ArrayList<Integer> maxIndices = null;
    for (int c = 0; c < d.length; c++) {
        if(d[c] > max){
            max = d[c];
            maxIndices = new ArrayList<>();
            maxIndices.add(c);
        }else if(d[c] == max){
            maxIndices.add(c);
        }
    }
    return maxIndices;
}

// Permutes s so that input element i lands at position ordering[i] (ordering is a permutation,
// e.g. from findOrdering).
protected static String[] util_order(String[] s, int[] ordering) {
    String[] res = new String[s.length];
    for (int i = 0; i < ordering.length; i++)
        res[ordering[i]] = s[i];
    return res;
}

// Overload of util_order for 2d result tables; rows are moved by reference, not copied.
protected static double[][] util_order(double[][] s, int[] ordering) {
    double[][] res = new double[s.length][];
    for (int i = 0; i < ordering.length; i++)
        res[ordering[i]] = s[i];
    return res;
}

// Overload of util_order for 3d fold tables; slices are moved by reference, not copied.
protected static double[][][] util_order(double[][][] s, int[] ordering) {
    double[][][] res = new double[s.length][][];
    for (int i = 0; i < ordering.length; i++)
        res[ordering[i]] = s[i];
    return res;
}

/**
 * @param accs [classifiers][acc on datasets]
 * @param higherIsBetter if true, larger values will receive a better (i.e. lower) rank, false vice versa.
e.g want to maximise acc, but want to minimise time * @return [classifiers][rank on dataset] */ protected static double[][] findRanks(double[][] accs, boolean higherIsBetter) { double[][] ranks = new double[accs.length][accs[0].length]; for (int d = 0; d < accs[0].length; d++) { Double[] orderedAccs = new Double[accs.length]; for (int c = 0; c < accs.length; c++) orderedAccs[c] = accs[c][d]; if (higherIsBetter) Arrays.sort(orderedAccs, Collections.reverseOrder()); else Arrays.sort(orderedAccs); // //README - REDACTED, this problem is currently just being ignored, since it makes so many headaches and is so insignificant anyway // //to create parity between this and the matlab critical difference diagram code, // //rounding the *accuracies used to calculate ranks* to 15 digits (after the decimal) // //this affects the average rank summary statistic, but not e.g the average accuracy statistic // //matlab has a max default precision of 16. in a tiny number of cases, there are differences // //in accuracy that are smaller than this maximum precision, which were being taken into // //acount here (by declaring one as havign a higher rank than the other), but not being // //taken into account in matlab (which considered them a tie). // //one could argue the importance of a difference less than 1x10^-15 when comparing estimators, // //so for ranks only, will round to matlab's precision. 
rounding the accuracies everywhere // //creates a number of headaches, therefore the tiny inconsistency as a result of this // //will jsut have to be lived with // final int DEFAULT_MATLAB_PRECISION = 15; // for (int c = 0; c < accs.length; c++) { // MathContext mc = new MathContext(DEFAULT_MATLAB_PRECISION, RoundingMode.DOWN); // BigDecimal bd = new BigDecimal(orderedAccs[c],mc); // orderedAccs[c] = bd.doubleValue(); // } for (int rank = 0; rank < accs.length; rank++) { for (int c = 0; c < accs.length; c++) { // if (orderedAccs[rank] == new BigDecimal(accs[c][d], new MathContext(DEFAULT_MATLAB_PRECISION, RoundingMode.DOWN)).doubleValue()) { if (orderedAccs[rank] == accs[c][d]) { ranks[c][d] = rank; //count from one } } } //correcting ties int[] hist = new int[accs.length]; for (int c = 0; c < accs.length; c++) ++hist[(int)ranks[c][d]]; for (int r = 0; r < hist.length; r++) { if (hist[r] > 1) {//ties double newRank = 0; for (int i = 0; i < hist[r]; i++) newRank += r-i; newRank/=hist[r]; for (int c = 0; c < ranks.length; c++) if (ranks[c][d] == r) ranks[c][d] = newRank; } } //correcting for index from 1 for (int c = 0; c < accs.length; c++) ++ranks[c][d]; } return ranks; } protected static String[] eval_winsDrawsLosses(double[][] accs, String[] cnames, String[] dsets) { StringBuilder table = new StringBuilder(); ArrayList<ArrayList<ArrayList<String>>> wdlList = new ArrayList<>(); //[estimatorPairing][win/draw/loss][dsetNames] ArrayList<String> wdlListNames = new ArrayList<>(); String[][] wdlPlusMinus = new String[cnames.length*cnames.length][dsets.length]; table.append("flat" + fileHelper_header(cnames)).append("\n"); int count = 0; for (int c1 = 0; c1 < accs.length; c1++) { table.append(cnames[c1]); for (int c2 = 0; c2 < accs.length; c2++) { wdlListNames.add(cnames[c1] + "_VS_" + cnames[c2]); wdlList.add(new ArrayList<>()); wdlList.get(count).add(new ArrayList<>()); wdlList.get(count).add(new ArrayList<>()); wdlList.get(count).add(new ArrayList<>()); int wins=0, 
draws=0, losses=0; for (int d = 0; d < dsets.length; d++) { if (accs[c1][d] > accs[c2][d]) { wins++; wdlList.get(count).get(0).add(dsets[d]); wdlPlusMinus[count][d] = "1"; } else if ((accs[c1][d] == accs[c2][d])) { draws++; wdlList.get(count).get(1).add(dsets[d]); wdlPlusMinus[count][d] = "0"; } else { losses++; wdlList.get(count).get(2).add(dsets[d]); wdlPlusMinus[count][d] = "-1"; } } table.append(","+wins+"|"+draws+"|"+losses); count++; } table.append("\n"); } StringBuilder list = new StringBuilder(); for (int i = 0; i < wdlListNames.size(); ++i) { list.append(wdlListNames.get(i)); list.append("\n"); list.append("Wins("+wdlList.get(i).get(0).size()+"):"); for (String dset : wdlList.get(i).get(0)) list.append(",").append(dset); list.append("\n"); list.append("Draws("+wdlList.get(i).get(1).size()+"):"); for (String dset : wdlList.get(i).get(1)) list.append(",").append(dset); list.append("\n"); list.append("Losses("+wdlList.get(i).get(2).size()+"):"); for (String dset : wdlList.get(i).get(2)) list.append(",").append(dset); list.append("\n\n"); } StringBuilder plusMinuses = new StringBuilder(); for (int j = 0; j < wdlPlusMinus.length; j++) plusMinuses.append(",").append(wdlListNames.get(j)); for (int i = 0; i < dsets.length; i++) { plusMinuses.append("\n").append(dsets[i]); for (int j = 0; j < wdlPlusMinus.length; j++) plusMinuses.append(",").append(wdlPlusMinus[j][i]); } return new String[] { table.toString(), list.toString(), plusMinuses.toString() }; } protected static String[] eval_sigWinsDrawsLosses(double pval, double[][] accs, double[][][] foldAccs, String[] cnames, String[] dsets) { StringBuilder table = new StringBuilder(); ArrayList<ArrayList<ArrayList<String>>> wdlList = new ArrayList<>(); //[estimatorPairing][win/draw/loss][dsetNames] ArrayList<String> wdlListNames = new ArrayList<>(); String[][] wdlPlusMinus = new String[cnames.length*cnames.length][dsets.length]; table.append("p=" + pval + fileHelper_header(cnames)).append("\n"); int count = 0; for 
(int c1 = 0; c1 < foldAccs.length; c1++) { table.append(cnames[c1]); for (int c2 = 0; c2 < foldAccs.length; c2++) { wdlListNames.add(cnames[c1] + "_VS_" + cnames[c2]); wdlList.add(new ArrayList<>()); wdlList.get(count).add(new ArrayList<>()); wdlList.get(count).add(new ArrayList<>()); wdlList.get(count).add(new ArrayList<>()); int wins=0, draws=0, losses=0; for (int d = 0; d < dsets.length; d++) { if (accs[c1][d] == accs[c2][d]) { //when the accuracies are identical, p == NaN. //because NaN < 0.05 apparently it wont be counted as a draw, but a loss //so handle it here draws++; wdlList.get(count).get(1).add(dsets[d]); wdlPlusMinus[count][d] = "0"; continue; } double p = TwoSampleTests.studentT_PValue(foldAccs[c1][d], foldAccs[c2][d]); if (p > pval) { draws++; wdlList.get(count).get(1).add(dsets[d]); wdlPlusMinus[count][d] = "0"; } else { //is sig if (accs[c1][d] > accs[c2][d]) { wins++; wdlList.get(count).get(0).add(dsets[d]); wdlPlusMinus[count][d] = "1"; } else { losses++; wdlList.get(count).get(2).add(dsets[d]); wdlPlusMinus[count][d] = "-1"; } } } table.append(","+wins+"|"+draws+"|"+losses); count++; } table.append("\n"); } StringBuilder list = new StringBuilder(); for (int i = 0; i < wdlListNames.size(); ++i) { list.append(wdlListNames.get(i)); list.append("\n"); list.append("Wins("+wdlList.get(i).get(0).size()+"):"); for (String dset : wdlList.get(i).get(0)) list.append(",").append(dset); list.append("\n"); list.append("Draws("+wdlList.get(i).get(1).size()+"):"); for (String dset : wdlList.get(i).get(1)) list.append(",").append(dset); list.append("\n"); list.append("Losses("+wdlList.get(i).get(2).size()+"):"); for (String dset : wdlList.get(i).get(2)) list.append(",").append(dset); list.append("\n\n"); } StringBuilder plusMinuses = new StringBuilder(); for (int j = 0; j < wdlPlusMinus.length; j++) plusMinuses.append(",").append(wdlListNames.get(j)); for (int i = 0; i < dsets.length; i++) { plusMinuses.append("\n").append(dsets[i]); for (int j = 0; j < 
wdlPlusMinus.length; j++) plusMinuses.append(",").append(wdlPlusMinus[j][i]); } return new String[] { table.toString(), list.toString(), plusMinuses.toString() }; } /** * Intended for potentially new stats that are introduced over time (at time of writing this function, * build and especially test times), where maybe some older files in the intended analysis * dont have the stat but newer ones do, or some estimators that write their own files * (via e.g TrainAccuracyEstimate) aren't properly writing them. * * Missing for timings is defined as -1. why cant i hold all this spaghetti? * * Looking ONLY at the test files, a) because they should all be here anyway else * wouldnt have got as far as needing to call this, b) because the 'testtime' stored * in the testfold files are the test timing we're generally actually interested in, * i.e. the total prediction time of the fully trained estimator on the test set, * as opposed to the test time of the estimator on (e.g) crossvalidation folds in training * that is stored in the train file * * @returns null if any of the wanted info is missing, else the score described by the stat for each results */ private static double[][][] getTimingsIfAllArePresent(List<EstimatorEvaluation> res, Function<EstimatorResults, Double> getter) { double[][][] info = new double[res.size()][res.get(0).testResults.length][res.get(0).testResults[0].length]; for (int i = 0; i < res.size(); i++) { for (int j = 0; j < res.get(i).testResults.length; j++) { for (int k = 0; k < res.get(i).testResults[j].length; k++) { info[i][j][k] = getter.apply(res.get(i).testResults[j][k]); if (info[i][j][k] == -1) return null; } } } return info; } protected static double[][][] getInfo(List<EstimatorEvaluation> res, Function<EstimatorResults, Double> getter, String trainortest) { double[][][] info = new double[res.size()][res.get(0).testResults.length][res.get(0).testResults[0].length]; for (int i = 0; i < res.size(); i++) { if 
(trainortest.equalsIgnoreCase(trainLabel)) for (int j = 0; j < res.get(i).trainResults.length; j++) for (int k = 0; k < res.get(i).trainResults[j].length; k++) info[i][j][k] = getter.apply(res.get(i).trainResults[j][k]); else if (trainortest.equalsIgnoreCase(testLabel)) for (int j = 0; j < res.get(i).testResults.length; j++) for (int k = 0; k < res.get(i).testResults[j].length; k++) info[i][j][k] = getter.apply(res.get(i).testResults[j][k]); else { System.out.println("getInfo(), trainortest="+trainortest); System.exit(0); } } return info; } protected static String[] getNames(List<EstimatorEvaluation> res) { String[] names = new String[res.size()]; for (int i = 0; i < res.size(); i++) names[i] = res.get(i).estimatorName; return names; } protected static void jxl_buildResultsSpreadsheet(String basePath, String expName, List<PerformanceMetric> metrics) { WritableWorkbook wb = null; WorkbookSettings wbs = new WorkbookSettings(); wbs.setLocale(new Locale("en", "EN")); try { wb = Workbook.createWorkbook(new File(basePath + expName + "ResultsSheet.xls"), wbs); } catch (Exception e) { System.out.println("ERROR CREATING RESULTS SPREADSHEET"); System.out.println(e); System.exit(0); } WritableSheet summarySheet = wb.createSheet("GlobalSummary", 0); String summaryCSV = basePath + expName + "_SMALLglobalSummary.csv"; jxl_copyCSVIntoSheet(summarySheet, summaryCSV); for (int i = 0; i < metrics.size(); i++) { if (PerformanceMetric.getAllTimingStatistics().contains(metrics.get(i))) { String benchmarkSuff = metrics.get(i).benchmarked ? 
"BENCHMARKED" : "RAW"; String splitLabel = metrics.get(i).defaultSplit; jxl_buildStatSheets_timings(wb, basePath, metrics.get(i), i, splitLabel, benchmarkSuff); } else { jxl_buildStatSheets(wb, basePath, metrics.get(i), i); } } try { wb.write(); wb.close(); } catch (Exception e) { System.out.println("ERROR WRITING AND CLOSING RESULTS SPREADSHEET"); System.out.println(e); System.exit(0); } } protected static void jxl_buildStatSheets(WritableWorkbook wb, String basePath, PerformanceMetric metric, int statIndex) { String metricPath = basePath + metric + "/"; String testMetricPath = metricPath + testLabel + "/"; WritableSheet testSheet = wb.createSheet(metric+"Test", wb.getNumberOfSheets()); String testCSV = testMetricPath+ fileNameBuild_avgsFile(testLabel, metric); jxl_copyCSVIntoSheet(testSheet, testCSV); WritableSheet summarySheet = wb.createSheet(metric+"TestSigDiffs", wb.getNumberOfSheets()); String summaryCSV = testMetricPath + fileNameBuild_summaryFile(testLabel, metric); jxl_copyCSVIntoSheet(summarySheet, summaryCSV); } protected static void jxl_buildStatSheets_timings(WritableWorkbook wb, String basePath, PerformanceMetric metric, int statIndex, String evalSet, String timingType) { // ************* the difference: timings folder assumed instead of going by the specific metric name //i.e Timings/TRAIN/TrainTimings and Timings/TEST/TestTimings //instead of TrainTimings/TRAIN/TrainTimings ... 
String metricPath = basePath + "Timings"+timingType+"/" + evalSet + "/"; WritableSheet avgsSheet = wb.createSheet(metric.name, wb.getNumberOfSheets()); String testCSV = metricPath + fileNameBuild_avgsFile(evalSet, metric); jxl_copyCSVIntoSheet(avgsSheet, testCSV); WritableSheet summarySheet = wb.createSheet(metric.name+"SigDiffs", wb.getNumberOfSheets()); String summaryCSV = metricPath + fileNameBuild_summaryFile(evalSet, metric); jxl_copyCSVIntoSheet(summarySheet, summaryCSV); } protected static void jxl_copyCSVIntoSheet(WritableSheet sheet, String csvFile) { try { Scanner fileIn = new Scanner(new File(csvFile)); int rowInd = 0; while (fileIn.hasNextLine()) { Scanner lineIn = new Scanner(fileIn.nextLine()); lineIn.useDelimiter(","); int colInd = -1; while (lineIn.hasNext()) { colInd++; //may not reach end of block, so incing first and initialising at -1 String cellContents = lineIn.next(); WritableFont font = new WritableFont(WritableFont.ARIAL, 10); WritableCellFormat format = new WritableCellFormat(font); try { int iCellContents = Integer.parseInt(cellContents); sheet.addCell(new jxl.write.Number(colInd, rowInd, iCellContents, format)); continue; //if successful, val was int, has been written, move on } catch (NumberFormatException nfm) { } try { double dCellContents = Double.parseDouble(cellContents); sheet.addCell(new jxl.write.Number(colInd, rowInd, dCellContents, format)); continue; //if successful, val was int, has been written, move on } catch (NumberFormatException nfm) { } sheet.addCell(new jxl.write.Label(colInd, rowInd, cellContents, format)); } rowInd++; } } catch (Exception e) { System.out.println("ERROR BUILDING RESULTS SPREADSHEET, COPYING CSV"); System.out.println(e); System.exit(0); } } public static Pair<String[], double[][]> matlab_readRawFile(String file, int numDsets) throws FileNotFoundException { ArrayList<String> cnames = new ArrayList<>(); Scanner in = new Scanner(new File(file)); Scanner linein = new Scanner(in.nextLine()); 
linein.useDelimiter(","); while (linein.hasNext()) cnames.add(linein.next()); double[][] vals = new double[cnames.size()][numDsets]; for (int d = 0; d < numDsets; d++) { linein = new Scanner(in.nextLine()); linein.useDelimiter(","); for (int c = 0; c < cnames.size(); c++) vals[c][d] = linein.nextDouble(); } return new Pair<>(cnames.toArray(new String[] { }), vals); } public static void matlab_buildPairwiseScatterDiagrams(String outPath, String expName, List<PerformanceMetric> metrics, String[] dsets) { outPath += pairwiseScatterDiaPath; for (PerformanceMetric metric : metrics) { try { boolean compStat = allComputationalMetrics.contains(metric); boolean originIsZero = !compStat; // if not a computational stat, probably in the range 0..1, keep that instead of min..max boolean drawFitLine = compStat; Pair<String[], double[][]> asd = matlab_readRawFile(outPath + fileNameBuild_pws(expName, metric.name) + ".csv", dsets.length); String[] estimatorNames = asd.var1; double[][] allResults = asd.var2; int numEstimator = allResults.length; MatlabController proxy = MatlabController.getInstance(); for (int c1 = 0; c1 < numEstimator-1; c1++) { for (int c2 = c1+1; c2 < numEstimator; c2++) { String c1name = estimatorNames[c1]; String c2name = estimatorNames[c2]; double[] c1res = allResults[c1]; double[] c2res = allResults[c2]; if (c1name.compareTo(c2name) > 0) { String t = c1name; c1name = c2name; c2name = t; double[] t2 = c1res; c1res = c2res; c2res = t2; } String pwFolderName = outPath + c1name + "vs" + c2name + "/"; (new File(pwFolderName)).mkdir(); StringBuilder sb = new StringBuilder("array = ["); for (int i = 0; i < dsets.length; i++) { sb.append(c1res[i] + "," + c2res[i] + ";"); } proxy.eval(sb.toString() + "];"); final StringBuilder concat = new StringBuilder(); concat.append("'"); concat.append(c1name.replaceAll("_", "\\\\_")); concat.append("',"); concat.append("'"); concat.append(c2name.replaceAll("_", "\\\\_")); concat.append("'"); proxy.eval("labels = {" + 
concat.toString() + "};"); // proxy.eval("'" + fileNameBuild_pwsInd(c1name, c2name, metric.name) + "'"); //just print the filename in the matlab window, for log of progress. no longer printing fig details proxy.eval("pairedscatter('" + pwFolderName + fileNameBuild_pwsInd(c1name, c2name, metric.name).replaceAll("\\.", "") + "',array(:,1),array(:,2),labels,'"+metric.name+"','"+metric.comparisonDescriptor+"',"+drawFitLine+","+originIsZero+");"); proxy.eval("clear"); } } } catch (Exception io) { System.out.println("buildPairwiseScatterDiagrams("+outPath+") failed loading " + metric.name + " file\n" + io); } } } public static int[] dsetGroups_clusterDsetResults(double[/*estimator*/][/*dataset*/] results) { double[/*dataset*/][/*estimator*/] dsetScores = GenericTools.cloneAndTranspose(results); int numDsets = dsetScores.length; for (int dset = 0; dset < dsetScores.length; dset++) { double dsetAvg = StatisticalUtilities.mean(dsetScores[dset], false); for (int clsfr = 0; clsfr < dsetScores[dset].length; clsfr++) dsetScores[dset][clsfr] -= dsetAvg; } Instances clusterData = InstanceTools.toWekaInstances(dsetScores); XMeans xmeans = new XMeans(); xmeans.setMaxNumClusters(Math.min((int)Math.sqrt(numDsets), 5)); xmeans.setSeed(0); try { xmeans.buildClusterer(new Instances(clusterData)); //pass copy, just in case xmeans does any kind of reordering of //instances. 
we want to maintain order of dsets/instances for indexing purposes } catch (Exception e) { System.out.println("Problem building clusterer for post hoc dataset groupings\n" + e); } int numClusters = xmeans.numberOfClusters(); int[] assignments = new int[numDsets+1]; assignments[numDsets] = numClusters; for (int i = 0; i < numDsets; i++) { try { assignments[i] = xmeans.clusterInstance(clusterData.instance(i)); } catch (Exception e) { System.out.println("Problem assigning clusters in post hoc dataset groupings, dataset " + i + "\n" + e); } } return assignments; } public static void main(String[] args) throws Exception { String[] settings=new String[6]; settings[0]="Z:/Data/UCIDelgado/"; settings[1]="Z:/Results_7_2_19/CAWPEReproducabiltyTests/CAWPEReproducabiltyTest23/Results/"; settings[2]="false"; settings[3]="a"; settings[4]="a"; settings[5]="1"; String[] datasets = { "flags","glass","haberman-survival","hayes-roth","heart-cleveland","heart-hungarian","heart-switzerland","heart-va","hepatitis","hill-valley","horse-colic","ilpd-indian-liver","image-segmentation","ionosphere","iris","led-display","lenses","letter","libras","low-res-spect","lung-cancer","lymphography","mammographic", "molec-biol-promoter","molec-biol-splice","monks-1","monks-2","monks-3","mushroom","musk-1","musk-2","nursery","oocytes_merluccius_nucleus_4d","oocytes_merluccius_states_2f","oocytes_trisopterus_nucleus_2f", "oocytes_trisopterus_states_5b","optical","ozone","page-blocks","parkinsons","pendigits","pima","pittsburg-bridges-MATERIAL","pittsburg-bridges-REL-L","pittsburg-bridges-SPAN","pittsburg-bridges-T-OR-D", "pittsburg-bridges-TYPE","planning","plant-margin","plant-shape","plant-texture","post-operative","primary-tumor","ringnorm","seeds","semeion","soybean","spambase","spect","spectf","statlog-australian-credit", 
"statlog-german-credit","statlog-heart","statlog-image","statlog-landsat","statlog-shuttle","statlog-vehicle","steel-plates","synthetic-control","teaching","thyroid","tic-tac-toe","titanic","trains","twonorm", "vertebral-column-2clases","vertebral-column-3clases","wall-following","waveform","waveform-noise","wine","wine-quality-red","wine-quality-white","yeast","zoo" }; String[] estimators = new String[]{ "NN", "C45", "Logistic", "SVML" }; // ClassifierExperiments.ExperimentalArguments expSettings = new ClassifierExperiments.ExperimentalArguments(settings); // setupAndRunMultipleExperimentsThreaded(expSettings, estimators,datasets,0,3); // new MultipleEstimatorEvaluation("Z:/Results_7_2_19/CAWPEReproducabiltyTests/CAWPEReproducabiltyTest23/Analysis", "timingsDiaTest", 3). setTestResultsOnly(true). // setTestResultsOnly(false). setBuildMatlabDiagrams(true). // setBuildMatlabDiagrams(false). setUseAccuracyOnly(). setDatasets(datasets). readInEstimators(estimators, estimators, "Z:/Results_7_2_19/CAWPEReproducabiltyTests/CAWPEReproducabiltyTest22/Results/"). runComparison(); } }
88,736
47.783397
348
java
tsml-java
tsml-java-master/src/main/java/evaluation/MultipleEstimatorEvaluation.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package evaluation; import ResultsProcessing.MatlabController; import evaluation.storage.EstimatorResultsCollection; import java.io.File; import java.io.FileNotFoundException; import java.io.FilenameFilter; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Scanner; import utilities.DebugPrinting; /** * This essentially just wraps EstimatorResultsAnalysis.performFullEvaluation(...) in a nicer to use way. Will be updated over time * * Builds summary stats, sig tests, and optionally matlab dias for the EstimatorResults objects provided/files pointed to on disk. Can optionally use * just the test results, if that's all that is available, or both train and test (will also compute the train test diff) * * USAGE: see workingExampleCodeRunnableOnTSCServerMachine() for fleshed out example, in short though: * Construct object, set any non-default bool options, set any non-default statistics to use, set datasets to compare on, and (rule of thumb) LASTLY add * estimators/results located in memory or on disk and call runComparison(). 
* * Least-code one-off use case that's good enough for most problems is: * new MultipleEstimatorEvaluation("write/path/", "experimentName", numFolds). * setDatasets(development.experiments.DataSets.UCIContinuousFileNames). * readInEstimators(new String[] {"NN", "C4.5"}, baseReadingPath). * runComparison(); * * Will call findAllStatsOnce on each of the EstimatorResults (i.e. will do nothing if findAllStats has already been called elsewhere before), * and there's a bool (default true) to set whether to null the instance prediction info after stats are found to save memory. * If some custom analysis method not defined natively in estimatorresults that uses the individual prediction info, * (defined using addEvaluationStatistic(String statName, Function<EstimatorResults, Double> estimatorResultsManipulatorFunction)) will need to keep the info, but that can get problematic depending on how many estimator/datasets/folds there are For some reason, the first excel workbook writer library i found/used makes xls files (instead of xlsx) and doesn't support recent excel default fonts. Just open it and saveas if you want to switch it over. 
There's a way to globally change font in a workbook if you want to change it back Future work (here and in EstimatorResultsAnalysis.performFullEvaluation(...)) when wanted/needed could be to handle incomplete results (e.g random folds missing), more matlab figures over time, and more refactoring of the obviously bad parts of the code * * @author James Large (james.large@uea.ac.uk) */ public class MultipleEstimatorEvaluation implements DebugPrinting { private String writePath; private String experimentName; private List<String> datasets; private List<String> estimatorsInStorage; private List<String> estimatorsInOutput; private List<String> readPaths; private Map<String, Map<String, String[]>> datasetGroupings; // Map<GroupingMethodTitle(e.g "ByNumAtts"), Map<GroupTitle(e.g "<100"), dsetsInGroup(must be subset of datasets)>> private EstimatorResultsCollection resultsCollection; private int numFolds; private List<PerformanceMetric> metrics; private EstimatorResultsCollection.ResultsType resultsType = EstimatorResultsCollection.ResultsType.CLASSIFICATION; /** * if true, the relevant .m files must be located in the netbeans project directory */ private boolean buildMatlabDiagrams; /** * if true, will not attempt to load trainFold results, and will not produce stats for train or traintestdiffs results */ private boolean testResultsOnly; /** * if true, will basically just transpose the results, and swap the dataset names for the estimatornames. * ranks, sig tests, etc, will then compare the 'performance of datasets'. Intended use when comparing * e.g. different preprocessing techniques which are saved as arffs and then a collection of estimators * are evaluated on each. */ private boolean evaluateDatasetsOverEstimators; /** * if true, will perform xmeans clustering on the estimatorXdataset results, to find data-driven datasetgroupings, as well * as any extra dataset groupings you've defined. 
* * 1) for each dataset, each estimator's [stat] is replaced by its difference to the util_mean for that dataset * e.g if scores of 3 estimators on a dataset are { 0.8, 0.7, 0.6 }, the new vals will be { 0.1, 0, -0.1 } * * 2) weka instances are formed from this data, with estimators as atts, datasets as insts * * 3) xmeans clustering performed, as a (from a human input pov) quick way of determining number of clusters + those clusters * * 4) perform the normal grouping analysis based on those clusters */ private boolean performPostHocDsetResultsClustering; /** * if true, will close the matlab connected once analysis complete (if it was opened) * if false, will allow for multiple stats runs in a single execution, but the * thread will not end while the matlab instance is open, so the connection must * be closed or execution terminated manually */ private boolean closeMatlabConnectionWhenFinished = true; /** * If false, all combinations of all splits/estimators/datasets/folds must be present, * else the evaluation will not proceed. * * If true, missing results shall be ignored, and only the "minimal complete subset" * shall be evaluated. The minimal complete subsets comprised of the datasets that ALL estimators * have completed ALL folds on. * * As such, the evaluation shall only be performed on datasets that all the estimators * have completed. If this is 0, nothing will happen, of course. 
*/ private boolean ignoreMissingResults = false; /** * @param experimentName forms the analysis directory name, and the prefix to most files */ public MultipleEstimatorEvaluation(String writePath, String experimentName, int numFolds) { this.writePath = writePath; this.experimentName = experimentName; this.numFolds = numFolds; this.buildMatlabDiagrams = false; this.testResultsOnly = true; this.performPostHocDsetResultsClustering = false; this.datasets = new ArrayList<>(); this.datasetGroupings = new HashMap<>(); this.resultsCollection = new EstimatorResultsCollection(); this.estimatorsInOutput = new ArrayList<>(); this.estimatorsInStorage = new ArrayList<>(); this.readPaths = new ArrayList<>(); this.metrics = PerformanceMetric.getDefaultStatistics(); } /** * if true, will basically just transpose the results, and swap the dataset names for the estimatornames. * ranks, sig tests, etc, will then compare the 'performance of datasets'. Intended use when comparing * e.g. different preprocessing techniques which are saved as arffs and then a collection of estimators * are evaluated on each. 
*/ // public void setEvaluateDatasetsOverEstimators(boolean evaluateDatasetsOverEstimators) { // this.evaluateDatasetsOverEstimators = evaluateDatasetsOverEstimators; // } /** * if true, will not attempt to load trainFold results, and will not produce stats for train or traintestdiffs results */ public MultipleEstimatorEvaluation setTestResultsOnly(boolean b) { testResultsOnly = b; return this; } /** * if true, the relevant .m files must be located in the netbeans project directory */ public MultipleEstimatorEvaluation setBuildMatlabDiagrams(boolean b) { buildMatlabDiagrams = b; closeMatlabConnectionWhenFinished = true; return this; } /** * if true, the relevant .m files must be located in the netbeans project directory */ public MultipleEstimatorEvaluation setBuildMatlabDiagrams(boolean b, boolean closeMatlabConnectionWhenFinished) { buildMatlabDiagrams = b; this.closeMatlabConnectionWhenFinished = closeMatlabConnectionWhenFinished; return this; } /** * if true, will null the individual prediction info of each EstimatorResults object after stats are found */ public MultipleEstimatorEvaluation setCleanResults(boolean cleanResults) { resultsCollection.setCleanResults(cleanResults); return this; } public MultipleEstimatorEvaluation setIgnoreMissingDistributions(boolean ignoreMissingDistributions) { resultsCollection.setIgnoreMissingDistributions(ignoreMissingDistributions); return this; } /** * if true, will perform xmeans clustering on the estimatorXdataset results, to find data-driven datasetgroupings, as well * as any extra dataset groupings you've defined. 
* * 1) for each dataset, each estimator's [stat] is replaced by its difference to the util_mean for that dataset e.g if scores of 3 estimators on a dataset are { 0.8, 0.7, 0.6 }, the new vals will be { 0.1, 0, -0.1 } 2) weka instances are formed from this data, with estimators as atts, datasets as insts 3) xmeans clustering performed, as a (from a human input pov) quick way of determining number of clusters + those clusters 4) perform the normal grouping analysis based on those clusters */ public MultipleEstimatorEvaluation setPerformPostHocDsetResultsClustering(boolean b) { performPostHocDsetResultsClustering = b; return this; } /** * @param datasetListFilename the path and name of a file containing a list of datasets, one per line * @throws FileNotFoundException */ public MultipleEstimatorEvaluation setDatasets(String datasetListFilename) throws FileNotFoundException { Scanner filein = new Scanner(new File(datasetListFilename)); List<String> dsets = new ArrayList<>(); while (filein.hasNextLine()) dsets.add(filein.nextLine()); return setDatasets(dsets); } public MultipleEstimatorEvaluation setDatasets(List<String> datasets) { this.datasets = datasets; return this; } public MultipleEstimatorEvaluation setDatasets(String[] datasets) { this.datasets = Arrays.asList(datasets); return this; } /** * Pass a directory containing a number of text files. The directory name (not including path) * becomes the groupingMethodName (e.g ByNumAtts). Each text file contains a newline-separated * list of datasets for an individual group. The textfile's name (excluding .txt file suffix) * is the name of that group. */ public MultipleEstimatorEvaluation setDatasetGroupingFromDirectory(String groupingDirectory) throws FileNotFoundException { setDatasetGroupingFromDirectory(groupingDirectory, (new File(groupingDirectory)).getName()); return this; } /** * Use this if you want to define a different grouping method name to the directory name * for clean printing purposes/clarity. 
E.g directory name might be 'UCRDsetGroupingByNumAtts_2groups', but the * name you define to be printed on the analysis could just be 'ByNumAtts' * * Pass a directory containing a number of text files. Each text file contains a newline-separated * list of datasets for an individual group. The textfile's name (excluding .txt file suffix) * is the name of that group. */ public MultipleEstimatorEvaluation setDatasetGroupingFromDirectory(String groupingDirectory, String customGroupingMethodName) throws FileNotFoundException { clearDatasetGroupings(); addDatasetGroupingFromDirectory(groupingDirectory, customGroupingMethodName); return this; } /** * Sets the type of results to load in, i.e. classification or clustering */ public void setResultsType(EstimatorResultsCollection.ResultsType resultsType) { this.resultsType = resultsType; } /** * Pass a directory containing a number of DIRECTORIES that define groupings. Each subdirectory contains * a number of text files. The names of these subdirectories define the grouping method names. * Each text file within contains a newline-separated * list of datasets for an individual group. The textfile's name (excluding .txt file suffix) * is the name of that group. */ public MultipleEstimatorEvaluation addAllDatasetGroupingsInDirectory(String groupingSuperDirectory) throws FileNotFoundException { for (String groupingDirectory : (new File(groupingSuperDirectory)).list(new FilenameFilter() { @Override public boolean accept(File dir, String name) { return dir.isDirectory(); } })) { addDatasetGroupingFromDirectory(groupingSuperDirectory + groupingDirectory); } return this; } /** * Pass a directory containing a number of text files. Each text file contains a newline-separated * list of datasets for an individual group. The textfile's name (excluding .txt file suffix) * is the name of that group. 
*/ public MultipleEstimatorEvaluation addDatasetGroupingFromDirectory(String groupingDirectory) throws FileNotFoundException { addDatasetGroupingFromDirectory(groupingDirectory, (new File(groupingDirectory)).getName()); return this; } /** * Use this if you want to define a different grouping method name to the directory name * for clean printing purposes/clarity. E.g directory name might be 'UCRDsetGroupingByNumAtts_2groups', but the * name you define to be printed on the analysis could just be 'ByNumAtts' * * Pass a directory containing a number of text files. Each text file contains a newline-separated * list of datasets for an individual group. The textfile's name (excluding .txt file suffix) * is the name of that group. */ public MultipleEstimatorEvaluation addDatasetGroupingFromDirectory(String groupingDirectory, String customGroupingMethodName) throws FileNotFoundException { File[] groups = (new File(groupingDirectory)).listFiles(); String[] groupNames = new String[groups.length]; String[][] dsets = new String[groups.length][]; for (int i = 0; i < groups.length; i++) { groupNames[i] = groups[i].getName().replace(".txt", "").replace(".csv", ""); Scanner filein = new Scanner(groups[i]); List<String> groupDsets = new ArrayList<>(); while (filein.hasNextLine()) groupDsets.add(filein.nextLine()); dsets[i] = groupDsets.toArray(new String [] { }); } addDatasetGrouping(customGroupingMethodName, groupNames, dsets); return this; } /** * The purely array based method for those inclined * * @param groupingMethodName e.g "ByNumAtts" * @param groupNames e.g { "<100", ">100" }, where group name indices line up with outer array of 'groups' * @param groups [groupNames.length][variablelength number of datasets] */ public MultipleEstimatorEvaluation setDatasetGrouping(String groupingMethodName, String[] groupNames, String[][] groups) { clearDatasetGroupings(); addDatasetGrouping(groupingMethodName, groupNames, groups); return this; } /** * The purely array based method for 
those inclined * * @param groupingMethodName e.g "ByNumAtts" * @param groupNames e.g { "<100", ">100" }, where group name indices line up with outer array of 'groups' * @param groups [groupNames.length][variablelength number of datasets] */ public MultipleEstimatorEvaluation addDatasetGrouping(String groupingMethodName, String[] groupNames, String[][] groups) { Map<String, String[]> groupsMap = new HashMap<>(); for (int i = 0; i < groupNames.length; i++) groupsMap.put(groupNames[i], groups[i]); datasetGroupings.put(groupingMethodName, groupsMap); return this; } public MultipleEstimatorEvaluation clearDatasetGroupings() { this.datasetGroupings.clear(); return this; } /** * 4 stats: acc, balanced acc, auroc, nll */ public MultipleEstimatorEvaluation setUseDefaultEvaluationStatistics() { metrics = PerformanceMetric.getDefaultStatistics(); return this; } public MultipleEstimatorEvaluation setUseAccuracyOnly() { metrics = PerformanceMetric.getAccuracyStatistic(); return this; } public MultipleEstimatorEvaluation setUseAllStatistics() { metrics = PerformanceMetric.getAllPredictionStatistics(); return this; } public MultipleEstimatorEvaluation setUseEarlyClassificationStatistics() { metrics = PerformanceMetric.getEarlyClassificationStatistics(); return this; } public MultipleEstimatorEvaluation setUseClusteringStatistics() { metrics = PerformanceMetric.getClusteringStatistics(); return this; } public MultipleEstimatorEvaluation setUseRegressionStatistics() { metrics = PerformanceMetric.getRegressionStatistics(); return this; } /** * Read in the results from file estimator by estimator, can be used if results are in different locations * (e.g beast vs local) * * @param estimatorName Should exactly match the directory name of the results to use * @param baseReadPath Should be a directory containing a subdirectory named [estimatorName] * @return */ public MultipleEstimatorEvaluation readInEstimator(String estimatorName, String baseReadPath) throws Exception { return 
        // Delegate to the 3-arg overload, using the stored name as the display name too.
        readInEstimator(estimatorName, estimatorName, baseReadPath);
    }

    /**
     * Read in the results from file estimator by estimator, can be used if results are in different locations
     * (e.g beast vs local).
     *
     * @param estimatorNameInStorage Should exactly match the directory name of the results to use
     * @param estimatorNameInOutput Can provide a different 'human' friendly or context-aware name if appropriate, to be printed in the output files/on images
     * @param baseReadPath Should be a directory containing a subdirectory named [estimatorNameInStorage]
     * @return this, for builder-style chaining
     */
    public MultipleEstimatorEvaluation readInEstimator(String estimatorNameInStorage, String estimatorNameInOutput, String baseReadPath) throws Exception {
        estimatorsInStorage.add(estimatorNameInStorage);
        estimatorsInOutput.add(estimatorNameInOutput);
        readPaths.add(baseReadPath);
        return this;
    }

    /**
     * Read in the results from file from a common base path. Storage names are reused
     * as the output/display names.
     *
     * @param estimatorNames Should exactly match the directory names of the results to use
     * @param baseReadPath Should be a directory containing subdirectories with the names in estimatorNames
     * @return this, for builder-style chaining
     */
    public MultipleEstimatorEvaluation readInEstimators(String[] estimatorNames, String baseReadPath) throws Exception {
        return readInEstimators(estimatorNames, estimatorNames, baseReadPath);
    }

    /**
     * Read in the results from file from a common base path.
     *
     * @param estimatorNamesInStorage Should exactly match the directory names of the results to use
     * @param estimatorNamesInOutput Parallel array of 'human' friendly names to be printed in the output files/on images
     * @param baseReadPath Should be a directory containing subdirectories with the names in estimatorNamesInStorage
     * @return this, for builder-style chaining
     * @throws Exception if the two name arrays differ in length
     */
    public MultipleEstimatorEvaluation readInEstimators(String[] estimatorNamesInStorage, String[] estimatorNamesInOutput, String baseReadPath) throws Exception {
        if (estimatorNamesInOutput.length != estimatorNamesInStorage.length)
            throw new Exception("Sizes of the estimator names to read in and use in output differ: estimatorNamesInStorage.length=" + estimatorNamesInStorage.length + ", estimatorNamesInOutput.length="+estimatorNamesInOutput.length);

        for (int i = 0; i < estimatorNamesInStorage.length; i++)
            readInEstimator(estimatorNamesInStorage[i], estimatorNamesInOutput[i], baseReadPath);

        return this;
    }

    /**
     * If false, all combinations of all splits/estimators/datasets/folds must be present,
     * else the evaluation will not proceed.
     *
     * If true, missing results shall be ignored, and only the "minimal complete subset"
     * shall be evaluated. The minimal complete subset is comprised of the datasets that ALL estimators
     * have completed ALL folds on.
     *
     * As such, the evaluation shall only be performed on datasets that all the estimators
     * have completed. If this is 0, nothing will happen, of course.
     */
    public MultipleEstimatorEvaluation setIgnoreMissingResults(boolean ignoreMissingResults) {
        this.ignoreMissingResults = ignoreMissingResults;
        // keep the underlying collection in sync with this flag
        resultsCollection.setAllowMissingResults(ignoreMissingResults);
        return this;
    }

    // NOTE(review): the entire body of this method is commented out, so it is currently a
    // NO-OP; calling runComparison() with evaluateDatasetsOverEstimators == true therefore
    // does NOT actually transpose anything. Confirm whether this is intentional.
    //
    // Original intent (per the removed commented-out code): transpose the loaded results so
    // that datasets take the place of estimators and vice versa, i.e. go from
    //   Map<estimatorName, ClassifierResults[train/test][dataset][fold]> + list of dataset names
    // to
    //   Map<datasetName, ClassifierResults[train/test][estimator][fold]> + list of estimator names,
    // pulling the map out into parallel arrays, swapping the estimator/dataset axes
    // (skipping the train split when testResultsOnly), and rebuilding the map keyed by dataset.
    private void transposeEverything() {
    }

    /**
     * Loads all requested results and runs the full evaluation pipeline, writing analysis
     * output under writePath/experimentName. Terminal operation: call after all the
     * set/add/readIn builder calls.
     *
     * @throws Exception on loading/analysis failure
     */
    public void runComparison() throws Exception {
        resultsCollection.setEstimators(estimatorsInStorage.toArray(new String[] { }), estimatorsInOutput.toArray(new String[] { }), readPaths.toArray(new String[] { }));
        resultsCollection.setDatasets(datasets.toArray(new String[] { }));
        resultsCollection.setFolds(numFolds);
        if (testResultsOnly)
            resultsCollection.setSplit_Test();
        else
            resultsCollection.setSplit_TrainTest();
        resultsCollection.setResultsType(resultsType);

        resultsCollection.load();

        // shrink to the minimal complete subset of datasets if missing results are tolerated
        if (ignoreMissingResults)
            resultsCollection = resultsCollection.reduceToMinimalCompleteResults_datasets();

        // NOTE(review): currently a no-op, see transposeEverything()
        if (evaluateDatasetsOverEstimators)
            transposeEverything();

        EstimatorResultsAnalysis.buildMatlabDiagrams = buildMatlabDiagrams;
        EstimatorResultsAnalysis.testResultsOnly = testResultsOnly;
        EstimatorResultsAnalysis.resultsType = resultsType;

        // EstimatorResultsAnalysis will find this flag internally as cue to do clustering
        if (performPostHocDsetResultsClustering)
            datasetGroupings.put(EstimatorResultsAnalysis.clusterGroupingIdentifier, null);

        printlnDebug("Writing started");
        EstimatorResultsAnalysis.performFullEvaluation(writePath, experimentName, metrics, resultsCollection, datasetGroupings);
        printlnDebug("Writing finished");

        // tear down the matlab bridge if we opened one for diagram generation
        if (buildMatlabDiagrams && closeMatlabConnectionWhenFinished)
            MatlabController.getInstance().discconnectMatlab();
    }

    // Historical usage examples were kept here as commented-out code; removed for clarity.
    // See workingExampleCodeRunnableOnTSCServerMachine() below for a maintained example.
    public static void main(String[] args) throws Exception {
        workingExampleCodeRunnableOnTSCServerMachine();
    }

    /**
     * Worked example of a full evaluation run. Paths refer to the UEA TSC server's
     * network drive, so this only runs as-is on that machine.
     *
     * Running remotely, this takes ~34 seconds despite looking at only 10 folds of a
     * handful of datasets - the majority of that is reading the results over the network.
     * With local results it runs in about a second.
     *
     * To rerun from a clean slate, delete any existing output folder for this analysis name.
     */
    public static void workingExampleCodeRunnableOnTSCServerMachine() throws FileNotFoundException, Exception {
        String folderToWriteAnalysisTo = "Z:/Backups/Results_7_2_19/FinalisedUCIContinuousAnalysis/WORKINGEXAMPLE/";
        String nameOfAnalysisWhichWillBecomeFolderName = "ExampleTranspose";
        int numberOfFoldsAKAResamplesOfEachDataset = 10;
        MultipleEstimatorEvaluation mee = new MultipleEstimatorEvaluation(folderToWriteAnalysisTo, nameOfAnalysisWhichWillBecomeFolderName, numberOfFoldsAKAResamplesOfEachDataset); //10 folds only to make faster...

        String aFileWithListOfDsetsToUse = "Z:/Backups/Results_7_2_19/FinalisedUCIContinuousAnalysis/WORKINGEXAMPLE/dsets.txt";
        mee.setDatasets(aFileWithListOfDsetsToUse);

        String aDirectoryContainingFilesThatDefineDatasetGroupings = "Z:/Backups/Results_7_2_19/FinalisedUCIContinuousAnalysis/WORKINGEXAMPLE/dsetGroupings/evenAndOddDsets/";
        String andAnother = "Z:/Backups/Results_7_2_19/FinalisedUCIContinuousAnalysis/WORKINGEXAMPLE/dsetGroupings/topAndBotHalves/";
        mee.addDatasetGroupingFromDirectory(aDirectoryContainingFilesThatDefineDatasetGroupings);
        mee.addDatasetGroupingFromDirectory(andAnother);
        mee.setPerformPostHocDsetResultsClustering(true); //will create 3rd data-driven grouping automatically

        String[] classifiers = new String[] {"1NN", "C4.5", "NB"};
        String directoryWithResultsClassifierByClassifier = "Z:/Backups/Results_7_2_19/FinalisedUCIContinuous/";
        mee.readInEstimators(classifiers, directoryWithResultsClassifierByClassifier);

        // mee.setEvaluateDatasetsOverEstimators(true); //cannot use with the dataset groupings, in this example. could define classifier groupings though!

        mee.runComparison();
    }
}
33,257
49.162896
214
java
tsml-java
tsml-java-master/src/main/java/evaluation/MultipleEstimatorsPairwiseTest.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package evaluation;

import fileIO.InFile;
import fileIO.OutFile;

import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import statistics.tests.OneSampleTests;

/**
 * Reads in a file of accuracies for k estimators and generates a kxk matrix
 * of p-values (paired t-test, sign test, and Wilcoxon signed-rank test), plus
 * a boolean "no significant difference" matrix and cliques of mutually
 * indistinguishable estimators.
 *
 * INPUT FORMAT (csv):
 *   ,Estimator1,Estimator2,Estimator3, ...,Estimatork
 *   Problem1, 0.5,....
 *   Problem2, 0.5,....
 *   .
 *   .
 *   ProblemN, 0.5,....
 *
 * Estimators are assumed to be supplied in rank order, best first.
 *
 * NOTE: all state is held in mutable static fields, so this class is NOT
 * thread-safe; run one comparison at a time.
 *
 * @author ajb
 */
public class MultipleEstimatorsPairwiseTest {

    /** When true, suppresses progress printing during data loading and testing. */
    public static boolean beQuiet = false;

    static double[][] accs;             //ROW indicates estimator, for ease of processing
    static double[][] pValsTTest;       //ROW indicates estimator, for ease of processing
    static double[][] pValsSignTest;    //ROW indicates estimator, for ease of processing
    static double[][] pValsSignRankTest;//ROW indicates estimator, for ease of processing
    static double[][] bonferonni_pVals; //ROW indicates estimator, for ease of processing
    static boolean[][] noDifference;    //ROW indicates estimator, for ease of processing
    static int nosEstimators;
    static int nosProblems;
    static String[] names;

    /**
     * Loads the accuracy matrix from a csv file into the static fields.
     * Assumes estimator names in the first line and problem names in the first column.
     *
     * @param file path to the csv of accuracies
     * @param out  stream the loaded values are echoed to (unless beQuiet)
     */
    public static void loadData(String file, PrintStream out){
        InFile data=new InFile(file);
        nosProblems=data.countLines()-1;
        // Reopen to reset the read position; InFile offers no rewind.
        // NOTE(review): neither InFile instance is explicitly closed; presumably
        // handled internally by fileIO.InFile - confirm.
        data=new InFile(file);
        String[] temp=data.readLine().split(",");
        nosEstimators=temp.length-1;
        names=new String[nosEstimators];
        for(int i=0;i<nosEstimators;i++)
            names[i]=temp[i+1];
        accs=new double[nosEstimators][nosProblems];
        for(int j=0;j<nosProblems;j++){
            String[] line = data.readLine().split(",");
            if(!beQuiet)
                System.out.print("Problem ="+line[0]+",");
            for(int i=0;i<nosEstimators;i++){
                accs[i][j]=Double.parseDouble(line[i+1]);
                if (!beQuiet)
                    out.print(accs[i][j]+",");
            }
            if (!beQuiet)
                out.print("\n");
        }
    }

    /** Loads the accuracy matrix from a csv file, echoing to System.out. */
    public static void loadData(String file){
        loadData(file, System.out);
    }

    /**
     * Fills the upper triangles of the three p-value matrices by running the
     * one-sample tests on the per-problem accuracy differences of each
     * estimator pair. Lower triangles are left at 0.
     */
    public static void findPVals(){
        pValsTTest=new double[nosEstimators][nosEstimators];
        pValsSignTest=new double[nosEstimators][nosEstimators];
        pValsSignRankTest=new double[nosEstimators][nosEstimators];
        OneSampleTests test=new OneSampleTests();
        for(int i=0;i<nosEstimators;i++) {
            for(int j=i+1;j<nosEstimators;j++){
                //Find differences
                double[] diff=new double[accs[i].length];
                for(int k=0;k<accs[i].length;k++)
                    diff[k]=accs[i][k]-accs[j][k];
                String str=test.performTests(diff);
                if(!beQuiet)
                    System.out.println("TEST Estimator "+names[i]+" VS "+names[j]+ " returns string "+str);
                // performTests returns a csv string; fields 2/5/8 are the p-values
                // of the t-test, sign test and signed-rank test respectively.
                String[] tmp=str.split(",");
                pValsTTest[i][j]=Double.parseDouble(tmp[2]);
                pValsSignTest[i][j]=Double.parseDouble(tmp[5]);
                pValsSignRankTest[i][j]=Double.parseDouble(tmp[8]);
            }
        }
    }

    /**
     * Writes the antisymmetric matrix of mean accuracy differences
     * (row estimator minus column estimator) to file as csv.
     */
    public static void findMeanDifferences(String file){
        double[][] meanDiff=new double[nosEstimators][nosEstimators];
        OutFile outf=new OutFile(file);
        for(int i=0;i<nosEstimators;i++) {
            for(int j=i+1;j<nosEstimators;j++){
                for(int k=0;k<accs[i].length;k++)
                    meanDiff[i][j]+=accs[i][k]-accs[j][k];
                meanDiff[i][j]/=accs[i].length;
                meanDiff[j][i]=-meanDiff[i][j];
            }
        }
        for(int i=0;i<nosEstimators;i++) {
            for(int j=0;j<nosEstimators;j++)
                outf.writeString(meanDiff[i][j]+",");
            outf.writeString("\n");
        }
    }

    /**
     * Fills the symmetric noDifference matrix: true where the signed-rank
     * p-value is >= alpha (no significant difference), always true on the diagonal.
     *
     * @param alpha      significance level, already corrected by the caller if desired
     * @param printPVals unused; retained for interface compatibility
     */
    public static void findDifferences(double alpha,boolean printPVals){
        noDifference=new boolean[nosEstimators][nosEstimators];
        for(int i=0;i<nosEstimators;i++) {
            noDifference[i][i]=true; // an estimator is never different from itself
            for(int j=i+1;j<nosEstimators;j++){
                noDifference[i][j]=true;
                noDifference[j][i]=true;
                // decision is based on the signed-rank test only
                if(pValsSignRankTest[i][j]<alpha){
                    noDifference[i][j]=false;
                    noDifference[j][i]=false;
                }
            }
        }
    }

    /**
     * Loads accuracies from input, runs the tests at alpha=0.1 with a
     * control-adjusted correction (alpha / (k-1)), and writes the
     * noDifference matrix to output as csv.
     */
    public static void runTests(String input, String output){
        loadData(input);
        findPVals();
        double alpha=0.1;
        //Control adjusted (rather than full Bonferroni over all k(k-1)/2 pairs)
        alpha/=nosEstimators-1;
        findDifferences(alpha,true);
        //Sort estimators by rank: assume already done
        OutFile cliques=new OutFile(output);
        for(int i=0;i<nosEstimators;i++){
            for(int j=0;j<nosEstimators;j++)
                cliques.writeString(noDifference[i][j]+",");
            cliques.writeString("\n");
        }
    }

    /** Appends one named kxk matrix of doubles as csv rows (header row of names, then one row per estimator). */
    private static void appendMatrix(StringBuilder sb, String header, double[][] vals){
        sb.append(header);
        for(int i=0;i<nosEstimators;i++)
            sb.append(",").append(names[i]);
        sb.append("\n");
        for(int i=0;i<nosEstimators;i++){
            sb.append(names[i]);
            for(int j=0;j<nosEstimators;j++)
                sb.append(",").append(vals[i][j]);
            sb.append("\n");
        }
    }

    /** Appends one named kxk matrix of booleans as csv rows (header row of names, then one row per estimator). */
    private static void appendMatrix(StringBuilder sb, String header, boolean[][] vals){
        sb.append(header);
        for(int i=0;i<nosEstimators;i++)
            sb.append(",").append(names[i]);
        sb.append("\n");
        for(int i=0;i<nosEstimators;i++){
            sb.append(names[i]);
            for(int j=0;j<nosEstimators;j++)
                sb.append(",").append(vals[i][j]);
            sb.append("\n");
        }
    }

    /**
     * Runs all tests on an in-memory accuracy matrix at alpha=0.05 with a
     * control-adjusted correction (alpha / (k-1)).
     *
     * NOTE(review): d's dimensions are read as [problem][estimator]
     * (nosProblems=d.length), yet d is assigned directly to accs, which is
     * indexed [estimator][problem] everywhere else - callers appear to pass
     * the matrix already transposed; confirm against call sites.
     *
     * @param d accuracy matrix
     * @param n estimator names, parallel to d's estimator axis
     * @return csv report of all four matrices
     */
    public static StringBuilder runTests(double[][] d,String[] n){
        nosProblems=d.length;
        nosEstimators=d[0].length;
        names=n;
        accs=d;
        findPVals();
        double alpha=0.05;
        //Control adjusted (rather than full Bonferroni over all k(k-1)/2 pairs)
        alpha/=nosEstimators-1;
        findDifferences(alpha,true);
        StringBuilder results=new StringBuilder();
        appendMatrix(results,"T TEST",pValsTTest);
        results.append("\n");
        appendMatrix(results,"SIGN TEST",pValsSignTest);
        results.append("\n");
        appendMatrix(results,"SIGN RANK TEST",pValsSignRankTest);
        results.append("\n");
        appendMatrix(results,"NOSIGDIFFERENCE",noDifference);
        return results;
    }

    /**
     * As {@link #runTests(double[][], String[])} but reports only the
     * signed-rank p-value matrix.
     */
    public static StringBuilder runSignRankTest(double[][] d,String[] n){
        nosProblems=d.length;
        nosEstimators=d[0].length;
        names=n;
        accs=d;
        findPVals();
        double alpha=0.05;
        //Control adjusted (rather than full Bonferroni over all k(k-1)/2 pairs)
        alpha/=nosEstimators-1;
        findDifferences(alpha,true);
        StringBuilder results=new StringBuilder();
        appendMatrix(results,"SIGN RANK TEST \n ",pValsSignRankTest);
        return results;
    }

    /**
     * Loads accuracies from file and runs all tests at an UNCORRECTED
     * alpha=0.05 (corrections are deliberately commented out in this variant).
     *
     * @param input path to the csv of accuracies
     * @return csv report of all four matrices
     */
    public static StringBuilder runTests(String input){
        loadData(input);
        findPVals();
        double alpha=0.05;
        findDifferences(alpha,true);
        StringBuilder cliques=new StringBuilder();
        appendMatrix(cliques,"T TEST",pValsTTest);
        cliques.append("\n");
        appendMatrix(cliques,"SIGN TEST",pValsSignTest);
        cliques.append("\n");
        appendMatrix(cliques,"SIGN RANK TEST",pValsSignRankTest);
        cliques.append("\n");
        appendMatrix(cliques,"NOSIGDIFFERENCE",noDifference);
        return cliques;
    }

    /**
     * Renders the cliques of the current noDifference matrix as a matlab-style
     * 0/1 matrix literal, one row per clique.
     */
    public static String printCliques() {
        StringBuilder sb = new StringBuilder();
        sb.append("cliques = [");
        boolean[][] cliques = findCliques(noDifference);
        for (int i = 0; i < cliques.length; i++) {
            for (int j = 0; j < cliques[i].length; j++)
                sb.append(cliques[i][j] ? "1" : "0").append(" ");
            sb.append("\n");
        }
        sb.append("]\n");
        return sb.toString();
    }

    /**
     * Finds the largest non-contained 'blocks' in the matrix where all values are
     * true, i.e. all estimators are similar to all other estimators within the block.
     *
     * If 1 = true, 0 = false, for input data
     *   1 1 1 0 0
     *   1 1 1 1 0
     *   1 1 1 1 0
     *   0 1 1 1 1
     *   0 0 0 1 1
     * the resulting cliques would be
     *   1 1 1 0 0
     *   0 1 1 1 0
     *   0 0 0 1 1
     *
     * Starting each search on the diagonal (where similar always = true), tries to
     * grow a square down and to the right where all elements are true, stopping when
     * a false would be included. A square of at least size 2 is a clique; cliques
     * wholly contained in a previously found one are discarded.
     *
     * @param same symmetric no-significant-difference matrix, estimators in rank order
     * @return one boolean membership row per clique
     */
    public static boolean[][] findCliques(boolean[][] same) {
        List<List<Integer>> cliques = new ArrayList<>();
        int prevEndOfClique = 0;
        for (int i = 0; i < same.length; i++) {
            List<Integer> clique = new ArrayList<>(Arrays.asList(i));
            growClique(same, clique);
            if (clique.size() > 1) {
                //potential new clique, check it's not contained within the previous,
                //i.e. a new estimator has been included at the end of the clique
                int endOfClique = clique.get(clique.size()-1);
                if (endOfClique > prevEndOfClique) {
                    cliques.add(clique);
                    prevEndOfClique = endOfClique;
                }
                //else is just a subclique of the previous
            }
        }

        boolean[][] finalCliques = new boolean[cliques.size()][same.length];
        for (int i = 0; i < cliques.size(); ++i)
            for (int j = 0; j < cliques.get(i).size(); ++j)
                finalCliques[i][cliques.get(i).get(j)] = true;

        return finalCliques;
    }

    /**
     * Recursively extends the clique by the next estimator index while every
     * member of the current clique is similar to it. Only the row below the
     * square needs checking: the matrix is symmetric and the diagonal is true.
     */
    private static void growClique(boolean[][] same, List<Integer> clique) {
        int prevVal = clique.get(clique.size()-1);
        if (prevVal == same.length-1)
            return; // reached the end of the estimators, no more room to grow

        int cliqueStart = clique.get(0);
        int nextVal = prevVal+1;

        for (int col = cliqueStart; col < nextVal; col++) {
            if (!same[nextVal][col]) {
                //growing the clique would include a false, quit now
                return;
            }
        }

        //all checks passed, add on this index and try to grow again
        clique.add(nextVal);
        growClique(same, clique);
    }

    /** Smoke test of the clique forming on a known example (dtw example from teams). */
    private static void testNewCliques() {
        boolean[][] same = {
            { true,  true,  true,  false, false, false, },
            { true,  true,  true,  true,  false, false, },
            { true,  true,  true,  false, true,  true,  },
            { false, true,  false, true,  true,  true,  },
            { false, false, true,  true,  true,  true,  },
            { false, false, true,  true,  true,  true,  },
        };
        noDifference = same;
        System.out.println(printCliques());
    }

    //jamesl note: the old clique forming used from roughly late 2017 to august 2020 produced
    //contained cliques/edge cases in certain situations that were fixed by hand; it was replaced
    //by findCliques() above, which strictly requires all-pairs similarity within blocks.
    //(The legacy implementation and assorted commented-out example runs were removed from this
    //file; recover them from version control if needed.)

    public static void main(String[] args) {
        testNewCliques();
    }
}
21,282
35.25724
166
java
tsml-java
tsml-java-master/src/main/java/evaluation/PerformanceMetric.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package evaluation;

import evaluation.storage.ClassifierResults;
import evaluation.storage.ClustererResults;
import evaluation.storage.EstimatorResults;
import evaluation.storage.RegressorResults;

import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

/**
 * This is essentially a placeholder class, and may be expanded into a full package
 * with this being an abstract base class etc.
 *
 * For now, this is a container class for metrics and meta info about them. It contains
 *  - The name of this metric for printouts
 *  - A function to get this metric's score from an estimator results object
 *    (in future, perhaps calculate them here instead, etc)
 *  - A flag for whether this metric wants to be maximised or minimised
 *  - A flag to _suggest_ how this metric should be summarised/averaged - for now, mean vs
 *    median for e.g accs vs timings. For timings we would want to use median instead of mean
 *    to reduce effect of outliers - in future, probably just define a comparator
 *  - A descriptor for use in images when comparing estimators with this metric,
 *    e.g better/worse/slower
 *  - Maybe more in the future
 *
 * A large set of ready-made metric instances (classification, clustering, regression,
 * timing, memory) is exposed as public static fields, along with helper methods that
 * bundle them into commonly used lists.
 *
 * @author James Large (james.large@uea.ac.uk)
 */
public class PerformanceMetric {

    // appended to a metric's name to mark the benchmarked variant of a timing metric
    public static final String benchmarkSuffix = "_BM";

    public String name;                             // metric name used in printouts/file headers
    public Function<EstimatorResults, Double> getter; // extracts this metric's score from a results object
    public boolean takeMean;                        // true: summarise via mean; false: via median
    public boolean maximise;                        // true: higher is better; false: lower is better
    public boolean benchmarked;                     // for timings: whether the value is benchmark-normalised
    public String defaultSplit;                     // mainly for timing split descriptors, e.g. build time = train, pred times = test

    /**
     * currently only used for the pairwise scatter diagrams in the pipeline,
     * this refers to the descriptor for comparing the scores of a metric between
     * estimators
     *
     * If the raw value of a is HIGHER than b, then a is {better,worse,slower,faster,etc.} than b
     */
    public String comparisonDescriptor;

    /**
     * Builds a metric descriptor.
     *
     * @param metricName           name used in printouts/file headers
     * @param getScore             function extracting the score from an EstimatorResults
     * @param takeMean             true to summarise via mean, false via median
     * @param maximised            true if higher raw values are better
     * @param comparisonDescriptor word used when comparing two estimators on this metric
     * @param benchmarked          whether this is a benchmark-normalised timing
     * @param defaultSplit         split the metric is normally computed on (train/test/estimate)
     */
    public PerformanceMetric(String metricName, Function<EstimatorResults, Double> getScore, boolean takeMean, boolean maximised,
                             String comparisonDescriptor, boolean benchmarked, String defaultSplit) {
        this.name = metricName;
        this.getter = getScore;
        this.takeMean = takeMean;
        this.maximise = maximised;
        this.comparisonDescriptor = comparisonDescriptor;
        this.benchmarked = benchmarked;
        this.defaultSplit = defaultSplit;
    }

    /** Extracts this metric's score from the given results object. */
    public double getScore(EstimatorResults res) {
        return getter.apply(res);
    }

    public String toString() {
        return name;
    }

    // readability aliases for the constructor's boolean/string arguments below
    private static final boolean min = false, max = true;
    private static final boolean median = false, mean = true;
    private static final boolean isBenchmarked = true, isNotBenchmarked = false;
    private static final String better = "better", worse = "worse", slower = "slower", faster = "faster";
    private static final String train = "train", test = "test", estimate = "estimate";

    // ---- classification prediction-quality metrics (mean-summarised) ----
    public static PerformanceMetric acc    = new PerformanceMetric("ACC",    ClassifierResults.GETTER_Accuracy,         mean, max, better, isNotBenchmarked, test);
    public static PerformanceMetric balacc = new PerformanceMetric("BALACC", ClassifierResults.GETTER_BalancedAccuracy, mean, max, better, isNotBenchmarked, test);
    public static PerformanceMetric AUROC  = new PerformanceMetric("AUROC",  ClassifierResults.GETTER_AUROC,            mean, max, better, isNotBenchmarked, test);
    public static PerformanceMetric NLL    = new PerformanceMetric("NLL",    ClassifierResults.GETTER_NLL,              mean, min, worse,  isNotBenchmarked, test);
    public static PerformanceMetric F1     = new PerformanceMetric("F1",     ClassifierResults.GETTER_F1,               mean, max, better, isNotBenchmarked, test);
    public static PerformanceMetric MCC    = new PerformanceMetric("MCC",    ClassifierResults.GETTER_MCC,              mean, max, better, isNotBenchmarked, test);
    public static PerformanceMetric precision   = new PerformanceMetric("Prec",   ClassifierResults.GETTER_Precision,   mean, max, better, isNotBenchmarked, test);
    public static PerformanceMetric recall      = new PerformanceMetric("Recall", ClassifierResults.GETTER_Recall,      mean, max, better, isNotBenchmarked, test);
    public static PerformanceMetric sensitivity = new PerformanceMetric("Sens",   ClassifierResults.GETTER_Sensitivity, mean, max, better, isNotBenchmarked, test);
    public static PerformanceMetric specificity = new PerformanceMetric("Spec",   ClassifierResults.GETTER_Specificity, mean, max, better, isNotBenchmarked, test);

    // ---- raw timing metrics (median-summarised to reduce the effect of outliers) ----
    public static PerformanceMetric buildTime                  = new PerformanceMetric("TrainTimes",          ClassifierResults.GETTER_buildTimeDoubleMillis,                   median, min, slower, isNotBenchmarked, train);
    public static PerformanceMetric totalTestTime              = new PerformanceMetric("TestTimes",           ClassifierResults.GETTER_totalTestTimeDoubleMillis,               median, min, slower, isNotBenchmarked, test);
    public static PerformanceMetric avgTestPredTime            = new PerformanceMetric("AvgPredTimes",        ClassifierResults.GETTER_avgTestPredTimeDoubleMillis,             median, min, slower, isNotBenchmarked, test);
    public static PerformanceMetric fromScratchEstimateTime    = new PerformanceMetric("FromScratchEstTimes", ClassifierResults.GETTER_fromScratchEstimateTimeDoubleMillis,     median, min, slower, isNotBenchmarked, estimate);
    public static PerformanceMetric totalBuildPlusEstimateTime = new PerformanceMetric("BuildAndEstTimes",    ClassifierResults.GETTER_totalBuildPlusEstimateTimeDoubleMillis,  median, min, slower, isNotBenchmarked, estimate);
    public static PerformanceMetric extraTimeForEstimate       = new PerformanceMetric("ExtraTimeForEst",     ClassifierResults.GETTER_additionalTimeForEstimateDoubleMillis,   median, min, slower, isNotBenchmarked, estimate);

    // ---- benchmark-normalised timing metrics (names carry the _BM suffix) ----
    public static PerformanceMetric buildTimeBenchmarked                  = new PerformanceMetric("TrainTimes"+benchmarkSuffix,          ClassifierResults.GETTER_buildTimeDoubleMillisBenchmarked,                  median, min, slower, isBenchmarked, train);
    public static PerformanceMetric totalTestTimeBenchmarked              = new PerformanceMetric("TestTimes"+benchmarkSuffix,           ClassifierResults.GETTER_totalTestTimeDoubleMillisBenchmarked,              median, min, slower, isBenchmarked, test);
    public static PerformanceMetric avgTestPredTimeBenchmarked            = new PerformanceMetric("AvgPredTimes"+benchmarkSuffix,        ClassifierResults.GETTER_avgTestPredTimeDoubleMillisBenchmarked,            median, min, slower, isBenchmarked, test);
    public static PerformanceMetric fromScratchEstimateTimeBenchmarked    = new PerformanceMetric("FromScratchEstTimes"+benchmarkSuffix, ClassifierResults.GETTER_fromScratchEstimateTimeDoubleMillisBenchmarked,    median, min, slower, isBenchmarked, estimate);
    public static PerformanceMetric totalBuildPlusEstimateTimeBenchmarked = new PerformanceMetric("BuildAndEstTimes"+benchmarkSuffix,    ClassifierResults.GETTER_totalBuildPlusEstimateTimeDoubleMillisBenchmarked, median, min, slower, isBenchmarked, estimate);
    public static PerformanceMetric extraTimeForEstimateBenchmarked       = new PerformanceMetric("ExtraTimeForEst"+benchmarkSuffix,     ClassifierResults.GETTER_additionalTimeForEstimateDoubleMillisBenchmarked,  median, min, slower, isBenchmarked, estimate);

    // ---- misc resource metrics ----
    public static PerformanceMetric benchmarkTime = new PerformanceMetric("BenchmarkTimes", ClassifierResults.GETTER_benchmarkTime, median, min, slower, isNotBenchmarked, train);
    public static PerformanceMetric memory        = new PerformanceMetric("MaxMemory",      ClassifierResults.GETTER_MemoryMB,      median, min, worse,  isNotBenchmarked, train);

    // ---- early-classification metrics ----
    public static PerformanceMetric earliness    = new PerformanceMetric("Earliness",    ClassifierResults.GETTER_Earliness,    mean, min, worse,  isNotBenchmarked, test);
    public static PerformanceMetric harmonicMean = new PerformanceMetric("HarmonicMean", ClassifierResults.GETTER_HarmonicMean, mean, max, better, isNotBenchmarked, test);

    // ---- clustering metrics ----
    public static PerformanceMetric clAcc = new PerformanceMetric("CL-ACC", ClustererResults.GETTER_Accuracy,                    mean, max, better, isNotBenchmarked, test);
    public static PerformanceMetric RI    = new PerformanceMetric("RI",     ClustererResults.GETTER_RandIndex,                   mean, max, better, isNotBenchmarked, test);
    public static PerformanceMetric ARI   = new PerformanceMetric("ARI",    ClustererResults.GETTER_AdjustedRandIndex,           mean, max, better, isNotBenchmarked, test);
    public static PerformanceMetric MI    = new PerformanceMetric("MI",     ClustererResults.GETTER_MutualInformation,           mean, max, better, isNotBenchmarked, test);
    public static PerformanceMetric NMI   = new PerformanceMetric("NMI",    ClustererResults.GETTER_NormalizedMutualInformation, mean, max, better, isNotBenchmarked, test);
    public static PerformanceMetric AMI   = new PerformanceMetric("AMI",    ClustererResults.GETTER_AdjustedMutualInformation,   mean, max, better, isNotBenchmarked, test);

    // ---- regression metrics ----
    public static PerformanceMetric MSE  = new PerformanceMetric("MSE",  RegressorResults.GETTER_MSE,  mean, min, worse,  isNotBenchmarked, test);
    public static PerformanceMetric RMSE = new PerformanceMetric("RMSE", RegressorResults.GETTER_RMSE, mean, min, worse,  isNotBenchmarked, test);
    public static PerformanceMetric MAE  = new PerformanceMetric("MAE",  RegressorResults.GETTER_MAE,  mean, min, worse,  isNotBenchmarked, test);
    public static PerformanceMetric R2   = new PerformanceMetric("R2",   RegressorResults.GETTER_R2,   mean, max, better, isNotBenchmarked, test);
    public static PerformanceMetric MAPE = new PerformanceMetric("MAPE", RegressorResults.GETTER_MAPE, mean, min, worse,  isNotBenchmarked, test);

    /** Returns a mutable list containing accuracy only. */
    public static List<PerformanceMetric> getAccuracyStatistic() {
        ArrayList<PerformanceMetric> stats = new ArrayList<>();
        stats.add(acc);
        return stats;
    }

    /** Returns the default classification stats: acc, balacc, AUROC, NLL. */
    public static List<PerformanceMetric> getDefaultStatistics() {
        ArrayList<PerformanceMetric> stats = new ArrayList<>();
        stats.add(acc);
        stats.add(balacc);
        stats.add(AUROC);
        stats.add(NLL);
        return stats;
    }

    /** Returns all classification prediction-quality stats (no timings/memory). */
    public static List<PerformanceMetric> getAllPredictionStatistics() {
        ArrayList<PerformanceMetric> stats = new ArrayList<>();
        stats.add(acc);
        stats.add(balacc);
        stats.add(AUROC);
        stats.add(NLL);
        stats.add(F1);
        stats.add(MCC);
        stats.add(precision);
        stats.add(recall);
        stats.add(sensitivity);
        stats.add(specificity);
        return stats;
    }

    /** Returns all prediction stats plus the early-classification metrics. */
    public static List<PerformanceMetric> getEarlyClassificationStatistics() {
        ArrayList<PerformanceMetric> stats = new ArrayList<>();
        stats.add(acc);
        stats.add(balacc);
        stats.add(AUROC);
        stats.add(NLL);
        stats.add(F1);
        stats.add(MCC);
        stats.add(precision);
        stats.add(recall);
        stats.add(sensitivity);
        stats.add(specificity);
        stats.add(earliness);
        stats.add(harmonicMean);
        return stats;
    }

    /** Returns the clustering stats. */
    public static List<PerformanceMetric> getClusteringStatistics() {
        ArrayList<PerformanceMetric> stats = new ArrayList<>();
        stats.add(clAcc);
        stats.add(RI);
        stats.add(ARI);
        stats.add(MI);
        stats.add(NMI);
        stats.add(AMI);
        return stats;
    }

    /** Returns the regression stats. */
    public static List<PerformanceMetric> getRegressionStatistics() {
        ArrayList<PerformanceMetric> stats = new ArrayList<>();
        stats.add(MSE);
        stats.add(RMSE);
        stats.add(MAE);
        stats.add(R2);
        stats.add(MAPE);
        return stats;
    }

    /** Returns benchmarked followed by non-benchmarked timing stats. */
    public static List<PerformanceMetric> getAllTimingStatistics() {
        List<PerformanceMetric> stats = getBenchmarkedTimingStatistics();
        stats.addAll(getNonBenchmarkedTimingStatistics());
        return stats;
    }

    /** Returns the benchmark-normalised timing stats. */
    public static List<PerformanceMetric> getBenchmarkedTimingStatistics() {
        ArrayList<PerformanceMetric> stats = new ArrayList<>();
        stats.add(buildTimeBenchmarked);
        stats.add(totalTestTimeBenchmarked);
        stats.add(avgTestPredTimeBenchmarked);
        stats.add(fromScratchEstimateTimeBenchmarked);
        stats.add(totalBuildPlusEstimateTimeBenchmarked);
        stats.add(extraTimeForEstimateBenchmarked);
        return stats;
    }

    /** Returns the raw (non-benchmarked) timing stats. */
    public static List<PerformanceMetric> getNonBenchmarkedTimingStatistics() {
        ArrayList<PerformanceMetric> stats = new ArrayList<>();
        stats.add(buildTime);
        stats.add(totalTestTime);
        stats.add(avgTestPredTime);
        stats.add(fromScratchEstimateTime);
        stats.add(totalBuildPlusEstimateTime);
        stats.add(extraTimeForEstimate);
        return stats;
    }
}
14,160
58.251046
256
java
tsml-java
tsml-java-master/src/main/java/evaluation/ROCDiagramMaker.java
/*
 * Copyright (C) 2019 xmw13bzu
 *
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package evaluation;

import ResultsProcessing.MatlabController;
import static evaluation.EstimatorResultsAnalysis.matlabFilePath;
import evaluation.storage.ClassifierResults;
import java.io.File;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

/**
 * Converts results as they appear in the results pipeline (ClassifierResults)
 * into the format expected by the roccurves.m matlab script for generating
 * ROC diagrams.
 *
 * @author James Large (james.large@uea.ac.uk)
 */
public class ROCDiagramMaker {

    /** Subfolder (under the supplied output path) that diagrams are written to. */
    public static String rocDiaPath = "dias_ROCCurve/";

    /**
     * Right-pads every name with spaces to the length of the longest name so the
     * array can later be rendered as a rectangular matlab char matrix.
     *
     * @param cnames classifier names, one per classifier
     * @return a new array of equal-length, space-padded copies of the names
     */
    public static String[] formatClassifierNames(String[] cnames) {
        int maxLength = -1;
        for (String cname : cnames)
            if (cname.length() > maxLength)
                maxLength = cname.length();

        String[] paddedNames = new String[cnames.length];
        for (int i = 0; i < cnames.length; i++) {
            // StringBuilder instead of repeated String concatenation in a loop
            StringBuilder sb = new StringBuilder(cnames[i]);
            while (sb.length() < maxLength)
                sb.append(' ');
            paddedNames[i] = sb.toString();
        }
        return paddedNames;
    }

    /**
     * Pulls out, for each prediction, the probability assigned to the positive class.
     *
     * @param results       results object holding per-instance probability distributions
     * @param positiveClass index of the class treated as 'positive'
     * @return probability of the positive class for each instance, in prediction order
     */
    public static double[] extractPosClassProbabilities(ClassifierResults results, int positiveClass) {
        double[][] dists = results.getProbabilityDistributionsAsArray();
        double[] posClassProbs = new double[dists.length];
        for (int i = 0; i < posClassProbs.length; i++)
            posClassProbs[i] = dists[i][positiveClass];
        return posClassProbs;
    }

    /**
     * Finds the least frequent class value, used as the default 'positive' class
     * for the ROC diagram.
     *
     * BUG FIX: the previous implementation queried the HashMap&lt;Integer,Integer&gt;
     * with a boxed Double key (so every lookup missed) and stored the
     * post-increment expression {@code v++} (i.e. the old value), leaving all
     * counts at 0 and returning an arbitrary class. Counts are now accumulated
     * correctly via {@link Map#merge}.
     *
     * @param classVals class value of each instance
     * @return the class index with the fewest instances, or -1 if classVals is empty
     */
    public static int findMinorityClass(double[] classVals) {
        Map<Integer, Integer> classCounts = new HashMap<>();
        for (double classVal : classVals)
            classCounts.merge((int) classVal, 1, Integer::sum);

        int minClass = -1, minCount = Integer.MAX_VALUE;
        for (Map.Entry<Integer, Integer> entry : classCounts.entrySet()) {
            if (entry.getValue() < minCount) {
                minCount = entry.getValue();
                minClass = entry.getKey();
            }
        }
        return minClass;
    }

    /**
     * Builds a ROC diagram via matlab, using the minority class of the first
     * results object as the positive class.
     */
    public static void matlab_buildROCDiagrams(String outPath, String expName, String dsetName,
                                               ClassifierResults[] cresults, String[] cnames) {
        matlab_buildROCDiagrams(outPath, expName, dsetName, cresults, cnames,
                findMinorityClass(cresults[0].getTrueClassValsAsArray()));
    }

    /**
     * Builds a ROC diagram via matlab for the given classifiers' results on a dataset.
     * Writes the figure under {@code outPath + rocDiaPath}. Failures are reported to
     * stdout rather than thrown, matching the best-effort behaviour of the rest of
     * the diagram-making pipeline.
     *
     * @param outPath            base output directory
     * @param expName            experiment name, used in the output file name
     * @param dsetName           dataset name, used in the output file name
     * @param cresults           one ClassifierResults per classifier; all assumed to
     *                           share the same true class values
     * @param cnames             classifier names, parallel to cresults
     * @param positiveClassIndex class treated as 'positive' for the ROC curve
     */
    public static void matlab_buildROCDiagrams(String outPath, String expName, String dsetName,
                                               ClassifierResults[] cresults, String[] cnames,
                                               int positiveClassIndex) {
        String targetFolder = outPath + rocDiaPath;
        (new File(targetFolder)).mkdirs();
        String targetFile = targetFolder + "rocDia_" + expName + "_" + dsetName;

        try {
            MatlabController proxy = MatlabController.getInstance();
            proxy.eval("addpath(genpath('"+matlabFilePath+"'))");
            proxy.eval("m_fname = '" + targetFile + "';");

            // Turns [CAWPE, resnet, XGBoost] into ['CAWPE  '; 'resnet '; 'XGBoost']
            // - matlab char matrices must be rectangular, hence the padding.
            String[] paddedNames = formatClassifierNames(cnames);
            proxy.eval("m_cnames = " + Arrays.toString(paddedNames).replace(", ", "'; '").replace("[", "['").replace("]", "']") + ";");

            // Class values passed as an int vector.
            double[] cvals = cresults[0].getTrueClassValsAsArray();
            int[] m_cvals = new int[cvals.length];
            for (int i = 0; i < cvals.length; i++)
                m_cvals[i] = (int)cvals[i];
            proxy.eval("m_cvals = " + Arrays.toString(m_cvals) + ";");

            // One row of positive-class probabilities per classifier.
            StringBuilder probsSB = new StringBuilder();
            for (int i = 0; i < cresults.length; i++) {
                double[] probs = extractPosClassProbabilities(cresults[i], positiveClassIndex);
                probsSB.append(Arrays.toString(probs).replace("[", "").replace("]", ";"));
            }
            proxy.eval("m_posClassProbs = [ " + probsSB.toString() + " ];");

            proxy.eval("m_posClass = " + positiveClassIndex + ";");

            //function [f] = roccurves(filepathandname,classifierNames,classValues,posClassProbs,posClassLabel,visible)
            proxy.eval("roccurves(m_fname, m_cnames, m_cvals, m_posClassProbs, m_posClass, 'off')");
            proxy.eval("clear");
            proxy.discconnectMatlab(); // (sic) method name as declared by MatlabController
        } catch (Exception io) {
            System.out.println("matlab_buildROCDiagrams failed while building " +targetFile+ "\n" + io);
        }
    }

    /** Ad-hoc manual test: concatenates 10 folds of results and builds a diagram. */
    public static void main(String[] args) throws Exception {
        String baseReadPath = "C:/JamesLPHD/Alcohol/JOURNALPAPER/Results/";
        String dset = "JWRorJWB_BlackBottle";
        String[] cnames = { "CAWPE", "resnet", "XGBoost" };
        int numFolds = 10;

        ClassifierResults[][] res = new ClassifierResults[cnames.length][numFolds];
        for (int i = 0; i < res.length; i++) {
            for (int f = 0; f < numFolds; f++) {
                res[i][f] = new ClassifierResults(baseReadPath + cnames[i] + "/Predictions/" + dset + "/testFold"+f+".csv");
            }
        }

        ClassifierResults[] concatenatedRes = ClassifierResults.concatenateClassifierResults(res);
        matlab_buildROCDiagrams("C:/Temp/rocDiaTest/", "testDias", dset, concatenatedRes, cnames);
    }
}
7,024
41.835366
174
java
tsml-java
tsml-java-master/src/main/java/evaluation/evaluators/CrossValidationEvaluator.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package evaluation.evaluators;

import evaluation.storage.ClassifierResults;
import experiments.ClassifierLists;
import experiments.data.DatasetLoading;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import weka.classifiers.Classifier;
import weka.core.Instances;

/**
 * An evaluator that performs k-fold crossvalidation (default k=10) on the given
 * data and evaluates the given classifier(s) on each fold.
 *
 * Concatenated predictions across all folds (reordered back into the original
 * dataset order) are returned from the main evaluate method; predictions split
 * across each individual fold can also be retrieved afterwards via
 * resultsPerFold (inherited from MultiSamplingEvaluator).
 *
 * Folds are stratified by class and are reproducible for a fixed seed.
 *
 * @author James Large (james.large@uea.ac.uk)
 */
public class CrossValidationEvaluator extends MultiSamplingEvaluator {

    // Relation name of the dataset the current folds were built from; used to
    // detect when folds must be rebuilt for a different dataset.
    private String previousRelationName = "EmPtY";

    // folds.get(f) holds the instances assigned to fold f.
    private ArrayList<Instances> folds;
    // foldIndexing.get(f).get(i) is the index, in the ORIGINAL dataset, of
    // instance i of fold f.
    private ArrayList<ArrayList<Integer>> foldIndexing;

    public CrossValidationEvaluator() {
        super(0,false,false,false,false);
        this.folds = null;
        this.foldIndexing = null;
        this.numFolds = 10;
    }

    public CrossValidationEvaluator(int numFolds) {
        this();
        setNumFolds(numFolds);
    }

    public CrossValidationEvaluator(int seed, boolean cloneData, boolean setClassMissing, boolean cloneClassifiers, boolean maintainClassifiers) {
        super(seed,cloneData,setClassMissing, cloneClassifiers, maintainClassifiers);
        this.folds = null;
        this.foldIndexing = null;
        this.numFolds = 10;
    }

    /** @return per-fold lists of the original-dataset indices of each fold's instances */
    public ArrayList<ArrayList<Integer>> getFoldIndices() { return foldIndexing; }

    /**
     * @return the index in the original train set of the instance found at
     *         folds.get(fold).get(indexInFold)
     */
    public int getOriginalInstIndex(int fold, int indexInFold) {
        return foldIndexing.get(fold).get(indexInFold);
    }

    // Caps numFolds at the number of instances (cannot have more folds than data).
    private void checkNumCVFolds(int numInstances) {
        if (numInstances < numFolds)
            numFolds = numInstances;
    }

    /**
     * Cross-validates the single classifier on the dataset and returns the
     * concatenated, fully-finalised results (all stats computed).
     */
    @Override
    public synchronized ClassifierResults evaluate(Classifier classifier, Instances dataset) throws Exception {
        ClassifierResults res = crossValidateWithStats(classifier, dataset);
        res.findAllStatsOnce();
        return res;
    }

    /** Single-classifier convenience wrapper around the array version below. */
    public synchronized ClassifierResults crossValidateWithStats(Classifier classifier, Instances dataset) throws Exception {
        return crossValidateWithStats(new Classifier[] { classifier }, dataset)[0];
    }

    /**
     * Performs more extensive cross validation using dist for instance and
     * returns more information.
     *
     * Each classifier is built/validated using the same subsets of the data provided,
     * i.e. for each prediction, all classifiers will have trained on the exact same
     * subset of data to have made that classification.
     *
     * If folds have already been defined (by a call to buildFolds()), will use those,
     * else will create them internally. Setting the seed makes folds reproducable
     * across different instantiations of this object.
     *
     * @return one concatenated ClassifierResults per classifier, with predictions
     *         reordered to match the original dataset order
     */
    public synchronized ClassifierResults[] crossValidateWithStats(Classifier[] classifiers, final Instances dataset) throws Exception {

        // (Re)build folds if none exist or the dataset appears to have changed.
        if (folds == null || !previousRelationName.equals(dataset.relationName()))
            buildFolds(dataset);

        if (cloneClassifiers)
            cloneClassifiers(classifiers);

        // Store for later storage of results, in case we want to set the class
        // values missing on each instance at predict time.
        double[] trueClassVals = dataset.attributeToDoubleArray(dataset.classIndex());

        resultsPerFold = new ClassifierResults[classifiers.length][numFolds];

        // Futures mirror resultsPerFold when multi-threading; pre-filled with
        // nulls so set(fold, ...) is valid. (Generic arrays are awkward, hence lists.)
        List<List<Future<ClassifierResults>>> futureResultsPerFold = new ArrayList<>(classifiers.length);
        for (int i = 0; i < classifiers.length; i++) {
            futureResultsPerFold.add(new ArrayList<>(numFolds));
            for (int j = 0; j < numFolds; j++)
                futureResultsPerFold.get(i).add(null);
        }

        if (multiThread)
            executor = Executors.newFixedThreadPool(numThreads);

        // For each fold as the held-out test set...
        for(int fold = 0; fold < numFolds; fold++){
            Instances[] trainTest = buildTrainTestSet(fold);
            final Instances train = trainTest[0];
            final Instances test = trainTest[1];

            String foldStr = "cvFold"+fold;

            // ...evaluate every classifier on that same train/test split.
            for (int classifierIndex = 0; classifierIndex < classifiers.length; ++classifierIndex) {

                // Get the classifier instance to be used this fold: a per-fold
                // clone if requested, else the shared instance (rebuilt each fold).
                final Classifier foldClassifier = cloneClassifiers ?
                        foldClassifiers[classifierIndex][fold] : classifiers[classifierIndex];

                final SingleTestSetEvaluator tester = new SingleTestSetEvaluator(seed, cloneData, setClassMissing);

                // Build+test timed as a whole for the fold's error-estimate time.
                Callable<ClassifierResults> eval = () -> {
                    long estimateTime = System.nanoTime();
                    ClassifierResults res = tester.evaluate(foldClassifier, train, test);
                    estimateTime = System.nanoTime() - estimateTime;
                    res.setErrorEstimateTime(estimateTime);
                    res.setDatasetName(res.getDatasetName()+"_"+foldStr);
                    return res;
                };

                if (!multiThread) {
                    // Compute the result now.
                    resultsPerFold[classifierIndex][fold] = eval.call();
                    if (cloneClassifiers && !maintainClassifiers)
                        foldClassifiers[classifierIndex][fold] = null; // free the memory
                }
                else {
                    futureResultsPerFold.get(classifierIndex).set(fold, executor.submit(eval));
                }
            }
        }

        if (multiThread) {
            // Collect results from futures; Future.get() blocks, so this loop
            // does not complete until all folds are done.
            for (int fold = 0; fold < numFolds; fold++) {
                for (int classifierIndex = 0; classifierIndex < classifiers.length; ++classifierIndex) {
                    resultsPerFold[classifierIndex][fold] = futureResultsPerFold.get(classifierIndex).get(fold).get();
                    if (cloneClassifiers && !maintainClassifiers)
                        foldClassifiers[classifierIndex][fold] = null; // free the memory
                }
            }
            executor.shutdown();
        }

        // Shove concatenated fold data into singular ClassifierResults objects
        // representing the entire cv process (trainFoldX), with predictions put
        // back into the original train-set order rather than prediction order.
        ClassifierResults[] results = new ClassifierResults[classifiers.length];
        for (int c = 0; c < classifiers.length; c++) {
            results[c] = concatenateAndReorderFoldPredictions(resultsPerFold[c], classifiers[c].getClass().getSimpleName(), dataset.relationName(), trueClassVals);
        }

        return results;
    }

    /**
     * Merges the per-fold results into a single results object, mapping each
     * prediction back to its instance's position in the original dataset via
     * foldIndexing. Build and estimate times are summed across folds.
     */
    private ClassifierResults concatenateAndReorderFoldPredictions(ClassifierResults[] foldResults, String fullClassifierName, String fullDatasetName, double[] trueClassVals) throws Exception {
        ClassifierResults res = new ClassifierResults(foldResults[0].numClasses());
        res.setTimeUnit(TimeUnit.NANOSECONDS);
        res.setEstimatorName(fullClassifierName);
        res.setDatasetName(fullDatasetName);
        res.setFoldID(seed);
        res.setSplit("train"); //todo revisit, or leave with the assumption that calling method will set this to test when needed

        res.turnOffZeroTimingsErrors();

        // Per-instance arrays indexed by ORIGINAL dataset position.
        double[][] dists = new double[trueClassVals.length][];
        double[] preds = new double[trueClassVals.length];
        long[] times = new long[trueClassVals.length];
        String[] descs = new String[trueClassVals.length];

        long totalBuildTime = 0;
        long totalEstimateTime = 0;

        for (int fold = 0; fold < numFolds; fold++) {
            String foldStr = "cvFold"+fold;

            // Has the preds in the order predicted for this fold.
            ClassifierResults foldRes = foldResults[fold];
            totalBuildTime += foldRes.getBuildTime();
            totalEstimateTime += foldRes.getErrorEstimateTime();

            for (int i = 0; i < foldRes.numInstances(); i++) {
                // Get them out in the original train-set order.
                int originalIndex = getOriginalInstIndex(fold, i);

                double[] dist = foldRes.getProbabilityDistribution(i);
                dists[originalIndex] = dist;
                times[originalIndex] = foldRes.getPredictionTime(i);
                descs[originalIndex] = foldStr+foldRes.getPredDescription(i);

                // The crossvalidator always resolved ties randomly; continued for
                // reproducability even if the lower-level evaluator resolved ties
                // e.g. naively per fold. //todo review
                double tiesResolvedRandomlyPred;
                tiesResolvedRandomlyPred = indexOfMax(dist);
                preds[originalIndex] = tiesResolvedRandomlyPred;
            }
        }

        res.addAllPredictions(trueClassVals, preds, dists, times, descs);
        res.setBuildTime(totalBuildTime);
        res.turnOnZeroTimingsErrors();
        // Total build time is set BEFORE zero-timing errors are turned back on:
        // e.g. ED1NN might legitimately get 0 build time for each fold, but for
        // all classifiers at least a FEW predictions should take more than ~200
        // nanoseconds.
        res.setErrorEstimateTime(totalEstimateTime);

        return res;
    }

    // NOTE(review): a large commented-out legacy implementation of
    // crossValidateWithStats (pre-SingleTestSetEvaluator) previously lived here;
    // removed as dead code.

    /**
     * Builds the train/test split for the given fold: the fold itself is the test
     * (validation) set, every other fold is concatenated into the train set.
     *
     * @return [0] = new train set, [1] = test(validation) set
     */
    public Instances[] buildTrainTestSet(int testFold) {
        Instances[] trainTest = new Instances[2];
        trainTest[0] = null;
        trainTest[1] = new Instances(folds.get(testFold));

        Instances temp; // had to add in redundant instance storage so we don't keep killing the base set of Instances by mistake
        for(int f = 0; f < folds.size(); f++){
            if(f==testFold){
                continue;
            }

            temp = new Instances(folds.get(f));
            if(trainTest[0]==null){
                trainTest[0] = temp;
            }else{
                trainTest[0].addAll(temp);
            }
        }

        return trainTest;
    }

    /**
     * Builds numFolds class-stratified folds of the dataset, recording for each
     * fold the original index of every instance it contains. Deterministic for
     * a fixed seed; the only source of randomness is the initial shuffle.
     */
    public void buildFolds(Instances dataset) throws Exception {
        previousRelationName = dataset.relationName();

        if (cloneData)
            dataset = new Instances(dataset); //make copy

        checkNumCVFolds(dataset.numInstances());

        Random r = new Random(seed);

        folds = new ArrayList<Instances>();
        foldIndexing = new ArrayList<ArrayList<Integer>>();

        for(int i = 0; i < numFolds; i++){
            folds.add(new Instances(dataset,0));
            foldIndexing.add(new ArrayList<>());
        }

        ArrayList<Integer> instanceIds = new ArrayList<>();
        for(int i = 0; i < dataset.numInstances(); i++)
            instanceIds.add(i);
        Collections.shuffle(instanceIds, r); //only use of random is here

        // Distribute insts into class groups, recording their original index.
        ArrayList<Instances> byClass = new ArrayList<>();
        ArrayList<ArrayList<Integer>> byClassIndices = new ArrayList<>();
        for(int i = 0; i < dataset.numClasses(); i++){
            byClass.add(new Instances(dataset,0));
            byClassIndices.add(new ArrayList<>());
        }

        for (int i = 0; i < instanceIds.size(); ++i) {
            int instIndex = instanceIds.get(i);
            int instClassVal;
            instClassVal = (int)dataset.instance(instIndex).classValue();

            byClass.get(instClassVal).add(dataset.instance(instIndex));
            byClassIndices.get(instClassVal).add(instIndex);
        }

        // And get them back out, so now in class order but randomised within each class.
        ArrayList<Integer> sortedByClassInstanceIds = new ArrayList<>();
        for (int c = 0; c < dataset.numClasses(); c++)
            sortedByClassInstanceIds.addAll(byClassIndices.get(c));

        // Deal instances round-robin into folds: fold f takes positions
        // f, f+numFolds, f+2*numFolds, ... of the class-sorted list, which
        // spreads each class roughly evenly across folds.
        int start = 0;
        for(int fold = 0; fold < numFolds; fold++) {
            int i = start;
            while (i < dataset.numInstances()) {
                folds.get(fold).add(dataset.instance(sortedByClassInstanceIds.get(i)));
                foldIndexing.get(fold).add(sortedByClassInstanceIds.get(i));
                i += numFolds;
            }
            start++;
        }
    }

    /**
     * Index of the maximum value in dist. If there is a tie for highest,
     * settles it with new Random(0) — NOTE this is seeded fresh on every call,
     * so tie-breaks are deterministic for a given number of tied classes.
     */
    private double indexOfMax(double[] dist) {
        double bsfWeight = -(Double.MAX_VALUE);
        ArrayList<Integer> bsfClassVals = null;

        for (int c = 0; c < dist.length; c++) {
            if(dist[c] > bsfWeight){
                bsfWeight = dist[c];
                bsfClassVals = new ArrayList<>();
                bsfClassVals.add(c);
            }else if(dist[c] == bsfWeight){
                bsfClassVals.add(c);
            }
        }

        double pred;
        // If there's a tie for highest voted class after all modules have voted, settle randomly.
        if(bsfClassVals.size()>1)
            pred = bsfClassVals.get(new Random(0).nextInt(bsfClassVals.size()));
        else
            pred = bsfClassVals.get(0);

        return pred;
    }

    /** Manual test entry point. */
    public static void main(String[] args) throws Exception {
        // buildFoldsTest();
        classifierCloningTest();
    }

    /**
     * Manual test: cross-validates several classifiers over several resamples,
     * printing cv accuracy, per-fold accuracy, each fold-classifier's accuracy
     * on the full test set, and the full-train-set model's test accuracy.
     * Requires local data at hard-coded Windows paths.
     */
    public static void classifierCloningTest() throws Exception {
        String resLoc = "C:/Temp/crossvalidatortests/";
        String dataLoc = "C:/TSC Problems/";
        String dset = "ItalyPowerDemand";

        String[] classifierNames = { "MLP", "SVML", "Logistic", "C45", "NN" };
        int numResamples = 5;

        for (String classifierName : classifierNames) {
            System.out.println(classifierName);
            for (int resample = 0; resample < numResamples; resample++) {
                Instances[] data = DatasetLoading.sampleDataset(dataLoc, dset, resample);
                Classifier classifier = ClassifierLists.setClassifierClassic(classifierName, resample);
                CrossValidationEvaluator cv = new CrossValidationEvaluator(resample, true, false, true, true);

                ClassifierResults fullcvResults = cv.evaluate(classifier, data[0]);
                System.out.println("\tdataset resample "+resample+" cv acc: "+fullcvResults.getAcc());

                for (int fold = 0; fold < cv.numFolds; fold++) {
                    ClassifierResults foldClassifierResultsOnValFold = cv.resultsPerFold[0][fold];
                    System.out.println("\t\t cv fold "+fold+": "+foldClassifierResultsOnValFold.getAcc());

                    SingleTestSetEvaluator testeval = new SingleTestSetEvaluator(resample, true, false);
                    ClassifierResults foldClassifierResultsOnFullTest = testeval.evaluate(cv.foldClassifiers[0][fold], data[1]);
                    System.out.println("\t\t fold "+fold+" classiifer on test: "+foldClassifierResultsOnFullTest.getAcc());
                }

                classifier.buildClassifier(data[0]);
                SingleTestSetEvaluator testeval = new SingleTestSetEvaluator(resample, true, false);
                System.out.println("\tfull train set test acc : " + testeval.evaluate(classifier, data[1]).getAcc());
            }
            System.out.println("");
        }
    }

    /**
     * Manual test: builds folds on a small UCI dataset and prints the class
     * distribution of the full data and of each fold, to eyeball stratification.
     * Requires local data at a hard-coded Windows path.
     */
    public static void buildFoldsTest() throws Exception {
        CrossValidationEvaluator cv = new CrossValidationEvaluator();
        cv.setNumFolds(3);
        cv.setSeed(0);

        String dset = "lenses";
        // String dset = "balloons";
        // String dset = "acute-inflammation";
        Instances insts = DatasetLoading.loadDataNullable("C:/UCI Problems/"+dset+"/"+dset);

        System.out.println("Full data:");
        System.out.println("numinsts="+insts.numInstances());
        int[] classCounts = new int[insts.numClasses()];
        double[] classDists = new double[insts.numClasses()];
        for (int j = 0; j < insts.numInstances(); j++)
            classCounts[(int)insts.get(j).classValue()]++;
        for (int j = 0; j < insts.numClasses(); j++)
            classDists[j] = (double)classCounts[j] / insts.numInstances();
        System.out.println("classcounts= " +Arrays.toString(classCounts));
        System.out.println("classdist= " +Arrays.toString(classDists));

        cv.buildFolds(insts);

        for (int i = 0; i < cv.numFolds; i++) {
            Instances fold = cv.folds.get(i);

            System.out.println("\nFold " + i);
            System.out.println("numinsts="+fold.numInstances());
            int[] classCount = new int[insts.numClasses()];
            double[] classDist = new double[fold.numClasses()];
            for (int j = 0; j < fold.numInstances(); j++)
                classCount[(int)fold.get(j).classValue()]++;
            for (int j = 0; j < fold.numClasses(); j++)
                classDist[j] = (double)classCount[j] / fold.numInstances();
            System.out.println("classcounts= " +Arrays.toString(classCount));
            System.out.println("classdist= " +Arrays.toString(classDist));

            Collections.sort(cv.foldIndexing.get(i));
            System.out.println("(sorted) orginal indices: " + cv.foldIndexing.get(i));
            System.out.println("");
        }
    }

    /**
     * Fresh evaluator with the same settings. INTENTIONALLY NOT COPYING ACROSS
     * FOLDS: that is a utility to help speed things up. If people try to clone
     * evaluators with folds already built, it is safer to force folds to be
     * rebuilt (seeded/deterministic, of course) than to potentially create many
     * copies of large datasets.
     */
    @Override
    public Evaluator cloneEvaluator() {
        CrossValidationEvaluator ev = new CrossValidationEvaluator(this.seed, this.cloneData, this.setClassMissing, this.cloneClassifiers, this.maintainClassifiers);
        return ev;
    }
}
26,356
44.286942
193
java
tsml-java
tsml-java-master/src/main/java/evaluation/evaluators/Evaluator.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package evaluation.evaluators;

import evaluation.storage.ClassifierResults;
import tsml.classifiers.TSClassifier;
import tsml.classifiers.distance_based.utils.system.copy.CopierUtils;
import tsml.classifiers.distance_based.utils.collections.params.ParamHandler;
import tsml.data_containers.TimeSeriesInstances;
import tsml.data_containers.utilities.Converter;
import weka.classifiers.Classifier;
import weka.core.Instances;
import weka.core.Randomizable;
import java.io.Serializable;

/**
 * Base class for all evaluators: strategies for estimating a classifier's
 * performance on a dataset, producing a ClassifierResults object.
 *
 * IMPORTANT: the two evaluate(...) overloads below delegate to EACH OTHER
 * (weka form wraps/converts to the tsml form and vice versa). A concrete
 * subclass MUST override at least one of them, otherwise calling either will
 * recurse infinitely.
 *
 * @author James Large (james.large@uea.ac.uk)
 */
public abstract class Evaluator implements Randomizable, ParamHandler, Serializable {

    // Seed controlling any randomisation the concrete evaluator performs.
    int seed;

    /**
     * Flag for whether to clone the data. Defaults to false, as no classifier should
     * be editing the data itself when training/testing, however setting this to true
     * will guarantee that the same (java) instantiations of (weka) instance(s) objects
     * can be reused in higher-level experimental code.
     */
    boolean cloneData;

    /**
     * Each instance will have setClassMissing() called upon it. To ABSOLUTELY enforce that
     * no classifier can cheat in any way (e.g. some filter/transform inadvertently incorporates the class
     * value back into the transformed data set).
     *
     * The only reason to leave this as false (as it has been by default, for backwards compatibility reasons)
     * is that in higher-level experimental code, the same (java) instantiations of (weka) instance(s) objects are used multiple
     * times, and the latter expects the class value to still be there (to check for correct predictions, e.g.)
     */
    boolean setClassMissing;

    public Evaluator(int seed, boolean cloneData, boolean setClassMissing) {
        this.seed = seed;
        this.cloneData = cloneData;
        this.setClassMissing = setClassMissing;
    }

    @Override
    public int getSeed() {
        return seed;
    }

    @Override
    public void setSeed(int seed) {
        this.seed = seed;
    }

    /** @see #cloneData */
    public boolean getCloneData() {
        return cloneData;
    }

    /** @see #cloneData */
    public void setCloneData(boolean cloneData) {
        this.cloneData = cloneData;
    }

    /** @see #setClassMissing */
    public boolean getSetClassMissing() {
        return setClassMissing;
    }

    /** @see #setClassMissing */
    public void setSetClassMissing(boolean setClassMissing) {
        this.setClassMissing = setClassMissing;
    }

    /**
     * Evaluates via the weka representation. Default implementation wraps the
     * classifier and converts the data, then delegates to the tsml overload —
     * subclasses must override one of the two overloads (see class javadoc).
     */
    public ClassifierResults evaluate(Classifier classifier, Instances dataset) throws Exception {
        return evaluate(TSClassifier.wrapClassifier(classifier), Converter.fromArff(dataset));
    }

    /**
     * Evaluates via the tsml representation. Default implementation unwraps the
     * classifier and converts the data, then delegates to the weka overload —
     * subclasses must override one of the two overloads (see class javadoc).
     */
    public ClassifierResults evaluate(TSClassifier classifier, TimeSeriesInstances data) throws Exception {
        return evaluate(classifier.getClassifier(), Converter.toArff(data));
    }

    /**
     * Deep-copies this evaluator via serialisation-based copying.
     *
     * @throws IllegalStateException wrapping any failure during the deep copy
     */
    public Evaluator cloneEvaluator() {
        try {
            return (Evaluator) CopierUtils.deepCopy(this);
        } catch(Exception e) {
            throw new IllegalStateException(e);
        }
    }
}
5,700
40.919118
129
java
tsml-java
tsml-java-master/src/main/java/evaluation/evaluators/InternalEstimateEvaluator.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package evaluation.evaluators; import evaluation.storage.ClassifierResults; import tsml.classifiers.EnhancedAbstractClassifier; import weka.classifiers.Classifier; import weka.core.Instances; /** * A dummy/wrapper evaluator for gathering the internal estimates of classifiers that satisfy * EnhancedAbstractClassifier.classifierAbleToEstimateOwnPerformance(classifier)) * * Builds the classifier on the data, and returns the ClassifierResults object that is build internally by the classifier * * Currently, no additional meta info is supplied/forced into the results object by the evaluator, and so it is up to * experimenters and classifier authors to populate any additional meta-info needed/wanted * * @author James Large (james.large@uea.ac.uk) */ public class InternalEstimateEvaluator extends Evaluator { public InternalEstimateEvaluator() { super(0,false,false); } public InternalEstimateEvaluator(int seed, boolean cloneData, boolean setClassMissing) { super(seed,cloneData,setClassMissing); } @Override public synchronized ClassifierResults evaluate(Classifier classifier, Instances dataset) throws Exception { if (!EnhancedAbstractClassifier.classifierAbleToEstimateOwnPerformance(classifier)) throw new IllegalArgumentException("To generate an 
internal estimate of performance, a classifier must extend " + "EnhancedAbstractClassifier and have CAN_ESTIMATE_OWN_PERFORMANCE=true. Classifier class passed: " + classifier.getClass().getSimpleName()); final Instances insts = cloneData ? new Instances(dataset) : dataset; EnhancedAbstractClassifier eac = (EnhancedAbstractClassifier) classifier; eac.setEstimateOwnPerformance(true); eac.setSeed(seed); eac.buildClassifier(insts); ClassifierResults res = eac.getTrainResults(); return res; } @Override public Evaluator cloneEvaluator() { return new InternalEstimateEvaluator(this.seed, this.cloneData, this.setClassMissing); } }
2,827
39.985507
160
java
tsml-java
tsml-java-master/src/main/java/evaluation/evaluators/MultiSamplingEvaluator.java
/*
 * Copyright (C) 2019 xmw13bzu
 *
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package evaluation.evaluators;

import evaluation.storage.ClassifierResults;
import java.util.concurrent.ExecutorService;
import tsml.classifiers.MultiThreadable;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;

/**
 * Base class for evaluators that will evaluate over multiple resamples (e.g stratified random resamples)
 * or folds (e.g cross validation) of the data. In api methods, I have simply referred to these
 * as folds as a semi-arbitrary choice.
 *
 * Provides functionality for cloning and saving the trained models on each fold
 *
 * @author James Large (james.large@uea.ac.uk)
 */
public abstract class MultiSamplingEvaluator extends SamplingEvaluator implements MultiThreadable {

    /**
     * TODO this should be replaced with some globally-aware (singleton?) thread managing
     * service, instead of having everything spawning it's own service. That will be handled
     * in future/with discussion though
     */
    protected ExecutorService executor = null;
    protected int numThreads = 1;
    protected boolean multiThread = false;

    /**
     * The number of folds (aka resamples, depending on the context of the
     * particular MultiSamplingEvaluator implementation) to produce, evaluate on,
     * and concatenate/average over
     */
    protected int numFolds;

    /**
     * If true, the classifiers shall be cloned when building and predicting on each fold.
     *
     * This is achieved via AbstractClassifier.makeCopy(...), and therefore the classifier
     * and all relevant/wanted info/hyperparamters that may have been set up prior to giving
     * the classifier to the evaluator must be properly (de-)serialisable.
     *
     * Useful if a particular classifier maintains information after one buildclassifier that
     * might not be replaced or effect the next call to buildclassifier. Ideally, this
     * should not be the case, but this option will make sure either way
     *
     * If maintainClassifiers == true, clone classifiers is forced to true
     */
    protected boolean cloneClassifiers = false;

    /**
     * If true, will keep the classifiers trained on each fold in memory
     *
     * When set to true, will force clone classifier to also be true. Note - this will naturally
     * come with a large cost to required memory, (size of trained classifier) * numFolds
     */
    protected boolean maintainClassifiers = false;

    /**
     * If maintainClassifiers is true, this will become populated with the classifiers
     * trained on each fold, [classifier][fold], otherwise will be null
     */
    protected Classifier[][] foldClassifiers = null;

    /**
     * Populated with the classifierresults object for each fold, such that each
     * object effectively represents a single hold-out validation set.
     * [classifier][fold]
     */
    protected ClassifierResults[][] resultsPerFold = null;

    public MultiSamplingEvaluator() {
        super(0, false, false);
    }

    public MultiSamplingEvaluator(int seed, boolean cloneData, boolean setClassMissing, boolean cloneClassifiers, boolean maintainClassifiers) {
        super(seed, cloneData, setClassMissing);
        this.cloneClassifiers = cloneClassifiers;
        setMaintainClassifiers(maintainClassifiers); // may in turn force cloneClassifiers to true
    }

    public int getNumFolds() {
        return numFolds;
    }

    public void setNumFolds(int numFolds) {
        this.numFolds = numFolds;
    }

    /** Per-fold results of the first (often only) classifier evaluated. */
    public ClassifierResults[] getFoldResults() {
        return getFoldResults(0);
    }

    /** Per-fold results of the classifier at the given index, or null if unavailable. */
    public ClassifierResults[] getFoldResults(int classifierIndex) {
        if (resultsPerFold != null && resultsPerFold.length > classifierIndex)
            return resultsPerFold[classifierIndex];
        else
            return null;
    }

    public ClassifierResults[][] getFoldResultsAll() {
        return resultsPerFold;
    }

    /** Per-fold trained models of the first (often only) classifier evaluated. */
    public Classifier[] getFoldClassifiers() {
        return getFoldClassifiers(0);
    }

    /**
     * Per-fold trained models of the classifier at the given index, or null if
     * unavailable (e.g. maintainClassifiers was false).
     *
     * BUGFIX: previously always returned foldClassifiers[0] regardless of the
     * index passed, and did not bounds-check the index; now mirrors
     * {@link #getFoldResults(int)}.
     */
    public Classifier[] getFoldClassifiers(int classifierIndex) {
        if (foldClassifiers != null && foldClassifiers.length > classifierIndex)
            return foldClassifiers[classifierIndex];
        else
            return null;
    }

    public Classifier[][] getFoldClassifiersAll() {
        return foldClassifiers;
    }

    /**
     * If true, will keep the classifiers trained on each fold in memory
     *
     * When set to true, will force clone classifier to also be true. Note - this will naturally
     * come with a large cost to required memory, (size of trained classifier) * numFolds
     */
    public void setMaintainClassifiers(boolean maintainClassifiers) {
        this.maintainClassifiers = maintainClassifiers;
        if (maintainClassifiers)
            this.cloneClassifiers = true;
    }

    /** See {@link #setMaintainClassifiers(boolean)}. */
    public boolean getMaintainClassifiers() {
        return maintainClassifiers;
    }

    /** See {@link #setCloneClassifiers(boolean)}. */
    public boolean getCloneClassifiers() {
        return cloneClassifiers;
    }

    /**
     * If true, the classifiers shall be cloned when building and predicting on each fold.
     *
     * This is achieved via AbstractClassifier.makeCopy(...), and therefore the classifier
     * and all relevant/wanted info/hyperparamters that may have been set up prior to giving
     * the classifier to the evaluator must be properly (de-)serialisable.
     *
     * Useful if a particular classifier maintains information after one buildclassifier that
     * might not be replaced or effect the next call to buildclassifier. Ideally, this
     * should not be the case, but this option will make sure either way
     *
     * If maintainClassifiers == true, clone classifiers is forced to true
     */
    public void setCloneClassifiers(boolean cloneClassifiers) {
        this.cloneClassifiers = cloneClassifiers;
    }

    /** Clones a single classifier numFolds times into foldClassifiers[0]. */
    protected void cloneClassifier(Classifier classifier) throws Exception {
        // clone them all here in one go for efficiency of serialisation
        foldClassifiers = new Classifier[1][];
        foldClassifiers[0] = AbstractClassifier.makeCopies(classifier, numFolds);
    }

    /** Clones each supplied classifier numFolds times into foldClassifiers[c]. */
    protected void cloneClassifiers(Classifier[] classifiers) throws Exception {
        // clone them all here in one go for efficiency of serialisation
        foldClassifiers = new Classifier[classifiers.length][];
        for (int c = 0; c < classifiers.length; ++c)
            foldClassifiers[c] = AbstractClassifier.makeCopies(classifiers[c], numFolds);
    }

    /**
     * NOTE: multithreading (numThreads > 1) forces cloneClassifiers to true for
     * concurrency reasons.
     */
    @Override //MultiThreadable
    public void enableMultiThreading(int numThreads) {
        if (numThreads > 1) {
            this.numThreads = numThreads;
            this.multiThread = true;
            this.cloneClassifiers = true;
        }
        else {
            this.numThreads = 1;
            this.multiThread = false;
        }
    }
}
8,800
37.600877
144
java
tsml-java
tsml-java-master/src/main/java/evaluation/evaluators/OutOfBagEvaluator.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package evaluation.evaluators;

import evaluation.storage.ClassifierResults;
import tsml.classifiers.TSClassifier;
import tsml.classifiers.distance_based.utils.system.copy.CopierUtils;
import tsml.classifiers.distance_based.utils.system.logging.LogUtils;
import tsml.data_containers.TimeSeriesInstance;
import tsml.data_containers.TimeSeriesInstances;
import utilities.ArrayUtilities;
import utilities.ClassifierTools;

import java.util.*;
import java.util.logging.Logger;

/**
 * Out-of-bag evaluator: draws a bootstrap sample (with replacement, same size as
 * the data) to train on, and evaluates on the instances never drawn.
 *
 * After evaluate() returns, the in-bag/out-of-bag splits and their source indices
 * remain queryable via the getters.
 *
 * BUGFIX: the in-bag/out-of-bag independence check previously used
 * org.junit.Assert in production code (throwing AssertionError from a test
 * library); it is now an explicit IllegalStateException.
 */
public class OutOfBagEvaluator extends Evaluator {

    private static final Logger DEFAULT_LOGGER = LogUtils.getLogger(OutOfBagEvaluator.class);
    private transient Logger log = DEFAULT_LOGGER;
    private TimeSeriesInstances inBagTrainData;
    private List<Integer> inBagTrainDataIndices;
    private TimeSeriesInstances outOfBagTestData;
    private List<Integer> outOfBagTestDataIndices;
    // if true, the classifier is deep-copied before building, leaving the caller's instance untouched
    private boolean cloneClassifier = false;

    public OutOfBagEvaluator() {
        super(-1, false, false);
    }

    /**
     * Bootstrap-samples the data (seeded by this evaluator's seed), builds the
     * classifier on the in-bag sample and returns predictions on the out-of-bag set.
     *
     * @throws IllegalStateException if an in-bag index is ever found in the
     *         out-of-bag set (internal invariant violation)
     */
    @Override
    public ClassifierResults evaluate(TSClassifier classifier, TimeSeriesInstances data) throws Exception {
        final Random random = new Random(seed);
        // build a new oob train / test data
        inBagTrainDataIndices = new ArrayList<>(data.numInstances());
        final Set<Integer> oobTestSetIndices = new HashSet<>(data.numInstances());
        oobTestSetIndices.addAll(ArrayUtilities.sequence(data.numInstances()));
        // pick n instances from train data, where n is the size of train data
        for(int i = 0; i < data.numInstances(); i++) {
            int index = random.nextInt(data.numInstances());
            inBagTrainDataIndices.add(index);
            // remove the train instance from the test bag (if not already)
            oobTestSetIndices.remove(index);
        }
        // populate in-bag train data
        inBagTrainData = new TimeSeriesInstances(data.getClassLabels());
        for(Integer i : inBagTrainDataIndices) {
            // quick check that oob test / train are independent
            if(oobTestSetIndices.contains(i)) {
                throw new IllegalStateException("in-bag train index " + i + " also present in out-of-bag test set");
            }
            TimeSeriesInstance instance = data.get(i);
            inBagTrainData.add(instance);
        }
        // populate out-of-bag test data
        outOfBagTestData = new TimeSeriesInstances(data.getClassLabels());
        outOfBagTestDataIndices = new ArrayList<>(oobTestSetIndices);
        for(Integer i : outOfBagTestDataIndices) {
            TimeSeriesInstance instance = data.get(i);
            outOfBagTestData.add(instance);
        }
        // build the tree on the oob train
        if(cloneClassifier) {
            classifier = CopierUtils.deepCopy(classifier);
        }
        classifier.buildClassifier(inBagTrainData);
        // test tree on the oob test
        ClassifierResults results = new ClassifierResults();
        ClassifierTools.addPredictions(classifier, outOfBagTestData, results, random);
        return results;
    }

    public TimeSeriesInstances getInBagTrainData() {
        return inBagTrainData;
    }

    public TimeSeriesInstances getOutOfBagTestData() {
        return outOfBagTestData;
    }

    public List<Integer> getInBagTrainDataIndices() {
        return inBagTrainDataIndices;
    }

    public boolean isCloneClassifier() {
        return cloneClassifier;
    }

    public void setCloneClassifier(final boolean cloneClassifier) {
        this.cloneClassifier = cloneClassifier;
    }

    public List<Integer> getOutOfBagTestDataIndices() {
        return outOfBagTestDataIndices;
    }
}
4,334
38.054054
117
java
tsml-java
tsml-java-master/src/main/java/evaluation/evaluators/SamplingEvaluator.java
/* * Copyright (C) 2019 xmw13bzu * * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package evaluation.evaluators; /** * Base class for evaluators that will sample the data provided to the evaluate function * in some way, and build the classifier at least once on some part of the data. * Classifiers need not have been built on some from of data prior to using this evaluator. * * In this sense, evaluators extending this class can be viewed as 'self contained', in * that no external data is needed. * * NOTE: A typical extension of this class might be to e.g. resample the data passed * into a train and test set, build on the train and predict the test. As a result of this * use case, the number of predictions in the returned classifierresults object is * not equal to the number of instances in the data passed. * * Currently, this class does not add any extra functionality, however acts as an * extra step in the inheritance hierarchy to distinguish 'self-contained' and 'not self-contained' * evaluators by the loose definition above. * * @author James Large (james.large@uea.ac.uk) */ public abstract class SamplingEvaluator extends Evaluator { public SamplingEvaluator(int seed, boolean cloneData, boolean setClassMissing) { super(seed, cloneData, setClassMissing); } }
2,038
42.382979
99
java
tsml-java
tsml-java-master/src/main/java/evaluation/evaluators/SingleSampleEvaluator.java
/* * Copright (C) 2019 xmw13bzu * * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package evaluation.evaluators; import evaluation.storage.ClassifierResults; import utilities.InstanceTools; import weka.classifiers.Classifier; import weka.core.Instances; /** * Resamples the data provided once to form train and test sets, builds on the train * and evaluates on the test. 
Resampled according to the seed, and is deterministic, * using the standard InstanceTools.resampleInstances method * * @author James Large (james.large@uea.ac.uk) */ public class SingleSampleEvaluator extends SamplingEvaluator { double propInstancesInTrain = 0.5; public SingleSampleEvaluator() { super(0, false, false); } public SingleSampleEvaluator(int seed, boolean cloneData, boolean setClassMissing) { super(seed, cloneData, setClassMissing); } public double getPropInstancesInTrain() { return propInstancesInTrain; } public void setPropInstancesInTrain(double propInstancesInTrain) { this.propInstancesInTrain = propInstancesInTrain; } @Override public synchronized ClassifierResults evaluate(Classifier classifier, Instances dataset) throws Exception { Instances[] trainTest = InstanceTools.resampleInstances(dataset, seed, propInstancesInTrain); SingleTestSetEvaluator eval = new SingleTestSetEvaluator(this.seed, this.cloneData, this.setClassMissing); return eval.evaluate(classifier, trainTest[0], trainTest[1]); } @Override public Evaluator cloneEvaluator() { SingleSampleEvaluator ev = new SingleSampleEvaluator(this.seed, this.cloneData, this.setClassMissing); ev.setPropInstancesInTrain(this.propInstancesInTrain); return ev; } }
2,505
35.318841
114
java
tsml-java
tsml-java-master/src/main/java/evaluation/evaluators/SingleTestSetEvaluator.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package evaluation.evaluators;

import evaluation.storage.ClassifierResults;
import java.util.concurrent.TimeUnit;
import static utilities.GenericTools.indexOfMax;
import tsml.classifiers.Interpretable;
import weka.classifiers.Classifier;
import weka.core.Instance;
import weka.core.Instances;

/**
 * Simply gathers predictions from an already built/trained classifier on the data given
 *
 * As much meta info as possible shall be inferred (e.g. classifier name based on the class name),
 * but the calling function should explicitely set/check any meta info it wants to if accuracy is
 * important or the values non-standard (e.g. in this context you want the classifier name to
 * include some specific parameter identifier)
 *
 * distributionForInstance(Instance) MUST be defined, even if the classifier only really returns
 * a one-hot distribution
 *
 * @author James Large (james.large@uea.ac.uk)
 */
public class SingleTestSetEvaluator extends Evaluator {

    public SingleTestSetEvaluator() {
        super(0,false,false);
    }
    public SingleTestSetEvaluator(int seed, boolean cloneData, boolean setClassMissing) {
        super(seed,cloneData,setClassMissing);
    }

    /**
     * If true, the classifier is asked for interpretability output after each
     * prediction. NOTE(review): the classifier is cast to Interpretable without an
     * instanceof check, so enabling this with a non-Interpretable classifier
     * throws ClassCastException.
     */
    private boolean vis = false;
    public SingleTestSetEvaluator(int seed, boolean cloneData, boolean setClassMissing, boolean vis) {
        super(seed,cloneData,setClassMissing);
        this.vis = vis;
    }

    /**
     * Predicts every instance in the dataset with the (already trained) classifier,
     * recording per-prediction nanosecond timings, and returns the finalised
     * results with all stats computed.
     */
    @Override
    public synchronized ClassifierResults evaluate(Classifier classifier, Instances dataset) throws Exception {
        final Instances insts = cloneData ? new Instances(dataset) : dataset;

        ClassifierResults res = new ClassifierResults(insts.numClasses());
        res.setTimeUnit(TimeUnit.NANOSECONDS);
        res.setEstimatorName(classifier.getClass().getSimpleName());
        res.setDatasetName(dataset.relationName());
        res.setFoldID(seed);
        res.setSplit("train"); //todo revisit, or leave with the assumption that calling method will set this to test when needed

        res.turnOffZeroTimingsErrors();
        for (Instance testinst : insts) {
            double trueClassVal = testinst.classValue();
            if (setClassMissing)
                testinst.setClassMissing();

            long startTime = System.nanoTime();
            double[] dist = classifier.distributionForInstance(testinst);
            long predTime = System.nanoTime() - startTime;

            if (vis) ((Interpretable)classifier).lastClassifiedInterpretability();

            res.addPrediction(trueClassVal, dist, indexOfMax(dist), predTime, ""); //todo indexOfMax does not break ties randomly.
        }

        res.turnOnZeroTimingsErrors();

        res.finaliseResults();
        res.findAllStatsOnce();

        return res;
    }

    /**
     * Utility method, will build on the classifier on the train set and evaluate on the test set
     */
    public synchronized ClassifierResults evaluate(Classifier classifier, Instances train, Instances test) throws Exception {
        long buildTime = System.nanoTime();
        classifier.buildClassifier(train);
        buildTime = System.nanoTime() - buildTime;

        ClassifierResults res = evaluate(classifier, test);

        res.turnOffZeroTimingsErrors();
        res.setBuildTime(buildTime);
        res.turnOnZeroTimingsErrors();

        return res;
    }

    /**
     * BUGFIX: previously the clone dropped the vis flag, so a clone of a
     * visualisation-enabled evaluator silently stopped producing
     * interpretability output. The flag is now carried over.
     */
    @Override
    public Evaluator cloneEvaluator() {
        return new SingleTestSetEvaluator(this.seed, this.cloneData, this.setClassMissing, this.vis);
    }
}
4,310
36.815789
130
java
tsml-java
tsml-java-master/src/main/java/evaluation/evaluators/StratifiedResamplesEvaluator.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package evaluation.evaluators;

import evaluation.storage.ClassifierResults;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import weka.classifiers.Classifier;
import weka.core.Instances;

/**
 * An evaluator that performs k stratified random resamples (default k=30) of the given
 * data and evaluates the given classifier(s) on each resample.
 *
 * Concatenated predictions across all resamples are returned from the main
 * evaluate method, however predictions split across each resample can also be retrieved
 * afterwards
 *
 * @author James Large (james.large@uea.ac.uk)
 */
public class StratifiedResamplesEvaluator extends MultiSamplingEvaluator {

    // proportion of instances in the train split of each resample; set to 0.5 by both constructors
    double propInstancesInTrain;

    /**
     * If true, the seeds used to generate each resample shall simply be id
     * of the resample in the loop, i.e. the values 0 to numFolds-1
     *
     * This would mirror the generation of arff folds in ClassifierExperiments, for example.
     * This also means that the seed of this StratifiedResamplesEvaluator object
     * has no real use, aside from it would be stored as the fold id in the meta data
     * of the concatenated results object.
     *
     * Otherwise if false, the data resample seeds shall be randomly generated
     * via the seed of this object. So still reproducable, but likely not aligned with
     * resamples produced semi manually by just looping over numFolds and using i
     * as the seed
     */
    boolean useEachResampleIdAsSeed;

    public StratifiedResamplesEvaluator() {
        super(0,false,false,false,false);

        this.numFolds = 30;
        this.propInstancesInTrain = 0.5;
    }

    public StratifiedResamplesEvaluator(int seed, boolean cloneData, boolean setClassMissing, boolean cloneClassifiers, boolean maintainClassifiers) {
        super(seed,cloneData,setClassMissing, cloneClassifiers, maintainClassifiers);

        this.numFolds = 30;
        this.propInstancesInTrain = 0.5;
    }

    // Copies seed/flags via the constructor, then the two settings the constructor does not cover.
    @Override
    public Evaluator cloneEvaluator() {
        StratifiedResamplesEvaluator ev = new StratifiedResamplesEvaluator(this.seed, this.cloneData, this.setClassMissing, this.cloneClassifiers, this.maintainClassifiers);
        ev.setPropInstancesInTrain(this.propInstancesInTrain);
        ev.setUseEachResampleIdAsSeed(this.useEachResampleIdAsSeed);
        return ev;
    }

    /**
     * If true, the seeds used to generate each resample shall simply be id
     * of the resample in the loop, i.e. the values 0 to numFolds-1
     *
     * This would mirror the generation of arff folds in ClassifierExperiments, for example.
     * This also means that the seed of this StratifiedResamplesEvaluator object
     * has no real use, aside from it would be stored as the fold id in the meta data
     * of the concatenated results object.
     *
     * Otherwise if false, the data resample seeds shall be randomly generated
     * via the seed of this object. So still reproducable, but likely not aligned with
     * resamples produced semi manually by just looping over numFolds and using i
     * as the seed
     */
    public boolean getUseEachResampleIdAsSeed() {
        return useEachResampleIdAsSeed;
    }

    /** See {@link #getUseEachResampleIdAsSeed()}. */
    public void setUseEachResampleIdAsSeed(boolean useEachResampleIdAsSeed) {
        this.useEachResampleIdAsSeed = useEachResampleIdAsSeed;
    }

    public double getPropInstancesInTrain() {
        return propInstancesInTrain;
    }

    public void setPropInstancesInTrain(double propInstancesInTrain) {
        this.propInstancesInTrain = propInstancesInTrain;
    }

    /**
     * This returns a single ClassifierResults object, however in common usage
     * a ClassifierResults object would typically refer to only one of the resamples,
     * not all of them. The object returned by this method should effectively be treated
     * as a special case. The parameters line is not reliable, as optimal parameters
     * are calculated 30 times, for example.
     *
     * What this will return is an object where the predictions are concatenated
     * for each resample, i.e the predictions are ordered as all predictions for the
     * test set of fold0, then the predictions for fold1, etc.
     *
     * Therefore, if you're evaluating multiple classifiers on the same dataset using this
     * evaluator, the predictions will all line up to each other (assuming they are seeded the
     * same to produce the same resamples). Each prediction will refer to the same test
     * case being predicted after being trained on the same data.
     *
     * Additionally, concatenating the folds in this manner means that the stats
     * reported calculated by ClassifierResults are automatically the stats averaged
     * over the resamples
     *
     * If you want to access the classifier results objects for each fold, these are
     * also stored in this evaluator object, call getResultsOfEachSample()
     *
     * @param classifier
     * @param dataset
     * @return
     * @throws Exception
     */
    @Override
    public synchronized ClassifierResults evaluate(Classifier classifier, Instances dataset) throws Exception {

        //todo revisit, suppose numFolds = 30, propInTrain = 0.5, numInstances = 20, 20 choose 10 = 184756 >>>>> 30...
//        if (dataset.numInstances() <= numFolds) {
//            System.out.println("Warning, num resamples requested is greater than the number of instances, "
//                    + "performing a leave-one-out cross validation instead");
//            return performLOOCVInstead(classifier, dataset);
//        }

        ClassifierResults res = stratifiedResampleWithStats(classifier, dataset);
        res.findAllStatsOnce();
        return res;
    }

    // Single-classifier convenience wrapper around the array form below.
    public synchronized ClassifierResults stratifiedResampleWithStats(Classifier classifier, Instances dataset) throws Exception {
        return stratifiedResampleWithStats(new Classifier[] { classifier }, dataset)[0];
    }

    /**
     * Core loop: for each classifier, evaluates it on numFolds resamples
     * (sequentially or via a thread pool when multiThread is set) and
     * concatenates the per-fold results into one object per classifier.
     *
     * NOTE: resample seeds are drawn from a fresh Random(seed) per classifier
     * (or are simply the fold ids when useEachResampleIdAsSeed is true), so
     * resamples are aligned across classifiers. The exact order of RNG calls
     * is therefore significant — do not reorder.
     */
    public synchronized ClassifierResults[] stratifiedResampleWithStats(Classifier[] classifiers, Instances data) throws Exception {
        final Instances dataset = cloneData ? new Instances(data) : data;

        if (cloneClassifiers)
            cloneClassifiers(classifiers);

        resultsPerFold = new ClassifierResults[classifiers.length][numFolds];
        ClassifierResults[] allConcatenatedClassifierRes = new ClassifierResults[classifiers.length];

        //TODO obviously clean up this garbage once actual design is decided on
        // pre-fill a null per [classifier][fold] so set(fold, ...) can be used below
        List<List<Future<ClassifierResults>>> futureResultsPerFold = new ArrayList<>(classifiers.length); //generic arrays...
        for (int i = 0; i < classifiers.length; i++) {
            futureResultsPerFold.add(new ArrayList<>(numFolds));
            for (int j = 0; j < numFolds; j++)
                futureResultsPerFold.get(i).add(null);
        }

        if (multiThread)
            executor = Executors.newFixedThreadPool(numThreads);

        for (int classifierIndex = 0; classifierIndex < classifiers.length; ++classifierIndex) {

            //rebuild for each classifier so resamples are aligned
            //ignored if useEachResampleIdAsSeed == true
            Random classifierRng = new Random(seed);

            long estimateTimeStart = System.nanoTime();

            for (int fold = 0; fold < numFolds; fold++) {
                // per-fold clone when requested, otherwise the shared classifier instance
                final Classifier foldClassifier = cloneClassifiers ? foldClassifiers[classifierIndex][fold] : classifiers[classifierIndex];
                int resampleSeed = useEachResampleIdAsSeed ? fold : classifierRng.nextInt();
                String foldStr = "resample"+resampleSeed;

                SingleSampleEvaluator eval = new SingleSampleEvaluator(resampleSeed, this.cloneData, this.setClassMissing);
                eval.setPropInstancesInTrain(this.propInstancesInTrain);

                // captures foldClassifier/eval/foldStr; run now or submitted to the pool below
                Callable<ClassifierResults> foldEvalFunc = () -> {
                    long estimateTime = System.nanoTime();
                    ClassifierResults res = eval.evaluate(foldClassifier, dataset);
                    estimateTime = System.nanoTime() - estimateTime;
                    res.setErrorEstimateTime(estimateTime);
                    res.setDatasetName(res.getDatasetName()+"_"+foldStr);
                    return res;
                };

                if (!multiThread) {
                    //compute the result now
                    resultsPerFold[classifierIndex][fold] = foldEvalFunc.call();
                    if (cloneClassifiers && !maintainClassifiers)
                        foldClassifiers[classifierIndex][fold] = null; //free the memory
                }
                else {
                    //spawn a job to compute the result, will collect it later
                    futureResultsPerFold.get(classifierIndex).set(fold, executor.submit(foldEvalFunc));
                }
            }

            if (multiThread) {
                //collect results from futures, this method will not continue until all folds done
                for (int fold = 0; fold < numFolds; fold++) {
                    resultsPerFold[classifierIndex][fold] = futureResultsPerFold.get(classifierIndex).get(fold).get();
                    if (cloneClassifiers && !maintainClassifiers)
                        foldClassifiers[classifierIndex][fold] = null; //free the memory
                }
            }

            long estimateTime = System.nanoTime() - estimateTimeStart;

            // stitch the folds together; stats over the concatenation = stats averaged over resamples
            ClassifierResults concatenatedClassifierRes = ClassifierResults.concatenateClassifierResults(resultsPerFold[classifierIndex]);
            concatenatedClassifierRes.setTimeUnit(TimeUnit.NANOSECONDS);
            concatenatedClassifierRes.setEstimatorName(classifiers[classifierIndex].getClass().getSimpleName());
            concatenatedClassifierRes.setDatasetName(dataset.relationName());
            concatenatedClassifierRes.setFoldID(seed);
            concatenatedClassifierRes.setSplit("train"); //todo revisit, or leave with the assumption that calling method will set this to test when needed
            concatenatedClassifierRes.setErrorEstimateTime(estimateTime);

            allConcatenatedClassifierRes[classifierIndex] = concatenatedClassifierRes;
        }

        if (multiThread)
            executor.shutdown();

        return allConcatenatedClassifierRes;
    }
}
12,370
46.217557
173
java
tsml-java
tsml-java-master/src/main/java/evaluation/storage/ClassifierResults.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package evaluation.storage; import fileIO.OutFile; import java.io.File; import java.io.FileNotFoundException; import java.io.Serializable; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.NoSuchElementException; import java.util.Random; import java.util.Scanner; import java.util.concurrent.TimeUnit; import java.util.function.Function; import utilities.*; /** * This is a container class for the storage of predictions and meta-info of a * classifier on a single set of instances (for example, the test set of a particular * resample of a particular dataset). * * Predictions can be stored via addPrediction(...) or addAllPredictions(...) 
* Currently, the information stored about each prediction is: * - The true class value (double getTrueClassValue(index)) * - The predicted class value (double getPredClassValue(index)) * - The probability distribution for this instance (double[] getProbabilityDistribution(index)) * - The time taken to predict this instance id (long getPredictionTime(index)) * - An optional description of the prediction (String getPredDescription(index)) * * The meta info stored is: * [LINE 1 OF FILE] * - get/setDatasetName(String) * - get/setClassifierName(String) * - get/setSplit(String) * - get/setFoldId(String) * - get/setTimeUnit(TimeUnit) * - FileType, set implicitly via the write...() method used * - get/setDescription(String) * [LINE 2 OF FILE] * - get/setParas(String) * [LINE 3 OF FILE] * 1 - getAccuracy() (calculated from predictions, only settable with a suitably annoying message) * 2 - get/setBuildTime(long) * 3 - get/setTestTime(long) * 4 - get/setBenchmarkTime(long) * 5 - get/setMemory(long) * 6 - (set)numClasses(int) (either set by user or indirectly found through predicted probability distributions) * 7 - get/setErrorEstimateMethod(String) (loosely formed, e.g. cv_10) * 8 - get/setErrorEstimateTime(long) (time to form an estimate from scratch, e.g. time of cv_10) * 9 - get/setBuildPlusEstimateTime(long) (time to train on full data, AND estimate error on it) * * [REMAINING LINES: PREDICTIONS] * - trueClassVal, predClassVal,[empty], dist[0], dist[1] ... dist[c],[empty], predTime, [empty], predDescription * * Supports reading/writing of results from/to file, in the 'classifierResults file-format' * - loadResultsFromFile(String path) * - writeFullResultsToFile(String path) (other writing formats also supported, write...ToFile(...) * * Supports recording of timings in different time units. Nanoseconds is the default. * Older files that are read in and do not have a time unit specified are assumed to be in milliseconds. 
* * WARNING: The timeunit does not enforce/convert any already-stored times to the new time unit from the old. * If e.g build time is set to 10 (intended to mean 10 milliseconds, as would be default), but then time * unit was changed to e.g seconds, the value stored as the build time is still 10. User must make sure * to either perform conversions themselves or be consistent in their timing units * long buildTimeInSecs = //some process * long buildTimeInResultsUnit = results.getTimeUnit().convert(builtTimeInSecs, TimeUnit.SECONDS); * results.setBuildTime(buildTimeInResultsUnit) * * Also supports the calculation of various evaluative performance metrics based on the predictions (accuracy, * auroc, nll etc.) which are used in the MultipleEstimatorEvaluation pipeline. For now, call * findAllStats() to calculate the performance metrics based on the stored predictions, and access them * via directly via the public variables. In the future, these metrics will likely be separated out * into their own package * * * EXAMPLE USAGE: * ClassifierResults res = new ClassifierResults(); * //set a particular timeunit, if using something other than nanos. Nanos recommended * //set any meta info you want to keep, e.g classifiername, datasetname... * * for (Instance inst : test) { * long startTime = //time * double[] dist = classifier.distributionForInstance(inst); * long predTime = //time - startTime * * double pred = max(dist); //with some particular tie breaking scheme built in. 
 *              //easiest is utilities.GenericTools.indexOfMax(double[])
 *
 *          res.addPrediction(inst.classValue(), dist, pred, predTime, ""); //description is optional
 *      }
 *
 *      res.finaliseResults(); //performs some basic validation, and calcs some relevant internal info
 *
 *      //can now find summary scores for these predictions
 *      //stats stored in simple public members for now
 *      res.findAllStats();
 *
 *      //and/or save to file
 *      res.writeFullResultsToFile(path);
 *
 *      //and could then load them back in
 *      ClassifierResults res2 = new ClassifierResults(path);
 *
 *      //they are automatically finalised, however the stats are not automatically found
 *      res2.findAllStats();
 *
 * TODOS:
 *      - Move metric/scores/stats into their own package, and rename consistently to scores OR metrics.
 *      - Rename finaliseResults to finalisePredictions, and add in the extra validation
 *      - Consult with group and implement writeCompactResultsTo...(...) as wanted
 *      - Maybe break down the object into different parts to reduce the get/set bloat. This
 *          is a very large and needlessly complex object. Predictions object, ExpInfo (line1) object, etcetc
 *
 * @author James Large (james.large@uea.ac.uk) + edits from just about everybody
 * @date 19/02/19
 */
public class ClassifierResults extends EstimatorResults implements DebugPrinting, Serializable {

    /**
     * Print a message with the filename to stdout when a file cannot be loaded.
     * Can get very tiresome if loading thousands of files with some expected failures,
     * and a higher level process already summarises them, thus this option to
     * turn off the messages
     */
    public static boolean printOnFailureToLoad = true;

    //LINE 1: meta info, set by user (fields inherited from EstimatorResults)
    // estimatorName
    // datasetName
    // split
    // foldID
    // timeUnit
    // description

    /** Controls which of the file layouts described in the class javadoc a write produces. */
    private enum FileType {
        /**
         * Writes/loads the first 3 lines, and all prediction info on the remaining numInstances lines
         *
         * Usable in all evaluations and post-processed ensembles etc.
         */
        PREDICTIONS,

        /**
         * Writes/can only be guaranteed to contain the first 3 lines, and the summative metrics, NOT
         * full prediction info
         *
         * Usable in evaluations that are restricted to the metrics described in this file,
         * but not post-processed ensembles
         */
        METRICS,

        /**
         * To be defined more precisely at later date. Intended use would be a classifiers' internal
         * storage, perhaps for checkpointing etc if full writing/reading would simply take up too much space
         * and IO compute overhead. Goastler to define
         */
        COMPACT
    };
    private FileType fileType = FileType.PREDICTIONS;

    //LINE 2: classifier setup/info, parameters. precise format is up to user.
    //e.g maybe this line includes the accuracy of each parameter set searched for in a tuning process, etc
    //old versions of file format also include build time.
    private String paras = "No parameter info";

    //LINE 3: acc, buildTime, testTime, memoryUsage
    //simple summarative performance stats.

    /**
     * Calculated from the stored predictions, cannot be explicitly set by user
     * (sentinel -1 means "not yet calculated"; see getAcc()/setAcc()).
     */
    private double acc = -1;

    // buildTime      (inherited from EstimatorResults)
    // testTime
    // benchmarkTime
    // memoryUsage

    /**
     * todo initially intended as a temporary measure, but might stay here until a switch
     * over to json etc is made
     *
     * See the experiments parameter trainEstimateMethod
     *
     * This defines the method and parameter of train estimate used, if one was done
     */
    private String errorEstimateMethod = "";

    /**
     * todo initially intended as a temporary measure, but might stay here until a switch
     * over to json etc is made
     *
     * This defines the total time taken to estimate the classifier's error. This currently
     * does not mean anything for classifiers implementing the TrainAccuracyEstimate interface,
     * and as such would need to set this themselves (but likely do not)
     *
     * For those classifiers that do not implement that, ClassifierExperiments.findOrSetupTrainEstimate(...) will set this value
     * as a wrapper around the entire evaluate call for whichever errorEstimateMethod is being used
     */
    private long errorEstimateTime = -1;

    /**
     * This measures the total time to build the classifier on the train data
     * AND to estimate the classifier's error on the same train data. For classifiers
     * that do not estimate their own error in some way during the build process,
     * this will simply be the buildTime and the errorEstimateTime added together.
     *
     * For classifiers that DO estimate their own error, buildPlusEstimateTime may
     * be anywhere between buildTime and buildTime+errorEstimateTime. Some or all of
     * the work needed to form an estimate (which the field errorEstimateTime measures from scratch)
     * may have already been accounted for by the buildTime
     */
    private long buildPlusEstimateTime = -1;

    //REMAINDER OF THE FILE - 1 prediction per line
    //raw performance data. currently just give parallel arrays
    //(all five lists are index-aligned: element i describes prediction i)
    private ArrayList<Double> trueClassValues;
    private ArrayList<Double> predClassValues;
    private ArrayList<double[]> predDistributions;
    private ArrayList<Long> predTimes;
    private ArrayList<String> predDescriptions;

    //inferred/supplied dataset meta info
    //(lazily inferred from the stored predictions when <= 0; see numClasses()/numInstances())
    private int numClasses;
    private int numInstances;

    //calculated performance metrics
    //accuracy can be re-calced, as well as stored on line three in files
    public double balancedAcc;
    public double sensitivity;
    public double specificity;
    public double precision;
    public double recall;
    public double f1;
    public double mcc; //mathews correlation coefficient
    public double nll;
    public double meanAUROC;
    public double stddev; //across cv folds, where applicable
    public double[][] confusionMatrix; //[actual class][predicted class]
    public double[] countPerClass;

    //Early classification
    public double earliness = -1;
    public double harmonicMean;

    /**
     * Used to avoid infinite NLL scores when prob of true class =0 (or very near 0).
     * NOTE(review): original sentence was truncated ("=0 or"); completed from the
     * constant's meaning — confirm intended wording.
     */
    private static double NLL_PENALTY=-6.64; //Log_2(0.01)

    //self-management flags
    /**
     * essentially controls whether a classifierresults object can have finaliseResults(trueClassVals)
     * called upon it. In theory, every class using the classifierresults object should make new
     * instantiations of it each time a set of results is being computed, and so this is not needed
     *
     * this was relevant especially prior to on-line prediction storage being supported, and effectively
     * the intention was to turn the results into a const object after all the results were stored
     *
     * todo: verify that this can be removed, or update to be more relevant.
     */
    private boolean finalised = false;
    private boolean allStatsFound = false;

    private boolean buildTimeDuplicateWarningPrinted = false; //flag such that a warning about build times in parseThirdLine(String) is only printed once, not spammed

    /**
     * System.nanoTime() can STILL return zero on some tiny datasets with simple classifiers,
     * because it does not have enough precision. This flag, if true, will allow timings
     * of zero, under the partial assumption/understanding from the user that times under
     * ~200 nanoseconds can be equated to 0.
     *
     * The flag defaults to false, however. Correct usage of this flag would be
     * to set it to true in circumstances where you, the coder supplying some kind of
     * timing, KNOW that you are measuring in millis, AND the classifierResults object's
     * timeunit is in millis, AND you reset the flag to false again immediately after
     * adding the potentially offending time, such that the flag is not mistakenly left
     * on for genuinely erroneous timing additions later on.
     *
     * This is in effect a double check that you the user know what you are doing, and old
     * code that sets (buildtimes in millis, mostly) times can be caught and updated if they cause
     * problems
     *
     * E.g
     * results.turnOffZeroTimingsErrorSuppression();
     * results.setBuildTime(time); // or e.g results.addPrediction(...., time, ...)
     * results.turnOnZeroTimingsErrorSuppression();
     */
    private boolean errorOnTimingOfZero = false;

    //functional getters to retrieve info from a classifierresults object, initialised/stored here for convenience.
    //these are currently only used in PerformanceMetric.java, can take any results type as a hack to allow other
    //results in evaluation.
    public static final Function<EstimatorResults, Double> GETTER_Accuracy = (EstimatorResults cr) -> ((ClassifierResults)cr).acc;
    public static final Function<EstimatorResults, Double> GETTER_BalancedAccuracy = (EstimatorResults cr) -> ((ClassifierResults)cr).balancedAcc;
    public static final Function<EstimatorResults, Double> GETTER_AUROC = (EstimatorResults cr) -> ((ClassifierResults)cr).meanAUROC;
    public static final Function<EstimatorResults, Double> GETTER_NLL = (EstimatorResults cr) -> ((ClassifierResults)cr).nll;
    public static final Function<EstimatorResults, Double> GETTER_F1 = (EstimatorResults cr) -> ((ClassifierResults)cr).f1;
    public static final Function<EstimatorResults, Double> GETTER_MCC = (EstimatorResults cr) -> ((ClassifierResults)cr).mcc;
    public static final Function<EstimatorResults, Double> GETTER_Precision = (EstimatorResults cr) -> ((ClassifierResults)cr).precision;
    public static final Function<EstimatorResults, Double> GETTER_Recall = (EstimatorResults cr) -> ((ClassifierResults)cr).recall;
    public static final Function<EstimatorResults, Double> GETTER_Sensitivity = (EstimatorResults cr) -> ((ClassifierResults)cr).sensitivity;
    public static final Function<EstimatorResults, Double> GETTER_Specificity = (EstimatorResults cr) -> ((ClassifierResults)cr).specificity;
    public static final Function<EstimatorResults, Double> GETTER_Earliness = (EstimatorResults cr) -> ((ClassifierResults)cr).earliness;
    public static final Function<EstimatorResults, Double> GETTER_HarmonicMean = (EstimatorResults cr) -> ((ClassifierResults)cr).harmonicMean;

    //todo revisit these when more willing to refactor stats pipeline to avoid assumption of doubles.
    //a double can accurately (except for the standard double precision problems) hold at most ~7 weeks worth of nano seconds
    // a double's mantissa = 52bits, 2^52 / 1000000000 / 60 / 60 / 24 / 7 = 7.something weeks
    //so, will assume the usage/requirement for milliseconds in the stats pipeline, to avoid the potential future problem
    //of meta-ensembles taking more than a week, etc. (or even just summing e.g 30 large times to be averaged)
    //it is still preferable of course to store any timings in nano's in the classifierresults object since they'll
    //store them as longs.
    //(toDoubleMillis and divideAvoidInfinity are helpers defined elsewhere in this file)
    public static final Function<EstimatorResults, Double> GETTER_fromScratchEstimateTimeDoubleMillis = (EstimatorResults cr) -> toDoubleMillis(((ClassifierResults)cr).errorEstimateTime, cr.timeUnit);
    public static final Function<EstimatorResults, Double> GETTER_totalBuildPlusEstimateTimeDoubleMillis = (EstimatorResults cr) -> toDoubleMillis(((ClassifierResults)cr).buildPlusEstimateTime, cr.timeUnit);
    public static final Function<EstimatorResults, Double> GETTER_additionalTimeForEstimateDoubleMillis = (EstimatorResults cr) -> toDoubleMillis(((ClassifierResults)cr).buildPlusEstimateTime - cr.buildTime, cr.timeUnit);
    public static final Function<EstimatorResults, Double> GETTER_fromScratchEstimateTimeDoubleMillisBenchmarked = (EstimatorResults cr) -> divideAvoidInfinity(GETTER_fromScratchEstimateTimeDoubleMillis.apply(cr), GETTER_benchmarkTime.apply(cr));
    public static final Function<EstimatorResults, Double> GETTER_totalBuildPlusEstimateTimeDoubleMillisBenchmarked = (EstimatorResults cr) -> divideAvoidInfinity(GETTER_totalBuildPlusEstimateTimeDoubleMillis.apply(cr), GETTER_benchmarkTime.apply(cr));
    public static final Function<EstimatorResults, Double> GETTER_additionalTimeForEstimateDoubleMillisBenchmarked = (EstimatorResults cr) -> divideAvoidInfinity(GETTER_additionalTimeForEstimateDoubleMillis.apply(cr), GETTER_benchmarkTime.apply(cr));

    /*********************************
     *
     *
           CONSTRUCTORS
     *
     */

    /**
     * Create an empty classifierResults object.
     *
     * If number of classes is known when making the object, it is safer to use the constructor
     * the takes an int representing numClasses and supply the number of classes directly.
     *
     * In some extreme use cases, predictions on dataset splits that a particular classifier results represents
     * may not have examples of each class that actually exists in the full dataset. If it is left
     * to infer the number of classes, some may be missing.
     */
    public ClassifierResults() {
        trueClassValues= new ArrayList<>();
        predClassValues = new ArrayList<>();
        predDistributions = new ArrayList<>();
        predTimes = new ArrayList<>();
        predDescriptions = new ArrayList<>();

        finalised = false;
    }

    /**
     * Create an empty classifierResults object.
     *
     * If number of classes is known when making the object, it is safer to use this constructor
     * and supply the number of classes directly.
     *
     * In some extreme use cases, predictions on dataset splits that a particular classifier results represents
     * may not have examples of each class that actually exists in the full dataset. If it is left
     * to infer the number of classes, some may be missing.
     */
    public ClassifierResults(int numClasses) {
        trueClassValues= new ArrayList<>();
        predClassValues = new ArrayList<>();
        predDistributions = new ArrayList<>();
        predTimes = new ArrayList<>();
        predDescriptions = new ArrayList<>();

        this.numClasses = numClasses;
        finalised = false;
    }

    /**
     * Load a classifierresults object from the file at the specified path
     * (delegates entirely to loadResultsFromFile, defined elsewhere in this file).
     */
    public ClassifierResults(String filePathAndName) throws FileNotFoundException, Exception {
        loadResultsFromFile(filePathAndName);
    }

    /**
     * Create a classifier results object with complete predictions (equivalent to addAllPredictions()). The results are
     * FINALISED after initialisation. Meta info such as classifier name, datasetname... can still be set after construction.
     *
     * The descriptions array argument may be null, in which case the descriptions are stored as empty strings.
     *
     * All other arguments are required in full, however
     */
    public ClassifierResults(double[] trueClassVals, double[] predictions, double[][] distributions, long[] predTimes, String[] descriptions) throws Exception {
        trueClassValues= new ArrayList<>();
        predClassValues = new ArrayList<>();
        predDistributions = new ArrayList<>();
        this.predTimes = new ArrayList<>();
        predDescriptions = new ArrayList<>();

        addAllPredictions(trueClassVals, predictions, distributions, predTimes, descriptions);
        finaliseResults();
    }

    /**
     * System.nanoTime() can STILL return zero on some tiny datasets with simple classifiers,
     * because it does not have enough precision. This flag, if true, will allow timings
     * of zero, under the partial assumption/understanding from the user that times under
     * ~200 nanoseconds can be equated to 0.
     *
     * The flag defaults to false, however. Correct usage of this flag would be
     * to set it to true in circumstances where you, the coder supplying some kind of
     * timing, KNOW that you are measuring in nanos, AND the classifierResults object's
     * timeunit is in nanos, AND you reset the flag to false again immediately after
     * adding the potentially offending time, such that the flag is not mistakenly left
     * on for genuinely erroneous timing additions later on.
     *
     * This is in effect a double check that you the user know what you are doing, and old
     * code that sets (buildtimes in millis, mostly) times can be caught and updated if they cause
     * problems
     *
     * E.g
     * results.turnOffZeroTimingsErrorSuppression();
     * results.setBuildTime(time); // or e.g results.addPrediction(...., time, ...)
     * results.turnOnZeroTimingsErrorSuppression();
     */
    public void turnOffZeroTimingsErrors() {
        errorOnTimingOfZero = false;
    }

    /**
     * System.nanoTime() can STILL return zero on some tiny datasets with simple classifiers,
     * because it does not have enough precision. This flag, if true, will allow timings
     * of zero, under the partial assumption/understanding from the user that times under
     * ~200 nanoseconds can be equated to 0.
     *
     * The flag defaults to false, however. Correct usage of this flag would be
     * to set it to true in circumstances where you, the coder supplying some kind of
     * timing, KNOW that you are measuring in millis, AND the classifierResults object's
     * timeunit is in millis, AND you reset the flag to false again immediately after
     * adding the potentially offending time, such that the flag is not mistakenly left
     * on for genuinely erroneous timing additions later on.
     *
     * This is in effect a double check that you the user know what you are doing, and old
     * code that sets (buildtimes in millis, mostly) times can be caught and updated if they cause
     * problems
     *
     * E.g
     * results.turnOffZeroTimingsErrorSuppression();
     * results.setBuildTime(time); // or e.g results.addPrediction(...., time, ...)
     * results.turnOnZeroTimingsErrorSuppression();
     */
    public void turnOnZeroTimingsErrors() {
        errorOnTimingOfZero = true;
    }

    /***********************
     *
     *      DATASET META INFO
     *
     *
     */

    /**
     * Will return the number of classes if it has been a) explicitly set or b) found via
     * the size of the probability distributions attached to predictions that have been
     * stored/loaded, otherwise this will return 0.
     */
    public int numClasses() {
        if (numClasses <= 0)
            inferNumClasses();
        return numClasses;
    }
    public void setNumClasses(int numClasses) {
        this.numClasses = numClasses;
    }
    // Infers class count from the width of the first stored distribution, 0 if none stored.
    private void inferNumClasses() {
        if (predDistributions.isEmpty())
            this.numClasses = 0;
        else
            this.numClasses = predDistributions.get(0).length;
    }

    public int numInstances() {
        if (numInstances <= 0)
            inferNumInstances();
        return numInstances;
    }
    // Infers instance count from the number of stored predicted class values.
    private void inferNumInstances() {
        this.numInstances = predClassValues.size();
    }

    /*****************************
     *
     *     LINE 2 GETS/SETS
     *
     */

    /**
     * For now, user dependent on the formatting of this string, and really, the contents of it.
     * It is notionally intended to contain the parameters of the classifier used to produce the
     * attached predictions, but could also store other things as well.
     */
    public String getParas() { return paras; }

    /**
     * For now, user dependent on the formatting of this string, and really, the contents of it.
     * It is notionally intended to contain the parameters of the classifier used to produce the
     * attached predictions, but could also store other things as well.
     */
    public void setParas(String paras) { this.paras = paras; }

    /*****************************
     *
     *     LINE 3 GETS/SETS
     *
     */

    /**
     * This setter exists purely for backwards compatibility, for classifiers that
     * for whatever reason do not have per-instance prediction info.
     *
     * This might be because
     *    a) The accuracy is gathered from some internal/weka eval process that we dont
     *       want to edit, e.g out of bag error in some forests.
     *    b) The classifier (typically implementing TrainAccuracyEstimate) does not yet
     *       save prediction info, simply because it was written before we did that and
     *       hasnt been updated. These SHOULD be refactored over time.
     *
     * This method will print a suitably annoying message when first called, as a reminder
     * until the accuracy is no longer directly set
     *
     * If you REALLY dont want this message being printed, since e.g. it's messing up your own print formatting,
     * set ClassifierResults.printSetAccWarning to false. This also acts a way of ensuring that you've read this
     * message...
     *
     * Todo: remove this method, i.e. the possibility to directly set the accuracy instead of
     * have it calculated implicitly, when possible.
     */
    public void setAcc(double acc) {
        if (printSetAccWarning && firstTimeInSetAcc) {
            System.out.println("*********");
            System.out.println("");
            System.out.println("ClassifierResults.setAcc(double acc) called, friendly reminder to refactor the code that "
                    + "made this call. If you REALLY dont want this message being printed right now, since e.g. it's messing up your "
                    + "own print formatting, set ClassifierResults.printSetAccWarning to false.");
            System.out.println("");
            System.out.println("*********");

            firstTimeInSetAcc = false;
        }

        this.acc = acc;
    }
    public static boolean printSetAccWarning = true;
    private boolean firstTimeInSetAcc = true; // per-instance latch so the warning prints at most once

    /** Lazily calculates accuracy from the stored predictions on first access (sentinel -1). */
    @Override
    public double getAcc() {
        if (acc < 0)
            calculateAcc();
        return acc;
    }

    // true once acc has been set or computed (the sentinel "unset" value is -1)
    public boolean isAccSet(){
        return acc<0 ? false: true;
    }

    // Recomputes acc as the proportion of stored predictions equal to their true class.
    // NOTE(review): if predClassValues is empty, acc becomes 0.0/0 == NaN; also assumes
    // trueClassValues and predClassValues have equal length — confirm both invariants
    // are guaranteed by finaliseResults().
    private void calculateAcc() {
        if (trueClassValues == null || trueClassValues.isEmpty() || trueClassValues.get(0) == -1) {
            System.out.println("**getAcc():calculateAcc() no true class values supplied yet, cannot calculate accuracy");
            return;
        }

        int size = predClassValues.size();
        double correct = .0;
        for (int i = 0; i < size; i++) {
            if (predClassValues.get(i).equals(trueClassValues.get(i)))
                correct++;
        }

        acc = correct / size;
    }

    public long getBuildTime() { return buildTime; }
    public long getBuildTimeInNanos() { return timeUnit.toNanos(buildTime); }

    /**
     * @throws RuntimeException if zero-timing errors are enabled (see turnOnZeroTimingsErrors())
     *      and buildTime is less than 1
     */
    public void setBuildTime(long buildTime) {
        if (errorOnTimingOfZero && buildTime < 1)
            throw new RuntimeException("Build time passed has invalid value, " + buildTime + ". If greater resolution is needed, "
                + "use nano seconds (e.g System.nanoTime()) and set the TimeUnit of the classifierResults object to nanoseconds.\n\n"
                + "If you are using nanoseconds but STILL getting this error, read the javadoc for and use turnOffZeroTimingsErrors() "
                + "for this call");
        this.buildTime = buildTime;
    }

    public long getTestTime() { return testTime; }
    public long getTestTimeInNanos() { return timeUnit.toNanos(testTime); }

    /**
     * @throws Exception if zero-timing errors are enabled (see turnOnZeroTimingsErrors())
     *      and testTime is less than 1
     */
    public void setTestTime(long testTime) throws Exception {
        if (errorOnTimingOfZero && testTime < 1)
            throw new Exception("Test time passed has invalid value, " + testTime + ". If greater resolution is needed, "
                + "use nano seconds (e.g System.nanoTime()) and set the TimeUnit of the classifierResults object to nanoseconds.\n\n"
                + "If you are using nanoseconds but STILL getting this error, read the javadoc for and use turnOffZeroTimingsErrors() "
                + "for this call");
        this.testTime = testTime;
    }

    public long getMemory() { return memoryUsage; }
    public void setMemory(long memory) {
        this.memoryUsage = memory;
    }

    /**
     * The time taken to perform some standard benchmarking operation, to allow for a (not necessarily precise)
     * way to measure the general speed of the hardware that these results were made on, such that users
     * analysing the results may scale the timings in this file proportional to the benchmarks to get a consistent relative scale
     * across different results sets.
     *
     * It is up to the user what this benchmark operation is, and how long it is (roughly) expected to take. If no benchmark
     * time is supplied, the default value is -1
     */
    public long getBenchmarkTime() {
        return benchmarkTime;
    }

    /**
     * The time taken to perform some standard benchmarking operation, to allow for a (not necessarily precise)
     * way to measure the general speed of the hardware that these results were made on, such that users
     * analysing the results may scale the timings in this file proportional to the benchmarks to get a consistent relative scale
     * across different results sets.
     *
     * It is up to the user what this benchmark operation is, and how long it is (roughly) expected to take.
 If no benchmark
     * time is supplied, the default value is -1
     */
    public void setBenchmarkTime(long benchmarkTime) {
        this.benchmarkTime = benchmarkTime;
    }

    /**
     * todo initially intended as a temporary measure, but might stay here until a switch
     * over to json etc is made
     *
     * See the experiments parameter trainEstimateMethod
     *
     * This defines the method and parameter of train estimate used, if one was done
     */
    public String getErrorEstimateMethod() {
        return errorEstimateMethod;
    }

    /**
     * todo initially intended as a temporary measure, but might stay here until a switch
     * over to json etc is made
     *
     * See the experiments parameter trainEstimateMethod
     *
     * This defines the method and parameter of train estimate used, if one was done
     */
    public void setErrorEstimateMethod(String errorEstimateMethod) {
        this.errorEstimateMethod = errorEstimateMethod;
    }

    /**
     * todo initially intended as a temporary measure, but might stay here until a switch
     * over to json etc is made
     *
     * This defines the total time taken to estimate the classifier's error. This currently
     * does not mean anything for classifiers implementing the TrainAccuracyEstimate interface,
     * and as such would need to set this themselves (but likely do not)
     *
     * For those classifiers that do not implement that, ClassifierExperiments.findOrSetupTrainEstimate(...) will set this value
     * as a wrapper around the entire evaluate call for whichever errorEstimateMethod is being used
     */
    public long getErrorEstimateTime() {
        return errorEstimateTime;
    }
    public long getErrorEstimateTimeInNanos() {
        return timeUnit.toNanos(errorEstimateTime);
    }

    /**
     * todo initially intended as a temporary measure, but might stay here until a switch
     * over to json etc is made
     *
     * This defines the total time taken to estimate the classifier's error. This currently
     * does not mean anything for classifiers implementing the TrainAccuracyEstimate interface,
     * and as such would need to set this themselves (but likely do not)
     *
     * For those classifiers that do not implement that, ClassifierExperiments.findOrSetupTrainEstimate(...) will set this value
     * as a wrapper around the entire evaluate call for whichever errorEstimateMethod is being used
     */
    public void setErrorEstimateTime(long errorEstimateTime) {
        this.errorEstimateTime = errorEstimateTime;
    }

    /**
     * This measures the total time to build the classifier on the train data
     * AND to estimate the classifier's error on the same train data. For classifiers
     * that do not estimate their own error in some way during the build process,
     * this will simply be the buildTime and the errorEstimateTime added together.
     *
     * For classifiers that DO estimate their own error, buildPlusEstimateTime may
     * be anywhere between buildTime and buildTime+errorEstimateTime. Some or all of
     * the work needed to form an estimate (which the field errorEstimateTime measures from scratch)
     * may have already been accounted for by the buildTime
     */
    public long getBuildPlusEstimateTime() {
        return buildPlusEstimateTime;
    }
    public long getBuildPlusEstimateTimeInNanos() {
        return timeUnit.toNanos(buildPlusEstimateTime);
    }

    /**
     * This measures the total time to build the classifier on the train data
     * AND to estimate the classifier's error on the same train data. For classifiers
     * that do not estimate their own error in some way during the build process,
     * this will simply be the buildTime and the errorEstimateTime added together.
     *
     * For classifiers that DO estimate their own error, buildPlusEstimateTime may
     * be anywhere between buildTime and buildTime+errorEstimateTime. Some or all of
     * the work needed to form an estimate (which the field errorEstimateTime measures from scratch)
     * may have already been accounted for by the buildTime
     */
    public void setBuildPlusEstimateTime(long buildPlusEstimateTime) {
        this.buildPlusEstimateTime = buildPlusEstimateTime;
    }

    /****************************
     *
     *   PREDICTION STORAGE
     *
     */

    /**
     * Will update the internal prediction info using the values passed. User must pass the predicted class
     * so that they may resolve ties how they want (e.g first, randomly, take modal class, etc).
     * The standard, used in most places, would be utilities.GenericTools.indexOfMax(double[] dist)
     *
     * The description argument may be null, however all other arguments are required in full
     *
     * Todo future, maaaybe add enum/functor arg for tie resolution to handle it here.
     *
     * The true class is missing, however can be added in one go later with the
     * method finaliseResults(double[] trueClassVals)
     */
    public void addPrediction(double[] dist, double predictedClass, long predictionTime, String description) throws RuntimeException {
        predDistributions.add(dist);
        predClassValues.add(predictedClass);

        if (description == null)
            predDescriptions.add("");
        else
            predDescriptions.add(description);

        // NOTE(review): if this timing check throws, the distribution/class/description
        // above have already been appended, leaving the parallel lists one element longer
        // than predTimes — consider validating the time before mutating any list.
        if (errorOnTimingOfZero && predictionTime < 1)
            throw new RuntimeException("Prediction time passed has invalid value, " + predictionTime + ". If greater resolution is needed, "
                + "use nano seconds (e.g System.nanoTime()) and set the TimeUnit of the classifierResults object to nanoseconds.\n\n"
                + "If you are using nanoseconds but STILL getting this error, read the javadoc for and use turnOffZeroTimingsErrors() "
                + "for this call");
        else {
            predTimes.add(predictionTime);

            // testTime is the running sum of all per-prediction times (sentinel -1 = unset)
            if (testTime == -1)
                testTime = predictionTime;
            else
                testTime += predictionTime;
        }

        numInstances++;
    }

    /**
     * Will update the internal prediction info using the values passed. User must pass the predicted class
     * so that they may resolve ties how they want (e.g first, randomly, take modal class, etc).
     * The standard, used in most places, would be utilities.GenericTools.indexOfMax(double[] dist)
     *
     * The description argument may be null, however all other arguments are required in full
     *
     * Todo future, maaaybe add enum for tie resolution to handle it here.
     */
    public void addPrediction(double trueClassVal, double[] dist, double predictedClass, long predictionTime, String description) throws RuntimeException {
        addPrediction(dist,predictedClass,predictionTime,description);
        trueClassValues.add(trueClassVal);
    }

    /**
     * Adds all the prediction info onto this classifierResults object. Does NOT finalise the results,
     * such that (e.g) predictions from multiple dataset splits can be added to the same object if wanted
     *
     * The description argument may be null, however all other arguments are required in full
     */
    public void addAllPredictions(double[] trueClassVals, double[] predictions, double[][] distributions, long[] predTimes, String[] descriptions) throws RuntimeException {
        // NOTE(review): these are assert statements — they only run when the JVM is
        // started with -ea; with assertions disabled, mismatched array lengths will
        // surface later as an ArrayIndexOutOfBoundsException instead.
        assert(trueClassVals.length == predictions.length);
        assert(trueClassVals.length == distributions.length);
        assert(trueClassVals.length == predTimes.length);

        if (descriptions != null)
            assert(trueClassVals.length == descriptions.length);

        for (int i = 0; i < trueClassVals.length; i++) {
            if (descriptions == null)
                addPrediction(trueClassVals[i], distributions[i], predictions[i], predTimes[i], null);
            else
                addPrediction(trueClassVals[i], distributions[i], predictions[i], predTimes[i], descriptions[i]);
        }
    }

    /**
     * Adds all the prediction info onto this classifierResults object. Does NOT finalise the results,
     * such that (e.g) predictions from multiple dataset splits can be added to the same object if wanted
     *
     * True class values can later be supplied (ALL IN ONE GO, if working to the above example usage..)
using * finaliseResults(double[] testClassVals) * * The description argument may be null, however all other arguments are required in full */ public void addAllPredictions(double[] predictions, double[][] distributions, long[] predTimes, String[] descriptions ) throws RuntimeException { //todo replace asserts with actual exceptions assert(predictions.length == distributions.length); assert(predictions.length == predTimes.length); if (descriptions != null) assert(predictions.length == descriptions.length); for (int i = 0; i < predictions.length; i++) { if (descriptions == null) addPrediction(distributions[i], predictions[i], predTimes[i], ""); else addPrediction(distributions[i], predictions[i], predTimes[i], descriptions[i]); } } /** * Will perform some basic validation to make sure that everything is here * that is expected, and compute the accuracy etc ready for file writing. * * Typical usage: results.finaliseResults(instances.attributeToDoubleArray(instances.classIndex())) */ public void finaliseResults(double[] testClassVals) throws Exception { //todo extra verification if (finalised) { System.out.println("finaliseResults(double[] testClassVals): Results already finalised, skipping re-finalisation"); return; } if (testClassVals.length != predClassValues.size()) throw new Exception("finaliseTestResults(double[] testClassVals): Number of predictions " + "made and number of true class values passed do not match"); trueClassValues = new ArrayList<>(); for(double d:testClassVals) trueClassValues.add(d); finaliseResults(); } /** * Will perform some basic validation to make sure that everything is here * that is expected, and compute the accuracy etc ready for file writing. * * You can use this method, instead of the version that takes the double[] testClassVals * as an argument, if you've been storing predictions via the addPrediction overload * that takes the true class value of each prediction. 
*/ public void finaliseResults() throws Exception { if (finalised) { printlnDebug("finaliseResults(): Results already finalised, skipping re-finalisation"); return; } if (numInstances <= 0) inferNumInstances(); if (numClasses <= 0) inferNumClasses(); //todo extra verification if (predDistributions == null || predClassValues == null || predDistributions.isEmpty() || predClassValues.isEmpty()) throw new Exception("finaliseTestResults(): no predictions stored for this module"); double correct = .0; for (int inst = 0; inst < predClassValues.size(); inst++) if (trueClassValues.get(inst).equals(predClassValues.get(inst))) ++correct; acc = correct/trueClassValues.size(); finalised = true; } public boolean hasProbabilityDistributionInformation() { return predDistributions != null && !predDistributions.isEmpty() && predDistributions.size() == predClassValues.size() && predDistributions.get(0) != null; } /** * If this results object does not contain probability distributions but does * contain predicted classes, this will infer distributions as one-hot vectors * from the predicted class values, i.e if class 0 is predicted in a three class * problem, dist would be [ 1.0, 0.0, 0.0 ] * * If this object already contains distributions, this method will do nothing * * Returns whether or not values were missing but have been populated * * The number of classes is inferred from via length(unique(trueclassvalues)). As a * reminder of why this method should not generally be used unless you have a specific * reason, this may not be entirely correct, if e.g a particular cv fold of a particular * subsample does not contain instances of every class. And also in general it assumes * that the true class values supplied (as they would be if read from file) Consider yourself warned * * Intended to help with old results files that may not have distributions stored. 
* Should not be used by default anywhere and everywhere to overcome laziness in * newly generated results, thus in part it's implementation as a single method applied * to an already populated set of results. * * Intended usage: * res.loadFromFile(someOldFilePotentiallyMissingDists); * if (ignoreMissingDists) { * res.populateMissingDists(); * } * // res.findAllStats() etcetcetc */ public boolean populateMissingDists() { if (this.hasProbabilityDistributionInformation()) return false; if (this.numClasses <= 0) //ayyyy java8 being used for something numClasses = (int) trueClassValues.stream().distinct().count(); predDistributions = new ArrayList<>(predClassValues.size()); for (double d : predClassValues) { double[] dist = new double[numClasses]; dist[(int)d] = 1; predDistributions.add(dist); } return true; } /****************************** * * RAW DATA ACCESSORS * * getAsList, getAsArray, and getSingleElement of the four lists describing predictions * */ /** * */ public ArrayList<Double> getTrueClassVals() { return trueClassValues; } public double[] getTrueClassValsAsArray(){ double[] d=new double[trueClassValues.size()]; int i=0; for(double x:trueClassValues) d[i++]=x; return d; } public double getTrueClassValue(int index){ return trueClassValues.get(index); } public ArrayList<Double> getPredClassVals(){ return predClassValues; } public double[] getPredClassValsAsArray(){ double[] d=new double[predClassValues.size()]; int i=0; for(double x:predClassValues) d[i++]=x; return d; } public double getPredClassValue(int index){ return predClassValues.get(index); } public ArrayList<double[]> getProbabilityDistributions() { return predDistributions; } public double[][] getProbabilityDistributionsAsArray() { return predDistributions.toArray(new double[][] {}); } public double[] getProbabilityDistribution(int i){ if(i<predDistributions.size()) return predDistributions.get(i); return null; } public ArrayList<Long> getPredictionTimes() { return predTimes; } public long[] 
getPredictionTimesAsArray() { long[] l=new long[predTimes.size()]; int i=0; for(long x:predTimes) l[i++]=x; return l; } public long getPredictionTime(int index) { return predTimes.get(index); } public long getPredictionTimeInNanos(int index) { return timeUnit.toNanos(getPredictionTime(index)); } public ArrayList<String> getPredDescriptions() { return predDescriptions; } public String[] getPredDescriptionsAsArray() { String[] ds=new String[predDescriptions.size()]; int i=0; for(String d:predDescriptions) ds[i++]=d; return ds; } public String getPredDescription(int index) { return predDescriptions.get(index); } @Override public void cleanPredictionInfo() { predDistributions = null; predClassValues = null; trueClassValues = null; predTimes = null; predDescriptions = null; } /******************************** * * FILE READ/WRITING * */ public static boolean exists(File file) { return file.exists() && file.length()>0; //todo and is valid, maybe } public static boolean exists(String path) { return exists(new File(path)); } private boolean firstTimeDistMissing = true; public static boolean printDistMissingWarning = true; /** * Reads and STORES the prediction in this classifierresults object * returns true if the prediction described by this string was correct (i.e. trueclass==predclass) * * INCREMENTS NUMINSTANCES * * If numClasses is still less than 0, WILL set numclasses if distribution info is present. * * [true],[pred], ,[dist[0]],...,[dist[c]], ,[predTime], ,[description until end of line, may have commas in it] */ private boolean instancePredictionFromString(String predLine) throws Exception { String[] split=predLine.split(","); //collect actual/predicted class double trueClassVal=Double.valueOf(split[0].trim()); double predClassVal=Double.valueOf(split[1].trim()); if(split.length<3) { //no probabilities, no timing. 
VERY old files will not have them if (printDistMissingWarning && firstTimeDistMissing) { System.out.println("*********"); System.out.println(""); System.out.println("Probability distribution information missing in file. Be aware that certain stats cannot be computed, usability will be diminished. " + "If you know this and dont want this message being printed right now, since e.g. it's messing up your " + "own print formatting, set ClassifierResults.printDistMissingWarning to false."); System.out.println(""); System.out.println("*********"); firstTimeDistMissing = false; } addPrediction(trueClassVal, null, predClassVal, -1, ""); return trueClassVal==predClassVal; } //else //collect probabilities final int distStartInd = 3; //actual, predicted, space, distStart double[] dist = null; if (numClasses < 2) { List<Double> distL = new ArrayList<>(); for(int i = distStartInd; i < split.length; i++) { if (split[i].equals("")) break; //we're at the empty-space-separator between probs and timing else distL.add(Double.valueOf(split[i].trim())); } numClasses = distL.size(); assert(numClasses >= 2); dist = new double[numClasses]; for (int i = 0; i < numClasses; i++) dist[i] = distL.get(i); } else { //we know how many classes there should be, use this as implicit //file verification dist = new double[numClasses]; for (int i = 0; i < numClasses; i++) { //now need to offset by 3. 
dist[i] = Double.valueOf(split[i+distStartInd].trim()); } } //collect timings long predTime = -1; final int timingInd = distStartInd + (numClasses-1) + 1 + 1; //actual, predicted, space, dist, space, timing if (split.length > timingInd) predTime = Long.parseLong(split[timingInd].trim()); //collect description String description = ""; final int descriptionInd = timingInd + 1 + 1; //actual, predicted, space, dist, space, timing, space, description if (split.length > descriptionInd) { description = split[descriptionInd]; //no reason currently why the description passed cannot have commas in it, //might be a natural way to separate it in to different parts. //description reall just fills up the remainder of the line. for (int i = descriptionInd+1; i < split.length; i++) description += "," + split[i]; } addPrediction(trueClassVal, dist, predClassVal, predTime, description); return trueClassVal==predClassVal; } private void instancePredictionsFromScanner(Scanner in) throws Exception { double correct = 0; while (in.hasNext()) { String line = in.nextLine(); //may be trailing empty lines at the end of the file if (line == null || line.equals("")) break; if (instancePredictionFromString(line)) correct++; } acc = correct / numInstances; } /** * [true],[pred], ,[dist[0]],...,[dist[c]], ,[predTime], ,[description until end of line, may have commas in it] */ private String instancePredictionToString(int i) { StringBuilder sb = new StringBuilder(); sb.append(trueClassValues.get(i).intValue()).append(","); sb.append(predClassValues.get(i).intValue()); //probs sb.append(","); //<empty space> double[] probs=predDistributions.get(i); for(double d:probs) sb.append(",").append(GenericTools.RESULTS_DECIMAL_FORMAT.format(d)); //timing sb.append(",,").append(predTimes.get(i)); //<empty space>, timing //description sb.append(",,").append(predDescriptions.get(i)); //<empty space>, description return sb.toString(); } public String instancePredictionsToString() throws Exception{ //todo extra 
verification if (trueClassValues == null || trueClassValues.size() == 0 || trueClassValues.get(0) == -1) throw new Exception("No true class value stored, call finaliseResults(double[] trueClassVal)"); if(numInstances()>0 &&(predDistributions.size()==trueClassValues.size()&& predDistributions.size()==predClassValues.size())){ StringBuilder sb=new StringBuilder(""); for(int i=0;i<numInstances();i++){ sb.append(instancePredictionToString(i)); if(i<numInstances()-1) sb.append("\n"); } return sb.toString(); } else return "No Instance Prediction Information"; } @Override public String toString() { return generateFirstLine(); } public String writeFullResultsToString() throws Exception { finaliseResults(); fileType = FileType.PREDICTIONS; StringBuilder st = new StringBuilder(); st.append(generateFirstLine()).append("\n"); st.append(generateSecondLine()).append("\n"); st.append(generateThirdLine()).append("\n"); st.append(instancePredictionsToString()); return st.toString(); } public void writeFullResultsToFile(String path) throws Exception { OutFile out = null; try { out = new OutFile(path); out.writeString(writeFullResultsToString()); } catch (Exception e) { throw new Exception("Error writing results file.\n" + "Outfile most likely didnt open successfully, probably directory doesnt exist yet.\n" + "Path: " + path +"\nError: "+ e); } finally { if (out != null) out.closeFile(); } } public String writeCompactResultsToString() throws Exception { finaliseResults(); fileType = FileType.COMPACT; StringBuilder st = new StringBuilder(); throw new UnsupportedOperationException("COMPACT file writing not yet supported "); // return st.toString(); } public void writeCompactResultsToFile(String path) throws Exception { OutFile out = null; try { out = new OutFile(path); out.writeString(writeFullResultsToString()); } catch (Exception e) { throw new Exception("Error writing results file.\n" + "Outfile most likely didnt open successfully, probably directory doesnt exist yet.\n" + "Path: " 
+ path +"\nError: "+ e); } finally { if (out != null) out.closeFile(); } } /** * Writes the first three meta-data lines of the file as normal, but INSTEAD OF * writing predictions, writes the evaluative metrics produced by allPerformanceMetricsToString() * to fill the rest of the file. This is intended to save disk space and/or memory where * full prediction info is not needed, only the summative information. Results files * written using this method would not be used to train a post-processed ensemble at a * later date, forexample, but could still be used as part of a comparative evaluation */ public String writeSummaryResultsToString() throws Exception { finaliseResults(); findAllStatsOnce(); fileType = FileType.METRICS; StringBuilder st = new StringBuilder(); st.append(generateFirstLine()).append("\n"); st.append(generateSecondLine()).append("\n"); st.append(generateThirdLine()).append("\n"); st.append(allPerformanceMetricsToString()); return st.toString(); } /** * Writes the first three meta-data lines of the file as normal, but INSTEAD OF * writing predictions, writes the evaluative metrics produced by allPerformanceMetricsToString() * to fill the rest of the file. This is intended to save disk space and/or memory where * full prediction info is not needed, only the summative information. 
Results files * written using this method would not be used to train a post-processed ensemble at a * later date, forexample, but could still be used as part of a comparative evaluation */ public void writeSummaryResultsToFile(String path) throws Exception { OutFile out = null; try { out = new OutFile(path); out.writeString(writeSummaryResultsToString()); } catch (Exception e) { throw new Exception("Error writing results file.\n" + "Outfile most likely didnt open successfully, probably directory doesnt exist yet.\n" + "Path: " + path +"\nError: "+ e); } finally { if (out != null) out.closeFile(); } } private void parseFirstLine(String line) { String[] parts = line.split(","); if (parts.length == 0) return; //old tuned classifiers (and maybe others) just wrote a classifier name identifier //covering for backward compatability, otherwise datasetname is first if (parts.length == 1) estimatorName = parts[0]; else { datasetName = parts[0]; estimatorName = parts[1]; } if (parts.length > 2) split = parts[2]; if (parts.length > 3) foldID = Integer.parseInt(parts[3]); if (parts.length > 4) setTimeUnitFromString(parts[4]); else //time unit is missing, assumed to be older file, which recorded build times in milliseconds by default timeUnit = TimeUnit.MILLISECONDS; if (parts.length > 5) fileType = FileType.valueOf(parts[5]); if (parts.length > 6) description = parts[6]; //todo duplicating with for loop? 
//nothing stopping the description from having its own commas in it, jsut read until end of line for (int i = 6; i < parts.length; i++) description += "," + parts[i]; } private String generateFirstLine() { return datasetName + "," + estimatorName + "," + split + "," + foldID + "," + getTimeUnitAsString() + "," + fileType.name() + "," + description; } private void parseSecondLine(String line) { paras = line; //handle buildtime if it's on this line like older files may have, //taking it out of the generic paras string and putting the value into the actual field String[] parts = paras.split(","); if (parts.length > 0 && parts[0].contains("BuildTime")) { buildTime = (long)Double.parseDouble(parts[1].trim()); if (parts.length > 2) { //this has actual paras too, rebuild this string without buildtime paras = parts[2]; for (int i = 3; i < parts.length; i++) { paras += "," + parts[i]; } } } } private String generateSecondLine() { //todo decide what to do with this return paras; } /** * Returns the test acc reported on this line, for comparison with acc * computed later to assert they align. Accuracy has always been reported * on this line in this file format, so fair to assume if this fails * then the file is simply malformed */ private double parseThirdLine(String line) { String[] parts = line.split(","); acc = Double.parseDouble(parts[0]); //if buildtime is here, it shouldn't be on the paras line too. //if it is, likely an old SaveParameterInfo implementation put it there //for now, overwriting that buildtime with this one, but printing warning if (parts.length > 1) { if (buildTime != -1 && !buildTimeDuplicateWarningPrinted) { System.out.println("CLASSIFIERRESULTS READ WARNING: build time reported on both " + "second and third line. Using the value reported on the third line"); buildTimeDuplicateWarningPrinted = true; } double x=Double.parseDouble(parts[1]); //todo fix this hack. Why just these two? should doubles be allowed? 
buildTime = (long)x; } if (parts.length > 2) { double x = Double.parseDouble(parts[1]); testTime = (long)x; } if (parts.length > 3) benchmarkTime = Long.parseLong(parts[3]); if (parts.length > 4) memoryUsage = Long.parseLong(parts[4]); if (parts.length > 5) numClasses = Integer.parseInt(parts[5]); if (parts.length > 6) errorEstimateMethod = parts[6]; if (parts.length > 7) errorEstimateTime = Long.parseLong(parts[7]); if (parts.length > 8) buildPlusEstimateTime = Long.parseLong(parts[8]); return acc; } private String generateThirdLine() { String res = acc + "," + buildTime + "," + testTime + "," + benchmarkTime + "," + memoryUsage + "," + numClasses() + "," + errorEstimateMethod + "," + errorEstimateTime + "," + buildPlusEstimateTime; return res; } private String getTimeUnitAsString() { return timeUnit.name(); } private void setTimeUnitFromString(String str) { timeUnit = TimeUnit.valueOf(str); } public void loadResultsFromFile(String path) throws FileNotFoundException, Exception { try { //init trueClassValues = new ArrayList<>(); predClassValues = new ArrayList<>(); predDistributions = new ArrayList<>(); predTimes = new ArrayList<>(); predDescriptions = new ArrayList<>(); numInstances = 0; acc = -1; buildTime = -1; testTime = -1; memoryUsage = -1; //check file exists File f = new File(path); if (!(f.exists() && f.length() > 0)) throw new FileNotFoundException("File " + path + " NOT FOUND"); Scanner inf = new Scanner(f); //parse meta infos parseFirstLine(inf.nextLine()); parseSecondLine(inf.nextLine()); double reportedTestAcc = parseThirdLine(inf.nextLine()); //fileType was read in from first line. 
switch (fileType) { case PREDICTIONS: { //have all meta info, start reading predictions or metrics instancePredictionsFromScanner(inf); //acts as a basic form of verification, does the acc reported on line 3 align with //the acc calculated while reading predictions double eps = 1.e-8; if (Math.abs(reportedTestAcc - acc) > eps) { throw new ArithmeticException("Calculated accuracy (" + acc + ") differs from written accuracy (" + reportedTestAcc + ") " + "by more than eps (" + eps + "). File = " + path + ". numinstances = " + numInstances + ". numClasses = " + numClasses); } if (predDistributions == null || predDistributions.isEmpty() || predDistributions.get(0) == null) { if (printDistMissingWarning) System.out.println("Probability distributions missing from file: " + path); } break; } case METRICS: allPerformanceMetricsFromScanner(inf); break; case COMPACT: throw new UnsupportedOperationException("COMPACT file reading not yet supported"); } finalised = true; inf.close(); } catch (FileNotFoundException fnf) { if (printOnFailureToLoad) System.out.println("File " + path + " NOT FOUND"); throw fnf; } catch (Exception ex) { if (printOnFailureToLoad) System.out.println("File " + path + " FAILED TO LOAD"); throw ex; } } /****************************************** * * METRIC CALCULATIONS * */ /** * Will calculate all the metrics that can be found from the prediction information * stored in this object. Will NOT call finaliseResults(..), and finaliseResults(..) * not have been called elsewhere, however if it has not been called then true * class values must have been supplied while storing predictions. * * This is to allow iterative calculation of the metrics (in e.g. 
batches * of added predictions) */ public void findAllStats(){ //meta info if (numInstances <= 0) inferNumInstances(); if (numClasses <= 0) inferNumClasses(); //predictions-only confusionMatrix=buildConfusionMatrix(); countPerClass=new double[confusionMatrix.length]; for(int i=0;i<trueClassValues.size();i++) countPerClass[trueClassValues.get(i).intValue()]++; if (acc < 0) calculateAcc(); balancedAcc=findBalancedAcc(confusionMatrix); mcc = computeMCC(confusionMatrix); f1=findF1(confusionMatrix); //also handles spec/sens/prec/recall in the process of finding f1 //need probabilities. very old files that have been read in may not have them. if (predDistributions != null && !predDistributions.isEmpty() && predDistributions.get(0) != null ) { nll=findNLL(); meanAUROC=findMeanAUROC(); } //timing medianPredTime=findMedianPredTime(predTimes); //early classification //earliness=findEarliness(); //harmonicMean=findHarmonicMean(); allStatsFound = true; } /** * Will calculate all the metrics that can be found from the prediction information * stored in this object, UNLESS this object has been finalised (finaliseResults(..)) AND * has already had it's stats found (findAllStats()), e.g. if it has already been called * by another process. * * In this latter case, this method does nothing. 
*/ @Override public void findAllStatsOnce(){ if (finalised && allStatsFound) { printlnDebug("Stats already found, ignoring findAllStatsOnce()"); return; } else { findAllStats(); } } /** * @return [actual class][predicted class] */ private double[][] buildConfusionMatrix() { double[][] matrix = new double[numClasses][numClasses]; for (int i = 0; i < predClassValues.size(); ++i){ double actual=trueClassValues.get(i); double predicted=predClassValues.get(i); ++matrix[(int)actual][(int)predicted]; } return matrix; } /** * uses only the probability of the true class */ public double findNLL(){ double nll=0; for(int i=0;i<trueClassValues.size();i++){ double[] dist=getProbabilityDistribution(i); int trueClass = trueClassValues.get(i).intValue(); if(dist[trueClass]==0) nll+=NLL_PENALTY; else nll+=Math.log(dist[trueClass])/Math.log(2);//Log 2 } return -nll/trueClassValues.size(); } public double findMeanAUROC(){ double a=0; if(numClasses==2){ a=findAUROC(1); /* if(countPerClass[0]<countPerClass[1]) else a=findAUROC(1); */ } else{ double[] classDist = InstanceTools.findClassDistributions(trueClassValues, numClasses); for(int i=0;i<numClasses;i++){ a+=findAUROC(i) * classDist[i]; } //original, unweighted // for(int i=0;i<numClasses;i++){ // a+=findAUROC(i); // } // a/=numClasses; } return a; } /** * todo could easily be optimised further if really wanted */ public double computeMCC(double[][] confusionMatrix) { double num=0.0; for (int k = 0; k < confusionMatrix.length; ++k) for (int l = 0; l < confusionMatrix.length; ++l) for (int m = 0; m < confusionMatrix.length; ++m) num += (confusionMatrix[k][k]*confusionMatrix[m][l])- (confusionMatrix[l][k]*confusionMatrix[k][m]); if (num == 0.0) return 0; double den1 = 0.0; double den2 = 0.0; for (int k = 0; k < confusionMatrix.length; ++k) { double den1Part1=0.0; double den2Part1=0.0; for (int l = 0; l < confusionMatrix.length; ++l) { den1Part1 += confusionMatrix[l][k]; den2Part1 += confusionMatrix[k][l]; } double den1Part2=0.0; 
double den2Part2=0.0;
            // sums over all columns/rows belonging to classes other than k
            for (int kp = 0; kp < confusionMatrix.length; ++kp)
                if (kp!=k) {
                    for (int lp = 0; lp < confusionMatrix.length; ++lp) {
                        den1Part2 += confusionMatrix[lp][kp];
                        den2Part2 += confusionMatrix[kp][lp];
                    }
                }

            den1 += den1Part1 * den1Part2;
            den2 += den2Part1 * den2Part2;
        }

        return num / (Math.sqrt(den1)*Math.sqrt(den2));
    }

    /**
     * Balanced accuracy: average of the accuracy for each class
     * @param cm confusion matrix, [actual class][predicted class]
     * @return unweighted mean of the per-class accuracies
     */
    public double findBalancedAcc(double[][] cm){
        double[] accPerClass=new double[cm.length];
        for(int i=0;i<cm.length;i++)
            accPerClass[i]=cm[i][i]/countPerClass[i];

        double b=accPerClass[0];
        for(int i=1;i<cm.length;i++)
            b+=accPerClass[i];
        b/=cm.length;

        return b;
    }

    /**
     * F1: If it is a two class problem we use the minority class
     * if it is multiclass we average over all classes.
     * @param cm confusion matrix, [actual class][predicted class]
     * @return F1 score (beta = 1)
     */
    public double findF1(double[][] cm){
        double f=0;
        if(numClasses==2){
            if(countPerClass[0]<countPerClass[1])
                f=findConfusionMatrixMetrics(cm,0,1);
            else
                f=findConfusionMatrixMetrics(cm,1,1);
        }
        else{//Average over all of them
            for(int i=0;i<numClasses;i++)
                f+=findConfusionMatrixMetrics(cm,i,1);
            f/=numClasses;
        }
        return f;
    }

    // F-beta score for class c; as a side effect also sets the
    // precision/recall/sensitivity/specificity fields for that class.
    protected double findConfusionMatrixMetrics(double[][] confMat, int c,double beta) {
        double tp = confMat[c][c]; //[actual class][predicted class]

        //some very small non-zero value, in the extreme case that no cases of
        //this class were correctly classified
        // NOTE(review): returning here leaves the precision/recall/sensitivity/
        // specificity fields untouched for this class — confirm intended.
        if (tp == .0)
            return .0000001;

        double fp = 0.0, fn = 0.0,tn=0.0;
        for (int i = 0; i < confMat.length; i++) {
            if (i!=c) {
                fp += confMat[i][c];
                fn += confMat[c][i];
                // NOTE(review): tn only sums the diagonal cells of the other
                // classes; true negatives arguably should include every cell
                // whose actual AND predicted class differ from c. Equivalent
                // for 2-class data, undercounts for multiclass — affects
                // specificity only.
                tn+=confMat[i][i];
            }
        }

        precision = tp / (tp+fp);
        recall = tp / (tp+fn);
        sensitivity=recall;
        specificity=tn/(fp+tn);

        //jamesl
        //one in a million case on very small AND unbalanced datasets (lenses...)
        // that particular train/test splits and their cv splits
        //lead to a divide by zero on one of these stats (C4.5, lenses, trainFold7 (and a couple others), specificity in the case i ran into)
        //as a little work around, if this case pops up, will simply set the stat to 0
        if (Double.compare(precision, Double.NaN) == 0)
            precision = 0;
        if (Double.compare(recall, Double.NaN) == 0)
            recall = 0;
        if (Double.compare(sensitivity, Double.NaN) == 0)
            sensitivity = 0;
        if (Double.compare(specificity, Double.NaN) == 0)
            specificity = 0;

        return (1+beta*beta) * (precision*recall) / ((beta*beta)*precision + recall);
    }

    // One-vs-rest area under the ROC curve for class c, built from the stored
    // probability distributions.
    protected double findAUROC(int c){
        // local value pair: x = predicted probability of class c, y = true class label
        class Pair implements Comparable<Pair>{
            Double x;
            Double y;
            public Pair(Double a, Double b){
                x=a;
                y=b;
            }
            @Override
            public int compareTo(Pair p) {
                return p.x.compareTo(x); // descending by probability
            }
            public String toString(){ return "("+x+","+y+")";}
        }

        ArrayList<Pair> p=new ArrayList<>();
        double nosPositive=0,nosNegative;
        for(int i=0;i<numInstances;i++){
            Pair temp=new Pair(predDistributions.get(i)[c],trueClassValues.get(i));
            if(c==trueClassValues.get(i))
                nosPositive++;
            p.add(temp);
        }
        nosNegative=trueClassValues.size()-nosPositive;
        Collections.sort(p);

        /*  http://www.cs.waikato.ac.nz/~remco/roc.pdf
            Determine points on ROC curve as follows;
            starts in the origin and goes one unit up, for every negative outcome the
            curve goes one unit to the right. Units on the x-axis are 1/#TN and on the
            y-axis 1/#TP where #TP (#TN) is the total number of true positives (true
            negatives). This gives the points on the ROC curve
            (0; 0); (x1; y1); : : : ; (xn; yn); (1; 1).
*/
        ArrayList<Pair> roc=new ArrayList<>();
        double x=0;
        double oldX=0; // NOTE(review): unused
        double y=0;
        int xAdd=0, yAdd=0;
        boolean xLast=false,yLast=false;
        roc.add(new Pair(x,y));
        // walk predictions in descending-probability order: positives advance x
        // by 1/#positives, negatives advance y by 1/#negatives; a curve point is
        // recorded at each direction change
        for(int i=0;i<numInstances;i++){
            if(p.get(i).y==c){
                if(yLast)
                    roc.add(new Pair(x,y));
                xLast=true;
                yLast=false;
                x+=1/nosPositive;
                xAdd++;
                if(xAdd==nosPositive)
                    x=1.0; // clamp to exactly 1 against float drift
            }
            else{
                if(xLast)
                    roc.add(new Pair(x,y));
                yLast=true;
                xLast=false;
                y+=1/nosNegative;
                yAdd++;
                if(yAdd==nosNegative)
                    y=1.0;
            }
        }
        roc.add(new Pair(1.0,1.0));

        //Calculate the area under the ROC curve, as the sum over all trapezoids with
        //base xi+1 to xi , that is, A
        double auroc=0;
        for(int i=0;i<roc.size()-1;i++){
            auroc+=(roc.get(i+1).y-roc.get(i).y)*(roc.get(i+1).x);
        }
        return auroc;
    }

    //Early classification
    //Currently assumes each predictions earliness is stored in the prediction description alone.
    public double findEarliness(){
        double e = 0;
        for (String d : predDescriptions){
            e += Double.parseDouble(d);
        }
        earliness = e / predDescriptions.size();
        return earliness;
    }

    //Early classification
    // Harmonic mean of accuracy and (1 - earliness); lazily computes either
    // input stat first if it has not been found yet (-1 == unset).
    public double findHarmonicMean(){
        if (earliness < 0)
            earliness = findEarliness();
        if (acc < 0)
            calculateAcc();

        harmonicMean = (2 * acc * (1 - earliness)) / (acc + (1 - earliness));
        return harmonicMean;
    }

    // Serialises every summary metric (plus per-class counts and the confusion
    // matrix) as one "name,value" pair per line; read back by
    // allPerformanceMetricsFromScanner(..).
    public String allPerformanceMetricsToString() {
        String str="numClasses,"+numClasses+"\n";
        str+="numInstances,"+numInstances+"\n";
        str+="acc,"+acc+"\n";
        str+="balancedAcc,"+balancedAcc+"\n";
        str+="sensitivity,"+sensitivity+"\n";
        str+="precision,"+precision+"\n";
        str+="recall,"+recall+"\n";
        str+="specificity,"+specificity+"\n";
        str+="f1,"+f1+"\n";
        str+="mcc,"+mcc+"\n";
        str+="nll,"+nll+"\n";
        str+="meanAUROC,"+meanAUROC+"\n";
        str+="stddev,"+stddev+"\n";
        str+="medianPredTime,"+medianPredTime+"\n";
        str+="countPerClass:\n";
        for(int i=0;i<countPerClass.length;i++)
            str+="Class "+i+","+countPerClass[i]+"\n";
        str+="confusionMatrix:\n";
        for(int i=0;i<confusionMatrix.length;i++){
            for(int j=0;j<confusionMatrix[i].length;j++)
                str+=confusionMatrix[i][j]+",";
            str+="\n";
        }
        return
str; } public void allPerformanceMetricsFromScanner(Scanner scan) throws NoSuchElementException, NumberFormatException { try { numClasses = Integer.parseInt(scan.nextLine().split(",")[1]); numInstances = Integer.parseInt(scan.nextLine().split(",")[1]); acc = Double.parseDouble(scan.nextLine().split(",")[1]); balancedAcc = Double.parseDouble(scan.nextLine().split(",")[1]); sensitivity = Double.parseDouble(scan.nextLine().split(",")[1]); precision = Double.parseDouble(scan.nextLine().split(",")[1]); recall = Double.parseDouble(scan.nextLine().split(",")[1]); specificity = Double.parseDouble(scan.nextLine().split(",")[1]); f1 = Double.parseDouble(scan.nextLine().split(",")[1]); mcc = Double.parseDouble(scan.nextLine().split(",")[1]); nll = Double.parseDouble(scan.nextLine().split(",")[1]); meanAUROC = Double.parseDouble(scan.nextLine().split(",")[1]); stddev = Double.parseDouble(scan.nextLine().split(",")[1]); medianPredTime= Long.parseLong(scan.nextLine().split(",")[1]); assert(scan.nextLine() == "countPerClass");//todo change to if not throws countPerClass = new double[numClasses]; for (int i = 0; i < numClasses; i++) countPerClass[i] = Double.parseDouble(scan.nextLine().split(",")[1]); assert(scan.nextLine() == "confusionMatrix"); //todo change to if not throws confusionMatrix = new double[numClasses][numClasses]; for (int i = 0; i < numClasses; i++) { String[] vals = scan.nextLine().split(","); for (int j = 0; j < numClasses; j++) confusionMatrix[i][j] = Double.parseDouble(vals[j]); } } catch (NoSuchElementException e) { System.err.println("Error reading metrics in allPerformanceMetricsFromString(str), scanner reached end prematurely"); throw e; } catch (NumberFormatException e) { System.err.println("Error reading metrics in allPerformanceMetricsFromString(str), parsing metric value failed"); throw e; } } /** * Concatenates the predictions of classifiers made on different folds on the data * into one results object * * If ClassifierResults ever gets split into 
separate classes for prediction and meta info,
     * this obviously gets cleaned up a lot
     *
     * @param cresults ClassifierResults[fold]
     * @return single ClassifierResults object
     */
    public static ClassifierResults concatenateClassifierResults( /*fold*/ ClassifierResults[] cresults) throws Exception {
        // delegate to the [classifier][fold] overload with a single 'classifier'
        return concatenateClassifierResults(new ClassifierResults[][]{cresults})[0];
    }

    /**
     * Concatenates the predictions of classifiers made on different folds on the data
     * into one results object per classifier.
     *
     * If ClassifierResults ever gets split into separate classes for prediction and meta info,
     * this obviously gets cleaned up a lot
     *
     * @param cresults ClassifierResults[classifier][fold]
     * @return ClassifierResults[classifier]
     */
    public static ClassifierResults[] concatenateClassifierResults( /*classiifer*/ /*fold*/ ClassifierResults[][] cresults) throws Exception {
        ClassifierResults[] concatenatedResults = new ClassifierResults[cresults.length];
        for (int classifierid = 0; classifierid < cresults.length; classifierid++) {
            if (cresults[classifierid].length == 1) {
                // single fold: nothing to concatenate, reuse the object as-is
                concatenatedResults[classifierid] = cresults[classifierid][0];
            }
            else {
                ClassifierResults newCres = new ClassifierResults();
                for (int foldid = 0; foldid < cresults[classifierid].length; foldid++) {
                    ClassifierResults foldCres = cresults[classifierid][foldid];
                    // copy each prediction (true class, dist, pred, time, description)
                    for (int predid = 0; predid < foldCres.numInstances(); predid++) {
                        newCres.addPrediction(foldCres.getTrueClassValue(predid), foldCres.getProbabilityDistribution(predid),
                                foldCres.getPredClassValue(predid), foldCres.getPredictionTime(predid), foldCres.getPredDescription(predid));

                        // TODO previously didnt copy of pred times and predictions
                        // not sure if there was any particular reason why i didnt,
                        // aside from saving space?
} }
                concatenatedResults[classifierid] = newCres;
            }
        }
        return concatenatedResults;
    }

    /**
     * Creates a (shallow) copy of the given results object, and returns one that
     * is identical in all ways except for each probability distribution is rounded
     * to the number of decimal places it would be written to file with (default 6),
     * GenericTools.RESULTS_DECIMAL_FORMAT.format(d)
     */
    public static ClassifierResults util_roundAllPredictionDistsToDefaultPlaces(ClassifierResults res) throws Exception {
        double[][] oldDists = res.getProbabilityDistributionsAsArray();
        double[][] roundedDists = new double[oldDists.length][oldDists[0].length];

        // round via the same formatter used when writing to file, then parse back
        for (int i = 0; i < oldDists.length; i++)
            for (int j = 0; j < oldDists[i].length; j++)
                //TODO this is horrible.
                roundedDists[i][j] = Double.valueOf(GenericTools.RESULTS_DECIMAL_FORMAT.format(oldDists[i][j]));

        ClassifierResults newres = new ClassifierResults(res.getTrueClassValsAsArray(), res.getPredClassValsAsArray(),
                roundedDists, res.getPredictionTimesAsArray(), res.getPredDescriptionsAsArray());

        // copy the meta info over verbatim
        newres.setEstimatorName(res.getEstimatorName());
        newres.setDatasetName(res.getDatasetName());
        newres.setFoldID(res.getFoldID());
        newres.setTimeUnit(res.getTimeUnit());
        newres.setDescription(res.getDescription());

        newres.setParas(res.paras);

        newres.setBuildTime(res.getBuildTime());
        newres.setErrorEstimateTime(res.getErrorEstimateTime());
        newres.setErrorEstimateMethod(res.getErrorEstimateMethod());
        newres.setBenchmarkTime(res.getBenchmarkTime());
        newres.setMemory(res.getMemory());

        newres.findAllStatsOnce();
        return newres;
    }

    public static void main(String[] args) throws Exception {
        readWriteTest();
    }

    // Smoke test: builds a small results object with fixed-seed random
    // predictions, writes it to file, reads it back, and prints both versions
    // for eyeball comparison.
    private static void readWriteTest() throws Exception {
        ClassifierResults res = new ClassifierResults();

        res.setEstimatorName("testClassifier");
        res.setDatasetName("testDataset");
        //empty split
        //empty foldid
        res.setDescription("boop, guest");

        res.setParas("test,west,best");

        //acc handled internally
        res.setBuildTime(2);
        res.setTestTime(1);
        //empty benchmark
//empty memory Random rng = new Random(0); for (int i = 0; i < 10; i++) { //obvs dists dont make much sense, not important here res.addPrediction(rng.nextInt(2), new double[] { rng.nextDouble(), rng.nextDouble()}, rng.nextInt(2), rng.nextInt(5)+1, "test,again"); } res.finaliseResults(); System.out.println(res.writeFullResultsToString()); System.out.println("\n\n"); res.writeFullResultsToFile("test.csv"); ClassifierResults res2 = new ClassifierResults("test.csv"); System.out.println(res2.writeFullResultsToString()); } }
85,312
39.490271
252
java
tsml-java
tsml-java-master/src/main/java/evaluation/storage/ClustererResults.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package evaluation.storage; import blogspot.software_and_algorithms.stern_library.optimization.HungarianAlgorithm; import fileIO.OutFile; import utilities.DebugPrinting; import utilities.GenericTools; import java.io.File; import java.io.FileNotFoundException; import java.io.Serializable; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Scanner; import java.util.concurrent.TimeUnit; import java.util.function.Function; import static org.apache.commons.math3.special.Gamma.logGamma; /** * This is a container class for the storage of predictions and meta-info of a * clusterer on a single set of instances. * * Predictions can be stored via addPrediction(...) or addAllPredictions(...) 
* Currently, the information stored about each prediction is: * - The true class value (double getTrueClassValue(index)) * - The predicted cluster (double getPredClassValue(index)) * - The probability distribution for this instance (double[] getProbabilityDistribution(index)) * - An optional description of the prediction (String getPredDescription(index)) * * The meta info stored is: * [LINE 1 OF FILE] * - get/setDatasetName(String) * - get/setClustererName(String) * - get/setSplit(String) * - get/setFoldId(String) * - get/setTimeUnit(TimeUnit) * - get/setDescription(String) * [LINE 2 OF FILE] * - get/setParas(String) * [LINE 3 OF FILE] * - getAccuracy() (calculated from predictions, only settable with a suitably annoying message) * - get/setBuildTime(long) * - get/setTestTime(long) * - get/setBenchmarkTime(long) * - get/setMemory(long) * - get/setNumClasses(int) * - get/setNumClusters(int) (either set by user or indirectly found through predicted probability distributions) * * [REMAINING LINES: PREDICTIONS] * - trueClassVal, predClusterVal, [empty], dist[0], dist[1] ... dist[c], [empty], predTime, [empty], predDescription * * Supports reading/writing of results from/to file, in the 'ClustererResults file-format' * - loadResultsFromFile(String path) * - writeFullResultsToFile(String path) * * Supports recording of timings in different time units. Nanoseconds is the default. * Also supports the calculation of various evaluative performance metrics based on the predictions (accuracy, * rand index, mutual information etc.) * * EXAMPLE USAGE: * ClustererResults res = new ClustererResults(numClasses); * //set a particular timeunit, if using something other than nanos. Nanos recommended * //set any meta info you want to keep, e.g clusterername, datasetname... 
*
 *     for (Instance inst : test) {
 *         res.addPrediction(inst.classValue(), clusterDist, clusterPred, 0, ""); //description is optional
 *     }
 *
 *     res.finaliseResults(); //performs some basic validation, and calcs some relevant internal info
 *
 *     //can now find summary scores for these predictions
 *     //stats stored in simple public members for now
 *     res.findAllStats();
 *
 *     //and/or save to file
 *     res.writeFullResultsToFile(path);
 *
 *     //and could then load them back in
 *     ClassifierResults res2 = new ClassifierResults(path);
 *
 *     //they are automatically finalised, however the stats are not automatically found
 *     res2.findAllStats();
 *
 * @author Matthew Middlehurst, adapted from ClassifierResults (James Large)
 */
public class ClustererResults extends EstimatorResults implements DebugPrinting, Serializable {

    /**
     * Print a message with the filename to stdout when a file cannot be loaded.
     * Can get very tiresome if loading thousands of files with some expected failures,
     * and a higher level process already summarises them, thus this option to
     * turn off the messages
     */
    public static boolean printOnFailureToLoad = true;

    /**
     * Print a message when result file clustering accuracy does not match calculated accuracy.
     * Some results files do not have valid accuracies, setting this to false will stop print outs from this check.
     */
    public static boolean clAccTestPrint = true;

    //LINE 1: meta info, set by user
    //estimatorName
    // datasetName
    // split
    // foldID
    // timeUnit
    // description

    //LINE 2: clusterer setup/info, parameters. precise format is up to user.
    /**
     * For now, user dependent on the formatting of this string, and really, the contents of it.
     * It is notionally intended to contain the parameters of the clusterer used to produce the
     * attached predictions, but could also store other things as well.
     */
    private String paras = "No parameter info";

    //LINE 3: rand, buildTime, memoryUsage
    //simple summarative performance stats.
/**
     * Calculated from the stored cluster predictions, cannot be explicitly set by user
     */
    private double accuracy = -1; // -1 == not yet computed

    /**
     * Number of clusters, can be inferred from the number of distributions
     */
    private int numClusters = -1;

    // buildTime
    // testTime
    // benchmarkTime
    // memoryUsage

    //REMAINDER OF THE FILE - 1 case per line
    //raw performance data. currently just five parallel arrays
    private ArrayList<Double> trueClassValues;
    private ArrayList<Double> clusterValues;
    private ArrayList<double[]> distributions;
    private ArrayList<Long> predTimes;
    private ArrayList<String> descriptions;

    //inferred/supplied dataset meta info
    private int numClasses;
    private int numInstances;

    //calculated performance metrics, all initialised to -1 == not yet found
    //accuracy can be re-calced, as well as stored on line three in files
    private double ri = -1;   // rand index
    private double ari = -1;  // adjusted rand index
    private double mi = -1;   // mutual information
    private double nmi = -1;  // normalized mutual information
    private double ami = -1;  // adjusted mutual information

    //self-management flags
    /**
     * essentially controls whether a ClustererResults object can have finaliseResults(trueClassVals)
     * called upon it. In theory, every class using the ClustererResults object should make new
     * instantiations of it each time a set of results is being computed, and so this is not needed
     */
    private boolean finalised = false;
    private boolean allStatsFound = false;

    /**
     * System.nanoTime() can STILL return zero on some tiny datasets with simple classifiers,
     * because it does not have enough precision. This flag, if true, makes the timing
     * setters reject values of zero; if false, zero timings are allowed, under the
     * partial assumption/understanding from the user that times under
     * ~200 nanoseconds can be equated to 0.
*/
    private boolean errorOnTimingOfZero = false;

    //functional getters to retrieve info from a clustererresults object, initialised/stored here for convenience
    //these are currently only used in PerformanceMetric.java, can take any results type as a hack to allow other
    //results in evaluation
    public static final Function<EstimatorResults, Double> GETTER_Accuracy = (EstimatorResults cr) -> ((ClustererResults)cr).accuracy;
    public static final Function<EstimatorResults, Double> GETTER_RandIndex = (EstimatorResults cr) -> ((ClustererResults)cr).ri;
    public static final Function<EstimatorResults, Double> GETTER_AdjustedRandIndex = (EstimatorResults cr) -> ((ClustererResults)cr).ari;
    public static final Function<EstimatorResults, Double> GETTER_MutualInformation = (EstimatorResults cr) -> ((ClustererResults)cr).mi;
    public static final Function<EstimatorResults, Double> GETTER_NormalizedMutualInformation = (EstimatorResults cr) -> ((ClustererResults)cr).nmi;
    public static final Function<EstimatorResults, Double> GETTER_AdjustedMutualInformation = (EstimatorResults cr) -> ((ClustererResults)cr).ami;

    /*********************************
     *
     *       CONSTRUCTORS
     *
     */

    /**
     * Create an empty ClustererResults object.
     *
     * If number of classes is known when making the object, it is safer to use this constructor
     * and supply the number of classes directly.
     *
     * In some extreme use cases, predictions on dataset splits that a particular classifier results represents
     * may not have examples of each class that actually exists in the full dataset. If it is left
     * to infer the number of classes, some may be missing.
*/
    public ClustererResults(int numClasses) {
        trueClassValues = new ArrayList<>();
        clusterValues = new ArrayList<>();
        distributions = new ArrayList<>();
        predTimes = new ArrayList<>();
        descriptions = new ArrayList<>();

        this.numClasses = numClasses;
        finalised = false;
    }

    /**
     * Load a ClustererResults object from the file at the specified path
     */
    public ClustererResults(String filePathAndName) throws Exception {
        loadResultsFromFile(filePathAndName);
    }

    /**
     * Create a ClustererResults object with complete predictions (equivalent to addAllPredictions()). The results are
     * FINALISED after initialisation. Meta info such as clusterer name, datasetname... can still be set after
     * construction.
     *
     * The descriptions array argument may be null, in which case the descriptions are stored as empty strings.
     *
     * All other arguments are required in full, however
     */
    public ClustererResults(int numClasses, double[] trueClassVals, double[] predictions, double[][] distributions,
                            long[] predTimes, String[] descriptions) throws Exception {
        this.trueClassValues = new ArrayList<>();
        this.clusterValues = new ArrayList<>();
        this.distributions = new ArrayList<>();
        this.predTimes = new ArrayList<>();
        this.descriptions = new ArrayList<>();

        this.numClasses = numClasses;

        addAllPredictions(trueClassVals, predictions, distributions, predTimes, descriptions);
        finaliseResults();
    }

    /***********************
     *
     *      DATASET META INFO
     *
     *
     */

    public int getNumClasses() {
        return numClasses;
    }

    public void setNumClasses(int numClasses) {
        this.numClasses = numClasses;
    }

    public int numInstances() {
        // lazily derived from the number of stored cluster assignments
        if (numInstances <= 0)
            inferNumInstances();
        return numInstances;
    }

    private void inferNumInstances() {
        this.numInstances = clusterValues.size();
    }

    public void turnOffZeroTimingsErrors() {
        errorOnTimingOfZero = false;
    }

    public void turnOnZeroTimingsErrors() {
        errorOnTimingOfZero = true;
    }

    /*****************************
     *
     *     LINE 2 GETS/SETS
     *
     */

    public String getParas() {
        return paras;
    }

    public void setParas(String paras) {
this.paras = paras;
    }

    /*****************************
     *
     *     LINE 3 GETS/SETS
     *
     */

    @Override
    public double getAcc() {
        // lazily computed on first request (-1 == unset)
        if (accuracy < 0)
            calculateAcc();
        return accuracy;
    }

    // Clustering accuracy: builds the cluster-vs-class contingency table and
    // uses the Hungarian algorithm to find the cluster-to-class assignment that
    // maximises agreement, then scores the proportion of matched instances.
    private void calculateAcc() {
        if (trueClassValues == null || trueClassValues.isEmpty() || trueClassValues.get(0) == -1) {
            System.out.println("**getAcc():calculateAcc() no true class values supplied yet, cannot calculate accuracy");
            return;
        }

        // contingency table w[cluster][class], padded square so Hungarian can run
        int d = Math.max(numClasses, numClusters);
        double[][] w = new double[d][d];
        for (int i = 0; i < numInstances; i++) {
            w[clusterValues.get(i).intValue()][trueClassValues.get(i).intValue()]++;
        }

        double max = 0;
        for (int i = 0; i < d; i++) {
            for (int n = 0; n < d; n++) {
                if (w[i][n] > max)
                    max = w[i][n];
            }
        }

        // HungarianAlgorithm minimises cost, so invert the counts (max - w)
        double[][] nw = new double[d][d];
        for (int i = 0; i < d; i++) {
            for (int n = 0; n < d; n++) {
                nw[i][n] = max - w[i][n];
            }
        }

        int[] a = new HungarianAlgorithm(nw).execute();

        double sum = 0;
        for (int i = 0; i < d; i++) {
            sum += w[i][a[i]];
        }

        accuracy = sum / numInstances;
    }

    public long getBuildTime() {
        return buildTime;
    }

    public long getBuildTimeInNanos() {
        return timeUnit.toNanos(buildTime);
    }

    /**
     * @throws Exception if buildTime is less than 1
     */
    public void setBuildTime(long buildTime) {
        if (errorOnTimingOfZero && buildTime < 1)
            throw new RuntimeException("Build time passed has invalid value, " + buildTime + ". If greater resolution" +
                    " is needed, use nano seconds (e.g System.nanoTime()) and set the TimeUnit of the " +
                    "classifierResults object to nanoseconds.\n\nIf you are using nanoseconds but STILL getting this " +
                    "error, read the javadoc for and use turnOffZeroTimingsErrors() for this call");
        this.buildTime = buildTime;
    }

    public long getTestTime() {
        return testTime;
    }

    public long getTestTimeInNanos() {
        return timeUnit.toNanos(testTime);
    }

    public void setTestTime(long testTime) {
        this.testTime = testTime;
    }

    public long getMemory() {
        return memoryUsage;
    }

    public void setMemory(long memory) {
        this.memoryUsage = memory;
    }

    public int getNumClusters() {
        // lazily derived from the width of the first stored distribution
        if (numClusters <= 0)
            inferNumClusters();
        return numClusters;
    }

    public void setNumClusters(int numClusters) {
        this.numClusters = numClusters;
    }

    private void inferNumClusters() {
        this.numClusters = distributions.get(0).length;
    }

    public long getBenchmarkTime() {
        return benchmarkTime;
    }

    public void setBenchmarkTime(long benchmarkTime) {
        this.benchmarkTime = benchmarkTime;
    }

    /****************************
     *
     *   PREDICTION STORAGE
     *
     */

    /**
     * Will update the internal prediction info using the values passed. User must pass the predicted cluster
     * so that they may resolve ties how they want (e.g first, randomly, etc).
* The standard, used in most places, would be utilities.GenericTools.indexOfMax(double[] dist)
     *
     * The description argument may be null, however all other arguments are required in full
     *
     * The true class is missing, however can be added in one go later with the
     * method finaliseResults(double[] trueClassVals)
     */
    public void addPrediction(double[] dist, double cluster, long predictionTime, String description) throws Exception {
        distributions.add(dist);
        clusterValues.add(cluster);
        predTimes.add(predictionTime);

        // accumulate total test time; -1 is the 'unset' sentinel
        if (testTime == -1)
            testTime = predictionTime;
        else
            testTime += predictionTime;

        // null descriptions are stored as empty strings
        if (description == null)
            descriptions.add("");
        else
            descriptions.add(description);

        numInstances++;
    }

    /**
     * Will update the internal prediction info using the values passed. User must pass the predicted cluster
     * so that they may resolve ties how they want (e.g first, randomly, etc).
     * The standard, used in most places, would be utilities.GenericTools.indexOfMax(double[] dist)
     *
     * The description argument may be null, however all other arguments are required in full
     */
    public void addPrediction(double trueClassVal, double[] dist, double cluster, long predictionTime, String description) throws Exception {
        addPrediction(dist, cluster, predictionTime, description);
        trueClassValues.add(trueClassVal);
    }

    /**
     * Adds all the prediction info onto this ClustererResults object.
Does NOT finalise the results,
     * such that (e.g) predictions from multiple dataset splits can be added to the same object if wanted
     * <p>
     * The description argument may be null, however all other arguments are required in full
     */
    public void addAllPredictions(double[] trueClassVals, double[] predictions, double[][] distributions,
                                  long[] predictionTimes, String[] descriptions) throws Exception {
        // parallel arrays must be the same length (only checked when asserts are enabled)
        assert (trueClassVals.length == predictions.length);
        assert (trueClassVals.length == distributions.length);
        assert (trueClassVals.length == predictionTimes.length);
        if (descriptions != null)
            assert (trueClassVals.length == descriptions.length);

        for (int i = 0; i < trueClassVals.length; i++) {
            if (descriptions == null)
                addPrediction(trueClassVals[i], distributions[i], predictions[i], predictionTimes[i], null);
            else
                addPrediction(trueClassVals[i], distributions[i], predictions[i], predictionTimes[i], descriptions[i]);
        }
    }

    /**
     * Adds all the prediction info onto this ClustererResults object. Does NOT finalise the results,
     * such that (e.g) predictions from multiple dataset splits can be added to the same object if wanted
     * <p>
     * True class values can later be supplied (ALL IN ONE GO, if working to the above example usage..)
using * finaliseResults(double[] testClassVals) * <p> * The description argument may be null, however all other arguments are required in full */ public void addAllPredictions(double[] predictions, double[][] distributions, long[] predictionTimes, String[] descriptions) throws Exception { assert (predictions.length == distributions.length); assert (predictions.length == predictionTimes.length); if (descriptions != null) assert (predictions.length == descriptions.length); for (int i = 0; i < predictions.length; i++) { if (descriptions == null) addPrediction(distributions[i], predictions[i], predictionTimes[i], ""); else addPrediction(distributions[i], predictions[i], predictionTimes[i], descriptions[i]); } } /** * Will perform some basic validation to make sure that everything is here * that is expected, and compute the accuracy etc ready for file writing. * <p> * Typical usage: results.finaliseResults(instances.attributeToDoubleArray(instances.classIndex())) */ public void finaliseResults(double[] testClassVals) throws Exception { if (finalised) { System.out.println("finaliseResults(double[] testClassVals): Results already finalised, skipping " + "re-finalisation"); return; } if (testClassVals.length != clusterValues.size()) throw new Exception("finaliseTestResults(double[] testClassVals): Number of predictions " + "made and number of true class values passed do not match"); trueClassValues = new ArrayList<>(); for (double d : testClassVals) trueClassValues.add(d); finaliseResults(); } /** * Will perform some basic validation to make sure that everything is here * that is expected, and compute the accuracy etc ready for file writing. * <p> * You can use this method, instead of the version that takes the double[] testClassVals * as an argument, if you've been storing predictions via the addPrediction overload * that takes the true class value of each prediction. 
*/ public void finaliseResults() throws Exception { if (finalised) { printlnDebug("finaliseResults(): Results already finalised, skipping re-finalisation"); return; } if (numInstances <= 0) inferNumInstances(); if (numClusters <= 0) inferNumClusters(); if (distributions == null || clusterValues == null || distributions.isEmpty() || clusterValues.isEmpty()) throw new Exception("finaliseTestResults(): no predictions stored for this module"); assert trueClassValues.size() == clusterValues.size(); calculateAcc(); finalised = true; } /****************************** * * RAW DATA ACCESSORS * * getAsList, getAsArray, and getSingleElement of the four lists describing predictions * */ /** * */ public ArrayList<Double> getTrueClassVals() { return trueClassValues; } public double[] getTrueClassValsAsArray() { double[] d = new double[trueClassValues.size()]; int i = 0; for (double x : trueClassValues) d[i++] = x; return d; } public double getTrueClassValue(int index) { return trueClassValues.get(index); } public ArrayList<Double> getClusterValues() { return clusterValues; } public double[] getClusterValuesAsArray() { double[] d = new double[clusterValues.size()]; int i = 0; for (double x : clusterValues) d[i++] = x; return d; } public int[] getClusterValuesAsIntArray() { int[] d = new int[clusterValues.size()]; int i = 0; for (double x : clusterValues) d[i++] = (int) x; return d; } public double getClusterValue(int index) { return clusterValues.get(index); } public ArrayList<double[]> getProbabilityDistributions() { return distributions; } public double[][] getProbabilityDistributionsAsArray() { return distributions.toArray(new double[][]{}); } public double[] getProbabilityDistribution(int i) { if (i < distributions.size()) return distributions.get(i); return null; } public ArrayList<Long> getPredictionTimes() { return predTimes; } public long[] getPredictionTimesAsArray() { long[] l=new long[predTimes.size()]; int i=0; for(long x:predTimes) l[i++]=x; return l; } public long 
getPredictionTime(int index) { return predTimes.get(index); } public long getPredictionTimeInNanos(int index) { return timeUnit.toNanos(getPredictionTime(index)); } public ArrayList<String> getDescriptions() { return descriptions; } public String[] getPredDescriptionsAsArray() { String[] ds = new String[descriptions.size()]; int i = 0; for (String d : descriptions) ds[i++] = d; return ds; } public String getPredDescription(int index) { return descriptions.get(index); } @Override public void cleanPredictionInfo() { distributions = null; clusterValues = null; trueClassValues = null; predTimes = null; descriptions = null; } /******************************** * * FILE READ/WRITING * */ public static boolean exists(File file) { return file.exists() && file.length() > 0; } public static boolean exists(String path) { return exists(new File(path)); } /** * Reads and STORES the prediction in this ClustererResults object. * * INCREMENTS NUMINSTANCES * * If numClasses is still less than 0, WILL set numclasses if distribution info is present. 
* * [true],[pred], ,[dist[0]],...,[dist[c]], ,[description until end of line, may have commas in it] */ private void instancePredictionFromString(String predLine) throws Exception { String[] split = predLine.split(","); //collect actual class and cluster double trueClassVal = Double.parseDouble(split[0].trim()); double clusterVal = Double.parseDouble(split[1].trim()); final int distStartInd = 3; //actual, cluster, space, distStart double[] dist = null; if (numClusters < 2) { List<Double> distL = new ArrayList<>(); for (int i = distStartInd; i < split.length; i++) { if (split[i].equals("")) break; //we're at the empty-space-separator between probs and timing else distL.add(Double.valueOf(split[i].trim())); } numClusters = distL.size(); assert (numClusters >= 2); dist = new double[numClusters]; for (int i = 0; i < numClusters; i++) dist[i] = distL.get(i); } else { //we know how many clusters there should be, use this as implicit //file verification dist = new double[numClusters]; for (int i = 0; i < numClusters; i++) { //now need to offset by 3. dist[i] = Double.parseDouble(split[i + distStartInd].trim()); } } //collect timings long predTime = -1; final int timingInd = distStartInd + numClusters + 1; //actual, predicted, space, dist, space, timing if (split.length > timingInd) predTime = Long.parseLong(split[timingInd].trim()); //collect description String description = ""; final int descriptionInd = timingInd + 2; //actual, predicted, space, dist, , space, timing, space, description if (split.length > descriptionInd) { description = split[descriptionInd]; //no reason currently why the description passed cannot have commas in it, //might be a natural way to separate it in to different parts. //description really just fills up the remainder of the line. 
for (int i = descriptionInd + 1; i < split.length; i++) description += "," + split[i]; } addPrediction(trueClassVal, dist, clusterVal, predTime, description); } private void instancePredictionsFromScanner(Scanner in) throws Exception { while (in.hasNext()) { String line = in.nextLine(); //may be trailing empty lines at the end of the file if (line == null || line.equals("")) break; instancePredictionFromString(line); } calculateAcc(); } /** * [true],[pred], ,[dist[0]],...,[dist[c]], ,[predTime], ,[description until end of line, may have commas in it] */ private String instancePredictionToString(int i) { StringBuilder sb = new StringBuilder(); sb.append(trueClassValues.get(i).intValue()).append(","); sb.append(clusterValues.get(i).intValue()); //probs sb.append(","); //<empty space> double[] probs = distributions.get(i); for (double d : probs) sb.append(",").append(GenericTools.RESULTS_DECIMAL_FORMAT.format(d)); //timing sb.append(",,").append(predTimes.get(i)); //<empty space>, timing //description sb.append(",,").append(descriptions.get(i)); //<empty space>, description return sb.toString(); } public String instancePredictionsToString() throws Exception { if (trueClassValues == null || trueClassValues.size() == 0 || trueClassValues.get(0) == -1) throw new Exception("No true class value stored, call finaliseResults(double[] trueClassVal)"); if (numInstances() > 0 && (distributions.size() == trueClassValues.size() && distributions.size() == clusterValues.size())) { StringBuilder sb = new StringBuilder(""); for (int i = 0; i < numInstances(); i++) { sb.append(instancePredictionToString(i)); if (i < numInstances() - 1) sb.append("\n"); } return sb.toString(); } else return "No Instance Prediction Information"; } @Override public String toString() { return generateFirstLine(); } public String statsToString() { String s = ""; s += "Clustering Accuracy: " + accuracy; s += "\nRand Index: " + ri; s += "\nAdjusted Rand Index: " + ari; s += "\nMutual Information: " + mi; s 
+= "\nNormalised Mutual Information: " + nmi; s += "\nAdjusted Mutual Information: " + ami; return s; } public String writeFullResultsToString() throws Exception { finaliseResults(); StringBuilder st = new StringBuilder(); st.append(generateFirstLine()).append("\n"); st.append(generateSecondLine()).append("\n"); st.append(generateThirdLine()).append("\n"); st.append(instancePredictionsToString()); return st.toString(); } public void writeFullResultsToFile(String path) throws Exception { OutFile out = null; try { out = new OutFile(path); out.writeString(writeFullResultsToString()); } catch (Exception e) { throw new Exception("Error writing results file.\n" + "Outfile most likely didnt open successfully, probably directory doesnt exist yet.\n" + "Path: " + path + "\nError: " + e); } finally { if (out != null) out.closeFile(); } } private void parseFirstLine(String line) { String[] parts = line.split(","); if (parts.length == 0) return; datasetName = parts[0]; estimatorName = parts[1]; split = parts[2]; foldID = Integer.parseInt(parts[3]); setTimeUnitFromString(parts[4]); //nothing stopping the description from having its own commas in it, just read until end of line for (int i = 5; i < parts.length; i++) description += "," + parts[i]; } private String generateFirstLine() { return datasetName + "," + estimatorName + "," + split + "," + foldID + "," + getTimeUnitAsString() + "," + description; } private void parseSecondLine(String line) { paras = line; } private String generateSecondLine() { return paras; } /** * Returns the test acc reported on this line, for comparison with acc * computed later to assert they align. 
Accuracy has always been reported * on this line in this file format, so fair to assume if this fails * then the file is simply malformed */ private double parseThirdLine(String line) { String[] parts = line.split(","); accuracy = Double.parseDouble(parts[0]); buildTime = Long.parseLong(parts[1]); testTime = Long.parseLong(parts[2]); benchmarkTime = Long.parseLong(parts[3]); memoryUsage = Long.parseLong(parts[4]); numClasses = Integer.parseInt(parts[5]); numClusters = Integer.parseInt(parts[6]); return accuracy; } private String generateThirdLine() { String res = accuracy + "," + buildTime + "," + testTime + "," + benchmarkTime + "," + memoryUsage + "," + getNumClasses() + "," + getNumClusters(); return res; } private String getTimeUnitAsString() { return timeUnit.name(); } private void setTimeUnitFromString(String str) { timeUnit = TimeUnit.valueOf(str); } public void loadResultsFromFile(String path) throws Exception { try { //init trueClassValues = new ArrayList<>(); clusterValues = new ArrayList<>(); distributions = new ArrayList<>(); predTimes = new ArrayList<>(); descriptions = new ArrayList<>(); numInstances = 0; accuracy = -1; buildTime = -1; memoryUsage = -1; //check file exists File f = new File(path); if (!(f.exists() && f.length() > 0)) throw new FileNotFoundException("File " + path + " NOT FOUND"); Scanner inf = new Scanner(f); //parse meta infos parseFirstLine(inf.nextLine()); parseSecondLine(inf.nextLine()); double reportedTestAcc = parseThirdLine(inf.nextLine()); //parse predictions instancePredictionsFromScanner(inf); //acts as a basic form of verification, does the acc reported on line 3 align with //the acc calculated while reading predictions double eps = 1.e-8; if (clAccTestPrint && Math.abs(reportedTestAcc - accuracy) > eps) { System.out.println("Calculated accuracy (" + accuracy + ") differs from written accuracy " + "(" + reportedTestAcc + ") by more than eps (" + eps + "). File = " + path + ". numinstances = " + numInstances + ". 
numClasses = " + numClasses); } finalised = true; inf.close(); } catch (FileNotFoundException fnf) { if (printOnFailureToLoad) System.out.println("File " + path + " NOT FOUND"); throw fnf; } catch (Exception ex) { if (printOnFailureToLoad) System.out.println("File " + path + " FAILED TO LOAD"); throw ex; } } /****************************************** * * METRIC CALCULATIONS * */ private double tp = 0, tn = 0, fn = 0, fp = 0; private boolean foundPairConfusionMatrix = false; private double[] classCounts, clusterCounts; private boolean foundCounts = false; private double[][] contingencyMatrix; private boolean foundContingencyMatrix = false; private double classEntropy, clusterEntropy; private boolean foundEntropy = false; /** * Will calculate all the metrics that can be found from the prediction information * stored in this object. Will NOT call finaliseResults(..), and finaliseResults(..) * not have been called elsewhere, however if it has not been called then true * class values must have been supplied while storing predictions. * * This is to allow iterative calculation of the metrics (in e.g. 
batches * of added predictions) */ public void findAllStats() { //meta info if (numInstances <= 0) inferNumInstances(); if (numClusters <= 0) inferNumClusters(); if (accuracy < 0) calculateAcc(); findPairConfusionMatrix(); findCounts(); findContingencyMatrix(); findEntropy(); ri = findRI(); ari = findARI(); mi = findMI(); nmi = findNMI(); ami = findAMI(); medianPredTime = findMedianPredTime(predTimes); allStatsFound = true; } public double findRI() { if (!foundPairConfusionMatrix) findPairConfusionMatrix(); return (tp + tn) / (tp + tn + fn + fp); } public double findARI() { if (!foundPairConfusionMatrix) findPairConfusionMatrix(); return 2 * (tp * tn - fn * fp) / ((tp + fn) * (fn + tn) + (tp + fp) * (fp + tn)); } public double findMI() { if (!foundContingencyMatrix) findContingencyMatrix(); if (!foundCounts) findCounts(); double logNI = Math.log(numInstances); double sum = 0; for (int i = 0; i < numClusters; i++) { for (int n = 0; n < numClasses; n++) { if (contingencyMatrix[i][n] != 0) { double a = contingencyMatrix[i][n] / numInstances; sum += a * (Math.log(contingencyMatrix[i][n]) - logNI) + a * (-Math.log(clusterCounts[i] * classCounts[n]) + logNI * 2); } } } if (sum < 0) return 0; return sum; } public double findNMI() { if (mi == -1) mi = findMI(); if (!foundEntropy) findEntropy(); double norm = (classEntropy + clusterEntropy) / 2; return mi / norm; } public double findAMI() { if (mi == -1) mi = findMI(); if (!foundEntropy) findEntropy(); //expected mutual information double max = 0; double[] logClassCounts = new double[classCounts.length]; double[] glnClass = new double[classCounts.length]; double[] glnNClass = new double[classCounts.length]; for (int i = 0; i < classCounts.length; i++) { if (classCounts[i] > max) max = classCounts[i]; logClassCounts[i] = Math.log(classCounts[i]); glnClass[i] = logGamma(classCounts[i] + 1); glnNClass[i] = logGamma(numInstances - classCounts[i] + 1); } double[] logClusterCounts = new double[clusterCounts.length]; double[] 
glnCluster = new double[clusterCounts.length]; double[] glnNCluster = new double[clusterCounts.length]; for (int i = 0; i < clusterCounts.length; i++) { if (clusterCounts[i] > max) max = clusterCounts[i]; logClusterCounts[i] = Math.log(clusterCounts[i]); glnCluster[i] = logGamma(clusterCounts[i] + 1); glnNCluster[i] = logGamma(numInstances - clusterCounts[i] + 1); } double logNI = Math.log(numInstances); double[] nijs = new double[(int) max + 1]; double[] logNnij = new double[nijs.length]; double[] term1 = new double[nijs.length]; double[] glnNij = new double[nijs.length]; for (int i = 0; i < nijs.length; i++) { nijs[i] = i; logNnij[i] = logNI + Math.log(i); term1[i] = (double) i / numInstances; glnNij[i] = logGamma(i + 1); } double glnN = logGamma(numInstances + 1); int[][] start = new int[numClusters][numClasses]; int[][] end = new int[numClusters][numClasses]; for (int i = 0; i < numClusters; i++) { for (int n = 0; n < numClasses; n++) { double v = classCounts[n] - numInstances + clusterCounts[i]; start[i][n] = (int) Math.max(v, 1); end[i][n] = (int) Math.min(clusterCounts[i], classCounts[n]) + 1; } } double emi = 0; for (int i = 0; i < numClusters; i++) { for (int n = 0; n < numClasses; n++) { for (int j = start[i][n]; j < end[i][n]; j++) { double term2 = logNnij[j] - logClassCounts[n] - logClusterCounts[i]; double a = logGamma(classCounts[n] - j + 1); double b = logGamma(clusterCounts[i] - j + 1); double c = logGamma(numInstances - classCounts[n] - clusterCounts[i] + j + 1); double gln = glnClass[n] + glnCluster[i] + glnNClass[n] + glnNCluster[i] - glnN - glnNij[j] - a - b - c; double term3 = Math.exp(gln); emi += term1[j] * term2 * term3; } } } double norm = (classEntropy + clusterEntropy) / 2 - emi; if (norm == 0) norm = Double.MIN_VALUE; return (mi - emi) / norm; } private void findPairConfusionMatrix() { for (int i = 0; i < numInstances; i++) { for (int n = 0; n < numInstances; n++) { if (i == n) continue; if 
(clusterValues.get(i).equals(clusterValues.get(n)) && trueClassValues.get(i).equals(trueClassValues.get(n))) { tp++; } else if (!clusterValues.get(i).equals(clusterValues.get(n)) && !trueClassValues.get(i).equals(trueClassValues.get(n))) { tn++; } else if (clusterValues.get(i).equals(clusterValues.get(n)) && !trueClassValues.get(i).equals(trueClassValues.get(n))) { fn++; } else { fp++; } } } foundPairConfusionMatrix = true; } private void findCounts() { classCounts = new double[numClasses]; clusterCounts = new double[numClusters]; for (int i = 0; i < numInstances; i++) { classCounts[trueClassValues.get(i).intValue()]++; clusterCounts[clusterValues.get(i).intValue()]++; } foundCounts = true; } private void findContingencyMatrix() { contingencyMatrix = new double[numClusters][numClasses]; for (int i = 0; i < numInstances; i++) { contingencyMatrix[clusterValues.get(i).intValue()][trueClassValues.get(i).intValue()]++; } foundContingencyMatrix = true; } private void findEntropy() { if (!foundCounts) findCounts(); classEntropy = entropy(classCounts); clusterEntropy = entropy(clusterCounts); foundEntropy = true; } private double entropy(double[] arr) { double x = 0; double logNI = Math.log(numInstances); for (double p : arr) { x -= p > 0 ? (p / numInstances) * (Math.log(p) - logNI) : 0; } return x; } /** * Will calculate all the metrics that can be found from the prediction information * stored in this object, UNLESS this object has been finalised (finaliseResults(..)) AND * has already had it's stats found (findAllStats()), e.g. if it has already been called * by another process. * * In this latter case, this method does nothing. 
*/ @Override public void findAllStatsOnce() { if (finalised && allStatsFound) { printlnDebug("Stats already found, ignoring findAllStatsOnce()"); return; } else { findAllStats(); } } /** * Concatenates the predictions of clusterers made on different folds on the data * into one results object * <p> * If ClustererResults ever gets split into separate classes for prediction and meta info, * this obviously gets cleaned up a lot * * @param cresults ClustererResults[fold] * @return single ClustererResults object */ public static ClustererResults concatenateClustererResults( /*fold*/ ClustererResults[] cresults) throws Exception { return concatenateClustererResults(new ClustererResults[][]{cresults})[0]; } /** * Concatenates the predictions of clusterers made on different folds on the data * into one results object per clusterer. * <p> * If ClustererResults ever gets split into separate classes for prediction and meta info, * this obviously gets cleaned up a lot * * @param cresults ClustererResults[clusterer][fold] * @return ClustererResults[clusterer] */ public static ClustererResults[] concatenateClustererResults( /*clusterer*/ /*fold*/ ClustererResults[][] cresults) throws Exception { ClustererResults[] concatenatedResults = new ClustererResults[cresults.length]; for (int clustererid = 0; clustererid < cresults.length; clustererid++) { if (cresults[clustererid].length == 1) { concatenatedResults[clustererid] = cresults[clustererid][0]; } else { ClustererResults newCres = new ClustererResults(cresults[clustererid][0].numClasses); for (int foldid = 0; foldid < cresults[clustererid].length; foldid++) { ClustererResults foldCres = cresults[clustererid][foldid]; for (int predid = 0; predid < foldCres.numInstances(); predid++) { newCres.addPrediction(foldCres.getTrueClassValue(predid), foldCres.getProbabilityDistribution(predid), foldCres.getClusterValue(predid), foldCres.getPredictionTime(predid), foldCres.getPredDescription(predid)); } } concatenatedResults[clustererid] = 
newCres; } } return concatenatedResults; } public static void main(String[] args) { ClustererResults cr = new ClustererResults(3); Collections.addAll(cr.trueClassValues, 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 2., 2., 2., 2., 2.); Collections.addAll(cr.clusterValues, 0., 1., 1., 0., 0., 1., 0., 3., 3., 3., 2., 2., 2., 2., 2.); Collections.addAll(cr.predTimes, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L); cr.numInstances = 15; cr.numClusters = 4; cr.findAllStats(); System.out.println(cr.statsToString()); } }
45,743
33.839299
138
java
tsml-java
tsml-java-master/src/main/java/evaluation/storage/EstimatorResults.java
package evaluation.storage; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.concurrent.TimeUnit; import java.util.function.Function; public abstract class EstimatorResults { //LINE 1: meta info, set by user protected String estimatorName = ""; protected String datasetName = ""; protected String split = ""; //e.g train or test protected int foldID = -1; protected String description= ""; //human-friendly optional extra info if wanted. //LINE 3: acc, buildTime, testTime, memoryUsage /** * The time taken to complete the build of an estimator, aka training. May be cumulative time over many parameter * set builds, etc It is assumed that the time given will be in the unit of measurement set by this object TimeUnit, * default nanoseconds. If no benchmark time is supplied, the default value is -1 */ protected long buildTime = -1; /** * The cumulative prediction time, equal to the sum of the individual prediction times stored. Intended as a quick * helper/summary in case complete prediction information is not stored, and/or for a human reader to quickly * compare times. * * It is assumed that the time given will be in the unit of measurement set by this object TimeUnit, * default nanoseconds. * If no benchmark time is supplied, the default value is -1 */ protected long testTime = -1; protected long medianPredTime; /** * The time taken to perform some standard benchmarking operation, to allow for a (not necessarily precise) * way to measure the general speed of the hardware that these results were made on, such that users * analysing the results may scale the timings in this file proportional to the benchmarks to get a consistent * relative scale across different results sets. It is up to the user what this benchmark operation is, and how * long it is (roughly) expected to take. * <p> * It is assumed that the time given will be in the unit of measurement set by this object TimeUnit, default * nanoseconds. 
If no benchmark time is supplied, the default value is -1 */ protected long benchmarkTime = -1; /** * It is user dependent on exactly what this field means and how accurate it may be (because of Java's lazy gc). * Intended purpose would be the size of the model at the end of/after estimator build, aka the estimator * has been trained. * <p> * The assumption, for now, is that this is measured in BYTES, but this is not enforced/ensured * If no memoryUsage value is supplied, the default value is -1 */ protected long memoryUsage = -1; /** * Consistent time unit ASSUMED across build times. Default to nanoseconds. * <p> * A long can contain 292 years worth of nanoseconds, which I assume to be enough for now. * Could be conceivable that the cumulative time of a large meta ensemble that is run * multi-threaded on a large dataset might exceed this. */ protected TimeUnit timeUnit = TimeUnit.NANOSECONDS; /*************************** * * LINE 1 GETS/SETS * * Just basic descriptive stuff, nothing fancy goign on here * */ public String getEstimatorName() { return estimatorName; } public void setEstimatorName(String estimatorName) { this.estimatorName = estimatorName; } public String getDatasetName() { return datasetName; } public void setDatasetName(String datasetName) { this.datasetName = datasetName; } public int getFoldID() { return foldID; } public void setFoldID(int foldID) { this.foldID = foldID; } /** * e.g "train", "test", "validation" */ public String getSplit() { return split; } /** * e.g "train", "test", "validation" */ public void setSplit(String split) { this.split = split; } /** * Consistent time unit ASSUMED across build times, test times, individual prediction times. * Before considering different timeunits, all timing were in milliseconds, via * System.currentTimeMillis(). Some classifiers on some datasets may run in less than 1 millisecond * however, so as of 19/2/2019, classifierResults now defaults to working in nanoseconds. 
* * A long can contain 292 years worth of nanoseconds, which I assume to be enough for now. * Could be conceivable that the cumulative time of a large meta ensemble that is run * multi-threaded on a large dataset might exceed this. * * In results files made before 19/2/2019, which only stored build times and * milliseconds was assumed, there will be no unit of measurement for the time. */ public TimeUnit getTimeUnit() { return timeUnit; } /** * This will NOT convert any timings already stored in this classifier results object * to the new time unit. e.g if build time was had already been stored in seconds as 10, THEN * setTimeUnit(TimeUnit.MILLISECONDS) was called, the actual value of build time would still be 10, * but now assumed to mean 10 milliseconds. * * Consistent time unit ASSUMED across build times, test times, individual prediction times. * Before considering different timeunits, all timing were in milliseconds, via * System.currentTimeMillis(). Some classifiers on some datasets may run in less than 1 millisecond * however, so as of 19/2/2019, classifierResults now defaults to working in nanoseconds. * * A long can contain 292 years worth of nanoseconds, which I assume to be enough for now. * Could be conceivable that the cumulative time of a large meta ensemble that is run * multi-threaded on a large dataset might exceed this. * * In results files made before 19/2/2019, which only stored build times and * milliseconds was assumed, there will be no unit of measurement for the time. */ public void setTimeUnit(TimeUnit timeUnit) { this.timeUnit = timeUnit; } /** * This is a free-form description that can hold any info you want, with the only caveat * being that it cannot contain newline characters. Description could be the experiment * that these results were made for, e.g "Initial Univariate Benchmarks". Entirely * up to the user to process if they want to. * * By default, it is an empty string. 
*/ public String getDescription() { return description; } /** * This is a free-form description that can hold any info you want, with the only caveat * being that it cannot contain newline characters. Description could be the experiment * that these results were made for, e.g "Initial Univariate Benchmarks". Entirely * up to the user to process if they want to. * * By default, it is an empty string. */ public void setDescription(String description) { this.description = description; } //todo revisit these when more willing to refactor stats pipeline to avoid assumption of doubles. //a double can accurately (except for the standard double precision problems) hold at most ~7 weeks worth of nano seconds // a double's mantissa = 52bits, 2^52 / 1000000000 / 60 / 60 / 24 / 7 = 7.something weeks //so, will assume the usage/requirement for milliseconds in the stats pipeline, to avoid the potential future problem //of meta-ensembles taking more than a week, etc. (or even just summing e.g 30 large times to be averaged) //it is still preferable of course to store any timings in nano's in the classifierresults object since they'll //store them as longs. 
public static final Function<EstimatorResults, Double> GETTER_buildTimeDoubleMillis = (EstimatorResults cr) -> toDoubleMillis(cr.buildTime, cr.timeUnit); public static final Function<EstimatorResults, Double> GETTER_totalTestTimeDoubleMillis = (EstimatorResults cr) -> toDoubleMillis(cr.testTime,cr.timeUnit); public static final Function<EstimatorResults, Double> GETTER_avgTestPredTimeDoubleMillis = (EstimatorResults cr) -> toDoubleMillis(cr.medianPredTime, cr.timeUnit); public static final Function<EstimatorResults, Double> GETTER_benchmarkTime = (EstimatorResults cr) -> toDoubleMillis(cr.benchmarkTime, cr.timeUnit); public static final Function<EstimatorResults, Double> GETTER_buildTimeDoubleMillisBenchmarked = (EstimatorResults cr) -> divideAvoidInfinity(GETTER_buildTimeDoubleMillis.apply(cr), GETTER_benchmarkTime.apply(cr)); public static final Function<EstimatorResults, Double> GETTER_totalTestTimeDoubleMillisBenchmarked = (EstimatorResults cr) -> divideAvoidInfinity(GETTER_totalTestTimeDoubleMillis.apply(cr), GETTER_benchmarkTime.apply(cr)); public static final Function<EstimatorResults, Double> GETTER_avgTestPredTimeDoubleMillisBenchmarked = (EstimatorResults cr) -> divideAvoidInfinity(GETTER_avgTestPredTimeDoubleMillis.apply(cr), GETTER_benchmarkTime.apply(cr)); public static final Function<EstimatorResults, Double> GETTER_MemoryMB = (EstimatorResults cr) -> (double)(cr.memoryUsage/1e+6); protected static double divideAvoidInfinity(double a, double b) { if(b == 0) { // avoid divide by 0 --> infinity return a; } else { return a / b; } } protected static double toDoubleMillis(long time, TimeUnit unit) { if (time < 0) return -1; if (time == 0) return 0; if (unit.equals(TimeUnit.MICROSECONDS)) { long pre = time / 1000; //integer division for pre - decimal point long post = time % 1000; //the remainder that needs to be converted to post decimal point, some value < 1000 double convertedPost = (double)post / 1000; // now some fraction < 1 return pre + 
convertedPost; } else if (unit.equals(TimeUnit.NANOSECONDS)) { long pre = time / 1000000; //integer division for pre - decimal point long post = time % 1000000; //the remainder that needs to be converted to post decimal point, some value < 1000 double convertedPost = (double)post / 1000000; // now some fraction < 1 return pre + convertedPost; } else { //not higher resolution than millis, no special conversion needed just cast to double return (double)unit.toMillis(time); } } /** * Makes copy of pred times to easily maintain original ordering */ protected long findMedianPredTime(ArrayList<Long> predTimes) { List<Long> copy = new ArrayList<>(predTimes); Collections.sort(copy); int mid = copy.size()/2; if (copy.size() % 2 == 0) return (copy.get(mid) + copy.get(mid-1)) / 2; else return copy.get(mid); } public abstract double getAcc(); public abstract void cleanPredictionInfo(); /** * Will calculate all the metrics that can be found from the prediction information * stored in this object, UNLESS this object has been finalised (finaliseResults(..)) AND * has already had it's stats found (findAllStats()), e.g. if it has already been called * by another process. * <p> * In this latter case, this method does nothing. */ public abstract void findAllStatsOnce(); }
11,463
45.225806
230
java
tsml-java
tsml-java-master/src/main/java/evaluation/storage/EstimatorResultsCollection.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package evaluation.storage; import experiments.data.DatasetLists; import java.io.File; import java.io.FileNotFoundException; import java.util.*; import java.util.function.Function; import java.util.stream.Collectors; import utilities.DebugPrinting; import utilities.ErrorReport; /** * Essentially a loader for many results over a given set of estimators, datasets, folds, and splits * * This as been implemented as barebones arrays instead of large collections for speed (of execution and implementation) * and memory efficiency, however depending on demand, use cases and time could be redone to be represented by e.g. maps underneath * * Usage: * Construct the object * Set estimators, datasets, folds and splits to read at MINIMUM * Set any optional settings on how to read and store the results * Call LOAD() * Either use the big old EstimatorResults[][][][] returned, or interact with the * collection via the SLICE or RETRIEVE methods * * SLICE...() methods get subsets of the results already loaded into memory * RETRIEVE...(...) 
methods get a particular stat or info from each results object * retrieveAccuracies() wraps the accuracies getter as a shortcut/example * * todo integrate into multipleestimatorevaluation/estimatorresultsanalysis * todo replace old DebugPrinting stuff with loggers if/when going full enterprise * todo proper missing results summaries, option to reduce to largest complete subset * of split/estimator/dataset/folds * todo maybe use this class for other things to, e.g. instead of loading results, just check * existence, large-scale zipping/copying/moving of results files, etc * * @author James Large (james.large@uea.ac.uk) */ public class EstimatorResultsCollection implements DebugPrinting { public enum ResultsType { CLASSIFICATION, CLUSTERING, REGRESSION } private ResultsType resultsType = ResultsType.CLASSIFICATION; /** * EstimatorResults[split][estimator][dataset][fold] * Split taken to be first dimension for easy retrieval of a single split if only one is loaded * and you want results in the 3d form [estimator][dataset][fold] */ private EstimatorResults[][][][] allResults; private int numDatasets; private String[] datasetNamesInStorage; private String[] datasetNamesInOutput; private int numEstimators; private String[] estimatorNamesInStorage; private String[] estimatorNamesInOutput; private int numFolds; private int[] folds; private int numSplits; private String[] splits; private int numMissingResults; private HashSet<String> splitsWithMissingResults; private HashSet<String> estimatorsWithMissingResults; private HashSet<String> datasetsWithMissingResults; private HashSet<Integer> foldsWithMissingResults; public static boolean printOnEstimatorNameMismatch = true; /** * Paths to directories containing all the estimatorNamesInStorage directories * with the results, in format {baseReadPath}/{estimators}/Predictions/{datasets}/{split}Fold{folds}.csv * * If readResultsFilesDirectories.length == 1, all estimator's results read from that one path * else, resultsPaths.length 
must equal estimators.length, with each index aligning * to the path to read the estimator's results from. * * e.g to read 2 estimators from one directory, and another 2 from 2 different directories: * * Index | Paths | Estimatorr * -------------------------- * 0 | pathA | e1 * 1 | pathA | e2 * 2 | pathB | e3 * 3 | pathC | e4 * */ private String resultsFilesDirectories[]; /** * If true, will null the individual prediction info of each EstimatorResults object after stats are found for it * * Defaults to true */ private boolean cleanResults = true; /** * If true, the returned lists are guaranteed to be of size numEstimators*numDsets*numFolds*2, * but entries may be null; * * Defaults to false */ private boolean allowMissingResults = false; /** * If true, will fill in missing probability distributions with one-hot vectors * for files read in that are missing them. intended for very old files, where you still * want to calc auroc etc (metrics that need dists) for all the other classifiers * that DO provide them, but also want to compare e.g accuracy with classifier that don't * * Defaults to false */ private boolean ignoreMissingDistributions = false; public EstimatorResultsCollection() { } /** * Creates complete copy of the other collection, but keeps the subResults instead of the * other's results. 
Intended for use when slicing, and then manually edit the particular * bit of meta info that was sliced */ private EstimatorResultsCollection(EstimatorResultsCollection other, EstimatorResults[][][][] subResults) { this.allResults = subResults; this.numDatasets = other.numDatasets; this.datasetNamesInStorage = other.datasetNamesInStorage; this.datasetNamesInOutput = other.datasetNamesInOutput; this.numEstimators = other.numEstimators; this.estimatorNamesInStorage = other.estimatorNamesInStorage; this.estimatorNamesInOutput = other.estimatorNamesInOutput; this.numFolds = other.numFolds; this.folds = other.folds; this.numSplits = other.numSplits; this.splits = other.splits; this.resultsFilesDirectories = other.resultsFilesDirectories; this.cleanResults = other.cleanResults; this.allowMissingResults = other.allowMissingResults; this.ignoreMissingDistributions = other.ignoreMissingDistributions; } /** * Sets the type of results to load in, i.e. classification or clustering */ public void setResultsType(ResultsType resultsType) { this.resultsType = resultsType; } /** * Sets the number folds/resamples to read in for each estimator/dataset. * Will create a range of fold ids from 0(inclusive) to maxFolds(exclusive) */ public void setFolds(int maxFolds) { setFolds(0, maxFolds); } /** * Sets the folds/resamples to read in for each estimator/dataset * Will create a range of fold ids from minFolds(inclusive) to maxFolds(exclusive) */ public void setFolds(int minFolds, int maxFolds) { setFolds(buildRange(minFolds, maxFolds)); } /** * Sets the specific folds/resamples to read in for each estimator/dataset, * to be used if the folds wanted to not lie in a continuous range for example */ public void setFolds(int[] foldIds) { this.folds = foldIds; this.numFolds = foldIds.length; } /** * Sets the estimators to be read in from baseReadPath. 
Names must correspond to directory names * in which {estimators}/Predictions/{datasets}/{split}Fold{folds}.csv directories/files exist */ public void setEstimators(String[] estimatorNames, String[] baseReadPaths) { setEstimators(estimatorNames, estimatorNames, baseReadPaths); } /** * Sets the estimators to be read in from baseReadPath. Names must correspond to directory names * in which {estimator}/Predictions/{datasets}/{split}Fold{folds}.csv directories/files exist */ public void setEstimators(String[] estimatorNamesInStorage, String[] estimatorNamesInOutput, String[] baseReadPaths) { if (estimatorNamesInStorage.length != estimatorNamesInOutput.length || estimatorNamesInStorage.length != baseReadPaths.length) throw new IllegalArgumentException("Estimator names lengths and paths not equal, " + "estimatorNamesInStorage.length="+estimatorNamesInStorage.length + " estimatorNamesInOutput.length="+estimatorNamesInOutput.length + " baseReadPaths.length="+baseReadPaths.length); this.estimatorNamesInStorage = estimatorNamesInStorage; this.estimatorNamesInOutput = estimatorNamesInOutput; this.resultsFilesDirectories = baseReadPaths; numEstimators = estimatorNamesInOutput.length; } /** * Adds estimators to be read in from baseReadPath. Names must correspond to directory names * in which {estimators}/Predictions/{datasets}/{split}Fold{folds}.csv directories/files exist */ public void addEstimators(String[] estimatorNames, String baseReadPath) { addEstimators(estimatorNames, estimatorNames, baseReadPath); } /** * Adds estimators to be read in from baseReadPath. cestimatorNamesInStorage must correspond to directory names * in which {estimators}/Predictions/{datasets}/{split}Fold{folds}.csv directories/files exist, * while estimatorNamesInOutputs can be 'cleaner' names intended for image or spreadsheet * outputs. 
The two arrays should be parallel */ public void addEstimators(String[] estimatorNamesInStorage, String[] estimatorNamesInOutput, String baseReadPath) { if (estimatorNamesInStorage.length != estimatorNamesInOutput.length) throw new IllegalArgumentException("Estimator names lengths not equal, " + "estimatorNamesInStorage.length="+estimatorNamesInStorage.length + " estimatorNamesInOutput.length="+estimatorNamesInOutput.length); if (this.estimatorNamesInOutput == null) { //nothing initialisd yet, just set them directly String[] t = new String[estimatorNamesInOutput.length]; for (int i = 0; i < estimatorNamesInOutput.length; i++) t[i] = baseReadPath; setEstimators(estimatorNamesInStorage, estimatorNamesInOutput, t); return; } //yay arrays int origLength = this.estimatorNamesInStorage.length; int addedLength = estimatorNamesInStorage.length; this.estimatorNamesInStorage = Arrays.copyOf(this.estimatorNamesInStorage, origLength + addedLength); for (int i = 0; i < addedLength; i++) this.estimatorNamesInStorage[origLength + i] = estimatorNamesInStorage[i]; this.estimatorNamesInOutput = Arrays.copyOf(this.estimatorNamesInOutput, origLength + addedLength); for (int i = 0; i < addedLength; i++) this.estimatorNamesInOutput[origLength + i] = estimatorNamesInOutput[i]; baseReadPath.replace("\\", "/"); if (baseReadPath.charAt(baseReadPath.length()-1) != '/') baseReadPath += "/"; this.resultsFilesDirectories = Arrays.copyOf(this.resultsFilesDirectories, origLength + addedLength); for (int i = 0; i < addedLength; i++) this.resultsFilesDirectories[origLength + i] = baseReadPath; numEstimators = origLength + addedLength; } /** * Sets the datasets to be read in for each estimator. Names must correspond to directory names * in which {datasets}/{split}Fold{folds}.csv directories/files exist for each estimator, */ public void setDatasets(String[] datasetNames) { setDatasets(datasetNames, datasetNames); } /** * Sets the datasets to be read in for each estimator. 
datasetNamesInStorage must correspond to directory names * in which {datasets}/{split}Fold{folds}.csv directories/files exist for each estimator, * while estimatorNamesInOutputs can be 'cleaner' names intended for image or spreadsheet * outputs. The two arrays should be parallel */ public void setDatasets(String[] datasetNamesInStorage, String[] datasetNamesInOutput) { if (datasetNamesInStorage.length != datasetNamesInStorage.length) throw new IllegalArgumentException("Estimator datasetNamesInOutput lengths not equal, " + "datasetNamesInStorage.length="+datasetNamesInStorage.length + " datasetNamesInOutput.length="+datasetNamesInOutput.length); this.numDatasets = datasetNamesInStorage.length; this.datasetNamesInStorage = datasetNamesInStorage; this.datasetNamesInOutput = datasetNamesInOutput; } /** * Set to look for train fold files only for each estimator/dataset/fold */ public void setSplit_Train() { setSplit("train"); } /** * Sets to look for test fold files only for each estimator/dataset/fold */ public void setSplit_Test() { setSplit("test"); } /** * Sets to look for train AND test fold files for each estimator/dataset/fold */ public void setSplit_TrainTest() { setSplits(new String[] { "train", "test" }); } /** * Sets to look for a particular dataset split, test and test are currently * the only options generated by e.g. ClassifierExperiments.java. In the future, things * like validation, cvFoldX, etc might be possible */ public void setSplit(String split) { this.splits = new String[] { split }; this.numSplits = 1; } /** * Sets to look for a particular dataset split, test and test are currently * the only options generated by e.g. ClassifierExperiments.java. 
In the future, things * like validation, cvFoldX, etc might be possible */ public void setSplits(String[] splits) { this.splits = splits; this.numSplits = splits.length; } /** * if true, will null the individual prediction info of each EstimatorResults object after stats are found for it * * defaults to true */ public void setCleanResults(boolean cleanResults) { this.cleanResults = cleanResults; } /** * if true, the returned lists are guaranteed to be of size numEstimator*numDsets*numFolds*2, * but entries may be null; * * defaults to false */ public void setAllowMissingResults(boolean allowMissingResults) { this.allowMissingResults = allowMissingResults; } /** * if true, will fill in missing probability distributions with one-hot vectors * for files read in that are missing them. intended for very old files, where you still * want to calc auroc etc (metrics that need dists) for all the other classifiers * that DO provide them, but also want to compare e.g accuracy with classifier that don't * * defaults to false */ public void setIgnoreMissingDistributions(boolean ignoreMissingDistributions) { this.ignoreMissingDistributions = ignoreMissingDistributions; } public int getNumDatasets() { return numDatasets; } public String[] getDatasetNamesInStorage() { return datasetNamesInStorage; } public String[] getDatasetNamesInOutput() { return datasetNamesInOutput; } public int getNumEstimators() { return numEstimators; } public String[] getEstimatorNamesInStorage() { return estimatorNamesInStorage; } public String[] getEstimatorNamesInOutput() { return estimatorNamesInOutput; } public int getNumFolds() { return numFolds; } public int[] getFolds() { return folds; } public int getNumSplits() { return numSplits; } public String[] getSplits() { return splits; } public String[] getBaseReadPaths() { return resultsFilesDirectories; } public int getNumMissingResults() { return numMissingResults; } /** * If true, will null the individual prediction info of each EstimatorResults 
     * object after stats are found for it
     *
     * Defaults to true
     */
    public boolean getCleanResults() {
        return cleanResults;
    }

    /**
     * If true, the returned lists are guaranteed to be of size numEstimators*numDsets*numFolds*2,
     * but entries may be null;
     *
     * Defaults to false
     */
    public boolean getAllowMissingResults() {
        return allowMissingResults;
    }

    /**
     * If true, will fill in missing probability distributions with one-hot vectors
     * for files read in that are missing them. intended for very old files, where you still
     * want to calc auroc etc (metrics that need dists) for all the other classifiers
     * that DO provide them, but also want to compare e.g accuracy with classifier that don't
     *
     * Defaults to false
     */
    public boolean getIgnoreMissingDistributions() {
        return ignoreMissingDistributions;
    }

    /** Total number of results requested, i.e. including any that failed to load. */
    public int getTotalNumResultsIgnoreMissing() {
        return (numSplits * numEstimators * numDatasets * numFolds);
    }

    /** Number of results actually loaded (requested minus missing). */
    public int getTotalNumResults() {
        return getTotalNumResultsIgnoreMissing() - numMissingResults;
    }

    /** Summary of what has been loaded: counts plus the splits/estimators/datasets/folds requested. */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("EstimatorResultsCollection: " + getTotalNumResults() + " total, " + numMissingResults + " missing");
        sb.append("\n\tSplits: ").append(Arrays.toString(splits));
        sb.append("\n\tEstimator: ").append(Arrays.toString(estimatorNamesInOutput));
        sb.append("\n\tDatasets: ").append(Arrays.toString(datasetNamesInOutput));
        sb.append("\n\tFolds: ").append(Arrays.toString(folds));
        return sb.toString();
    }

    /**
     * Checks that the read paths, estimators, datasets, folds and splits have all been
     * set and that the read directories exist. Collects every problem found and throws
     * them together via the ErrorReport, rather than failing on the first.
     */
    private void confirmMinimalInfoGivenAndValid() throws Exception {
        ErrorReport err = new ErrorReport("Required results collection info missing:\n");

        if (resultsFilesDirectories == null) {
            err.log("\tBase path to read results from not set\n");
        }
        else if (resultsFilesDirectories.length == 1) {
            if (!(new File(resultsFilesDirectories[0]).exists())) {
                err.log("\tBase path to read results from cannot be found: " + resultsFilesDirectories[0] + "\n");
            }
        }
        else { //many read paths
            // either one shared path, or exactly one path per estimator
            if (resultsFilesDirectories.length != estimatorNamesInOutput.length) {
                err.log("\tEither need to specify a single read path, or a read path for each estimator. Read paths given: "
                        + resultsFilesDirectories.length + ", estimators given: " + estimatorNamesInOutput.length + "\n");
            }
            for (String dir : resultsFilesDirectories) {
                if (!(new File(dir).exists())) {
                    err.log("\tA base path to read results from cannot be found: " + dir + "\n");
                }
            }
        }

        if (estimatorNamesInStorage == null || estimatorNamesInStorage.length == 0)
            err.log("\tEstimators to read not set\n");

        if (datasetNamesInStorage == null || datasetNamesInStorage.length == 0)
            err.log("\tDatasets to read not set\n");

        if (folds == null || folds.length == 0)
            err.log("\tFolds to read not set\n");

        if (splits == null || splits.length == 0)
            err.log("\tSplits to read not set\n");

        err.throwIfErrors();
    }

    /** Builds the fold id range [minFolds, maxFolds). */
    private static int[] buildRange(int minFolds, int maxFolds) {
        int[] folds = new int[maxFolds - minFolds];

        int c = minFolds;
        for (int i = 0; i < maxFolds - minFolds; i++, c++)
            folds[i] = c;

        return folds;
    }

    /** Linear search; returns the index of k in arr, or -1 if absent. */
    private static int find(String[] arr, String k) {
        for (int i = 0; i < arr.length; i++)
            if (arr[i].equals(k))
                return i;
        return -1;
    }

    /** Linear search; returns the index of k in arr, or -1 if absent. */
    private static int find(int[] arr, int k) {
        for (int i = 0; i < arr.length; i++)
            if (arr[i] == (k))
                return i;
        return -1;
    }

    /** Thrown by the slice...() methods when asked for a split/estimator/dataset/fold that was not loaded. */
    public class SliceException extends Exception {
        public SliceException(String str, String[] arr, String key) {
            super("SLICE ERROR: Attempted to slice " + str + " by " + key + " but that does not exist in " + Arrays.toString(arr));
        }
        public SliceException(String str, int[] arr, int key) {
            super("SLICE ERROR: Attempted to slice " + str + " by " + key + " but that does not exist in " + Arrays.toString(arr));
        }
    }

    /**
     * Reads a single results file from path, instantiating the EstimatorResults subclass
     * that matches this collection's ResultsType.
     */
    public EstimatorResults loadEstimator(String path) throws FileNotFoundException, Exception {
        if (resultsType == ResultsType.CLASSIFICATION){
            return new ClassifierResults(path);
        }
        else if (resultsType == ResultsType.REGRESSION){
            return new RegressorResults(path);
        }
        else if (resultsType == ResultsType.CLUSTERING){
            return new ClustererResults(path);
        }
        else{
            throw new Exception("Invalid ResultType.");
        }
    }

    /**
     * Loads the splits, estimators, datasets, and folds specified from disk into memory
     * subject to the options set.
     *
     * @return the EstimatorResults[splits][estimators][datasets][folds] loaded in, also accessible after the call with retrieveResults()
     * @throws Exception on any number of missing file if allowMissingResults is false
     */
    public EstimatorResults[][][][] load() throws Exception {
        confirmMinimalInfoGivenAndValid();

        ErrorReport masterError = new ErrorReport("Results files not found:\n");

        allResults = new EstimatorResults[numSplits][numEstimators][numDatasets][numFolds];
        numMissingResults = 0;

        //train files may be produced via TrainAccuracyEstimate, older code
        //while test files likely by experiments, but still might be a very old file
        //so having separate checks for each.
        boolean ignoringDistsFirstTime = true;

        splitsWithMissingResults = new HashSet<>(splits.length);
        estimatorsWithMissingResults = new HashSet<>(estimatorNamesInOutput.length);
        datasetsWithMissingResults = new HashSet<>(datasetNamesInOutput.length);
        foldsWithMissingResults = new HashSet<>(folds.length);

        for (int c = 0; c < numEstimators; c++) {
            String estimatorStorage = estimatorNamesInStorage[c];
            String estimatorOutput = estimatorNamesInOutput[c];
            printlnDebug(estimatorStorage + "(" + estimatorOutput + ") reading");

            int estimatorFnfs = 0; // file-not-founds for this estimator alone
            try {
                ErrorReport perEstimatorError = new ErrorReport("FileNotFoundExceptions thrown:\n");

                for (int d = 0; d < numDatasets; d++) {
                    String datasetStorage = datasetNamesInStorage[d];
                    String datasetOutput = datasetNamesInOutput[d];
                    printlnDebug("\t" + datasetStorage + "(" + datasetOutput + ") reading");

                    for (int f = 0; f < numFolds; f++) {
                        int fold = folds[f];
                        printlnDebug("\t\t" + fold + " reading");

                        for (int s = 0; s < numSplits; s++) {
                            String split = splits[s];
                            printlnDebug("\t\t\t" + split + " reading");

                            // single shared read path, or one per estimator
                            String readPath = resultsFilesDirectories.length == 1 ? resultsFilesDirectories[0] : resultsFilesDirectories[c];

                            try {
                                //Look for a Resample first (new name), else look for a Fold (old name).
                                try {
                                    allResults[s][c][d][f] = loadEstimator(readPath + estimatorStorage + "/Predictions/" + datasetStorage + "/" + split + "Resample" + fold + ".csv");
                                } catch (FileNotFoundException ex) {
                                    allResults[s][c][d][f] = loadEstimator(readPath + estimatorStorage + "/Predictions/" + datasetStorage + "/" + split + "Fold" + fold + ".csv");
                                }

                                //This is only an issue for old ClassifierResults files, we should probably stop
                                //accepting those and just alter the results files if there are any left.
                                if (ignoreMissingDistributions && allResults[s][c][d][f] instanceof ClassifierResults) {
                                    boolean wasMissing = ((ClassifierResults)allResults[s][c][d][f]).populateMissingDists();
                                    if (wasMissing && ignoringDistsFirstTime) {
                                        // warn once only, on the first file found to be missing dists
                                        System.out.println("---------Probability distributions missing, but ignored: "
                                                + estimatorStorage + " - " + datasetStorage + " - " + f + " - train");
                                        ignoringDistsFirstTime = false;
                                    }
                                }

                                if (printOnEstimatorNameMismatch && !allResults[s][c][d][f].estimatorName.equalsIgnoreCase(estimatorNamesInStorage[c])){
                                    System.err.println("Estimator file name: \"" + allResults[s][c][d][f].estimatorName +
                                            "\" is different from input name \"" + estimatorNamesInStorage[c] + "\" on dataset \"" +
                                            datasetStorage + "\".");
                                }

                                allResults[s][c][d][f].findAllStatsOnce();

                                if (cleanResults)
                                    allResults[s][c][d][f].cleanPredictionInfo();
                            } catch (FileNotFoundException ex) {
                                String fileName = readPath + estimatorStorage + "/Predictions/" + datasetStorage + "/" + split + "(Resample/Fold)" + fold + ".csv";
                                if (allowMissingResults) {
                                    // tolerate the gap: leave null and carry on
                                    allResults[s][c][d][f] = null;
                                    System.out.println("Failed to load " + fileName);
                                }
                                else {
                                    perEstimatorError.log(fileName + "\n");
                                }

                                estimatorFnfs++;

                                splitsWithMissingResults.add(split);
                                estimatorsWithMissingResults.add(estimatorStorage);
                                datasetsWithMissingResults.add(datasetStorage);
                                foldsWithMissingResults.add(fold);
                            }

                            printlnDebug("\t\t\t" + split + " successfully read in");
                        }
                        printlnDebug("\t\t" + fold + " successfully read in");
                    }
                    printlnDebug("\t" + datasetStorage + "(" + datasetOutput + ") successfully read in");
                }

                if (!perEstimatorError.isEmpty())
                    perEstimatorError.log("Total num errors for " + estimatorStorage + ": " + estimatorFnfs);
                perEstimatorError.throwIfErrors();

                printlnDebug(estimatorStorage + "(" + estimatorOutput + ") successfully read in");
            } catch (Exception e) {
                masterError.log("Estimator Errors: " + estimatorNamesInStorage[c] + "\n" + e+" ");
                e.printStackTrace();
            }

            numMissingResults += estimatorFnfs;
        }

        masterError.throwIfErrors();

        return allResults;
    }

    /**
     * Returns a EstimatorResultsCollection that contains the same estimator, dataset and fold
     * sets, but only the SPLITS for which all results exist for all estimators, datasets and folds.
     */
    public EstimatorResultsCollection reduceToMinimalCompleteResults_splits() throws Exception {
        if (!allowMissingResults || splitsWithMissingResults.size() == 0)
            return new EstimatorResultsCollection(this, this.allResults);
        else {
            List<String> completeSplits = new ArrayList<>(Arrays.asList(splits));
            completeSplits.removeAll(splitsWithMissingResults);

            EstimatorResultsCollection reducedCol = sliceSplits(completeSplits.toArray(new String[] { }));
            reductionSummary("SPLITS", completeSplits, splitsWithMissingResults);
            return reducedCol;
        }
    }

    /**
     * Returns a EstimatorResultsCollection that contains the same split, dataset and fold
     * sets, but only the ESTIMATORS for which all results exist for all splits, datasets and folds.
*/ public EstimatorResultsCollection reduceToMinimalCompleteResults_estimators() throws Exception { if (!allowMissingResults) return new EstimatorResultsCollection(this, this.allResults); //should be all populated anyway else { List<String> completeEstimators = new ArrayList<>(Arrays.asList(estimatorNamesInStorage)); completeEstimators.removeAll(estimatorsWithMissingResults); EstimatorResultsCollection reducedCol = sliceEstimator(completeEstimators.toArray(new String[] { })); reductionSummary("ESTIMATORS", completeEstimators, estimatorsWithMissingResults); return reducedCol; } } /** * Returns a EstimatorResultsCollection that contains the same split, estimator and fold * sets, but only the DATASETS for which all results exist for all splits, estimators and folds. * * Mainly for use with MultipleEstimatorEvaluation, where for prototyping etc we only want * to compare over completed datasets for a fair comparison. */ public EstimatorResultsCollection reduceToMinimalCompleteResults_datasets() throws Exception { if (!allowMissingResults) return new EstimatorResultsCollection(this, this.allResults); //should be all populated anyway else { List<String> completeDsets = new ArrayList<>(Arrays.asList(datasetNamesInStorage)); completeDsets.removeAll(datasetsWithMissingResults); EstimatorResultsCollection reducedCol = sliceDatasets(completeDsets.toArray(new String[] { })); reductionSummary("DATASETS", completeDsets, datasetsWithMissingResults); return reducedCol; } } /** * Returns a EstimatorResultsCollection that contains the same split, estimator and dataset * sets, but only the FOLDS for which all results exist for all splits, estimators and datasets. 
*/ public EstimatorResultsCollection reduceToMinimalCompleteResults_folds() throws Exception { if (!allowMissingResults) return new EstimatorResultsCollection(this, this.allResults); //should be all populated anyway else { // ayy java 8 List<Integer> completeFolds = new ArrayList<>(Arrays.stream(folds).boxed().collect(Collectors.toList())); completeFolds.removeAll(foldsWithMissingResults); EstimatorResultsCollection reducedCol = sliceFolds(completeFolds.stream().mapToInt(Integer::intValue).toArray()); reductionSummary("FOLDS", completeFolds, foldsWithMissingResults); return reducedCol; } } private void reductionSummary(String dim, Collection<? extends Object> remaining, Collection<? extends Object> removed) { System.out.println("\n\n\n*****".replace("*", "**********")); System.out.println("*****".replace("*", "**********")); System.out.println("*****".replace("*", "**********")); System.out.println("Not all results were present. Have reduced the results space " + "in order to only compare the results across mutually completed "+dim+"."); System.out.println("\n"+dim+" removed ("+removed.size()+"): " + removed.toString()); System.out.println("\n"+dim+" remaining for comparison ("+remaining.size()+"): " + remaining.toString()); System.out.println("*****".replace("*", "**********")); System.out.println("*****".replace("*", "**********")); System.out.println("*****\n\n\n".replace("*", "**********")); } /** * Returns a new EstimatorResultsCollection that is identical to this one (in terms of * settings etc) aside from only the results of the provided split is returned for each estimator/dataset/fold * * @param split split to keep * @return new EstimatorResultsCollection with results for all estimators/datasets/folds, but only the split given * @throws java.lang.Exception if the split searched for was not loaded into this collection */ public EstimatorResultsCollection sliceSplit(String split) throws Exception { return sliceSplits(new String[] { split }); } /** * Returns 
a new EstimatorResultsCollection that is identical to this one (in terms of * settings etc) aside from only the results of the provided splits are returned for each estimator/dataset/fold * * @param splitsToSlice splits to keep * @return new EstimatorResultsCollection with results for all estimators/datasets/folds, but only the splits given * @throws java.lang.Exception if any of the splits were not loaded into this collection */ public EstimatorResultsCollection sliceSplits(String[] splitsToSlice) throws Exception { //perform existence checks before allocating the mem for (String split : splitsToSlice) if (find(splits, split) == -1) throw new SliceException("splits", splits, split); //copy across the results, for splits it's nice and easy EstimatorResults[][][][] subResults = new EstimatorResults[splitsToSlice.length][][][]; for (int sts = 0; sts < splitsToSlice.length; sts++) { int sidOrig = find(splits, splitsToSlice[sts]); //know it exists, did checks above subResults[sts] = this.allResults[sidOrig]; } //copy across the meta info to new collection object EstimatorResultsCollection newCol = new EstimatorResultsCollection(this, subResults); newCol.setSplits(splitsToSlice); //setting the particular meta info sliced return newCol; } /** * Returns a new EstimatorResultsCollection that is identical to this one (in terms of * settings etc) aside from only the results of the provided estimator is returned for each split/dataset/fold * * If different names were provided for storage and output, the name in storage should be provided * * @param estimator to keep * @return new EstimatorResultsCollection with results for all split/datasets/folds, but only the estimator given * @throws java.lang.Exception if the estimator searched for was not loaded into this collection */ public EstimatorResultsCollection sliceEstimator(String estimator) throws Exception { return sliceEstimator(new String[] { estimator }); } /** * Returns a new EstimatorResultsCollection that is identical 
to this one (in terms of * settings etc) aside from only the results of the provided estimators are returned for each split/dataset/fold * * If different names were provided for storage and output, the name in storage should be provided * * @param estimatorsToSlice estimators to keep * @return new EstimatorResultsCollection with results for all split/datasets/folds, but only the estimators given * @throws java.lang.Exception if the estimators searched for were not loaded into this collection */ public EstimatorResultsCollection sliceEstimator(String[] estimatorsToSlice) throws Exception { int[] origEstimatorIds = new int[estimatorsToSlice.length]; String[] keptNamesStorage = new String[estimatorsToSlice.length]; String[] keptNamesOutput = new String[estimatorsToSlice.length]; String[] keptReadPaths = new String[estimatorsToSlice.length]; //perform existence checks before allocating the mem for (int i = 0; i < estimatorsToSlice.length; i++) { String estimator = estimatorsToSlice[i]; origEstimatorIds[i] = find(estimatorNamesInStorage, estimator); if (origEstimatorIds[i] == -1) throw new SliceException("estimators", estimatorNamesInStorage, estimator); else { keptNamesStorage[i] = estimatorNamesInStorage[origEstimatorIds[i]]; keptNamesOutput[i] = estimatorNamesInOutput[origEstimatorIds[i]]; keptReadPaths[i] = resultsFilesDirectories[origEstimatorIds[i]]; } } //copy across the results EstimatorResults[][][][] subResults = new EstimatorResults[numSplits][estimatorsToSlice.length][][]; for (int s = 0; s < numSplits; s++) for (int cts = 0; cts < estimatorsToSlice.length; cts++) subResults[s][cts] = this.allResults[s][origEstimatorIds[cts]]; //copy across the meta info to new collection object EstimatorResultsCollection newCol = new EstimatorResultsCollection(this, subResults); newCol.setEstimators(keptNamesStorage, keptNamesOutput, keptReadPaths); //setting the particular meta info sliced return newCol; } /** * Returns a new EstimatorResultsCollection that is identical to 
this one (in terms of * settings etc) aside from only the results of the provided dataset is returned for each split/estimator/fold * * If different names were provided for storage and output, the name in storage should be provided * * @param dataset dataset to keep * @return new EstimatorResultsCollection with results for all split/estimator/folds, but only the dataset given * @throws java.lang.Exception if the dataset searched for was not loaded into this collection */ public EstimatorResultsCollection sliceDataset(String dataset) throws Exception { return sliceDatasets(new String[] { dataset }); } /** * Returns a new EstimatorResultsCollection that is identical to this one (in terms of * settings etc) aside from only the results of the provided datasets are returned for each split/estimator/fold * * If different names were provided for storage and output, the name in storage should be provided * * @param datasetsToSlice datasets to keep * @return new EstimatorResultsCollection with results for all split/estimator/folds, but only the datasets given * @throws java.lang.Exception if the datasets searched for were not loaded into this collection */ public EstimatorResultsCollection sliceDatasets(String[] datasetsToSlice) throws Exception { //perform existence checks before allocating the mem for (String dataset : datasetsToSlice) if (find(datasetNamesInStorage, dataset) == -1) throw new SliceException("datasets", datasetNamesInStorage, dataset); //copy across the results EstimatorResults[][][][] subResults = new EstimatorResults[numSplits][numEstimators][datasetsToSlice.length][]; for (int s = 0; s < numSplits; s++) { for (int c = 0; c < numEstimators; c++) { for (int dts = 0; dts < datasetsToSlice.length; dts++) { int didOrig = find(datasetNamesInStorage, datasetsToSlice[dts]); //know it exists, did checks above subResults[s][c][dts] = this.allResults[s][c][didOrig]; } } } //copy across the meta info to new collection object EstimatorResultsCollection newCol = new 
EstimatorResultsCollection(this, subResults); newCol.setDatasets(datasetsToSlice); //setting the particular meta info sliced return newCol; } /** * Returns a new EstimatorResultsCollection that is identical to this one (in terms of * settings etc) aside from only the results of the provided fold is returned for each split/estimator/dataset * * @param fold fold to keep * @return new EstimatorResultsCollection with results for all split/estimator/dataset, but only the fold given * @throws java.lang.Exception if the fold searched for was not loaded into this collection */ public EstimatorResultsCollection sliceFold(int fold) throws Exception { return sliceFolds(new int[] { fold }); } /** * Returns a new EstimatorResultsCollection that is identical to this one (in terms of * settings etc) aside from only the results of the folds in the provided range are returned for each split/estimator/dataset * * @param minFolds bottom of range, inclusive * @param maxFolds top of range, exclusive * @return new EstimatorResultsCollection with results for all split/estimator/datasets, but only the fold range given * @throws java.lang.Exception if the fold range searched for was not loaded into this collection */ public EstimatorResultsCollection sliceFolds(int minFolds, int maxFolds) throws Exception { return sliceFolds(buildRange(minFolds, maxFolds)); } /** * Returns a new EstimatorResultsCollection that is identical to this one (in terms of * settings etc) aside from only the results of the folds provided are returned for each split/estimator/dataset * * @param foldsToSlice individual fold ids to keep * @return new EstimatorResultsCollection with results for all split/estimator/datasets, but only the folds given * @throws java.lang.Exception if any of the folds searched for was not loaded into this collection */ public EstimatorResultsCollection sliceFolds(int[] foldsToSlice) throws Exception { //perform existence checks before allocating the mem for (int fold : foldsToSlice) if 
(find(folds, fold) == -1) throw new SliceException("folds", folds, fold); //copy across the results EstimatorResults[][][][] subResults = new EstimatorResults[numSplits][numEstimators][numDatasets][foldsToSlice.length]; for (int s = 0; s < numSplits; s++) { for (int c = 0; c < numEstimators; c++) { for (int d = 0; d < numDatasets; d++) { for (int fts = 0; fts < foldsToSlice.length; fts++) { int fidOrig = find(folds, foldsToSlice[fts]); //know it exists, did checks above subResults[s][c][d][fts] = this.allResults[s][c][d][fidOrig]; } } } } //copy across the meta info to new collection object EstimatorResultsCollection newCol = new EstimatorResultsCollection(this, subResults); newCol.setFolds(foldsToSlice); //setting the particular meta info sliced return newCol; } /** * Returns the accuracy (or MSE for regression) of each result object loaded in as a large array double[split][estimator][dataset][fold] Wrapper retrieveDoubles for accuracies * @return Array [split][estimator][dataset][fold] of doubles with accuracy from each result */ public double[][][][] retrieveAccuracies() throws Exception { if (resultsType == ResultsType.CLASSIFICATION){ return retrieveDoubles(ClassifierResults.GETTER_Accuracy); } else if (resultsType == ResultsType.REGRESSION){ return retrieveDoubles(RegressorResults.GETTER_MSE); } else if (resultsType == ResultsType.CLUSTERING){ return retrieveDoubles(ClustererResults.GETTER_Accuracy); } else{ throw new Exception("Invalid ResultType."); } } /** * Given a function that extracts information in the form of a double from a results object, * returns a big array [split][estimator][dataset][fold] of that information from * every result object loaded * * todo make generic * * @param getter function that takes a EstimatorResults object, and returns a Double * @return Array [split][estimator][dataset][fold] of doubles with info from each result */ public double[][][][] retrieveDoubles(Function<EstimatorResults, Double> getter) { double[][][][] info = new 
double[numSplits][numEstimators][numDatasets][numFolds]; for (int i = 0; i < numSplits; i++) for (int j = 0; j < numEstimators; j++) for (int k = 0; k < numDatasets; k++) for (int l = 0; l < numFolds; l++) info[i][j][k][l] = getter.apply(allResults[i][j][k][l]); return info; } /** * Given a function that extracts information in the form of a String from a results object, * returns a big array [split][estimator][dataset][fold] of that information from * every result object loaded * * todo make generic * * @param getter function that takes a EstimatorResults object, and returns a String * @return Array [split][estimator][dataset][fold] of String with info from each result */ public String[][][][] retrieveStrings(Function<EstimatorResults, String> getter) { String[][][][] info = new String[numSplits][numEstimators][numDatasets][numFolds]; for (int i = 0; i < numSplits; i++) for (int j = 0; j < numEstimators; j++) for (int k = 0; k < numDatasets; k++) for (int l = 0; l < numFolds; l++) info[i][j][k][l] = getter.apply(allResults[i][j][k][l]); return info; } /** * Simply get all of the results in their raw/complete form. If allowMissingResults was set to true when loading results, * one or more entries may be null, otherwise each should be complete (the loading would have failed * otherwise). If cleanResults was set to true when loading results, each results object will contain the * evaluation statistics and meta info for that split/estimator/dataset/fold, but not the individual * predictions. 
* * @return the big EstimatorResults[split][estimator][dataset][fold] arrays in its raw form */ public EstimatorResults[][][][] retrieveResults() { return allResults; } public static void main(String[] args) throws Exception { EstimatorResultsCollection col = new EstimatorResultsCollection(); col.addEstimators(new String[] { "Logistic", "SVML", "MLP" }, "C:/JamesLPHD/CAWPEExtension/Results/"); col.setDatasets(Arrays.copyOfRange(DatasetLists.ReducedUCI, 0, 5)); col.setFolds(10); col.setSplit_Test(); EstimatorResults[][][][] res = col.load(); System.out.println(res.length); System.out.println(res[0].length); System.out.println(res[0][0].length); System.out.println(res[0][0][0].length); System.out.println(res[0][0][0][0].getAcc()); System.out.println(""); double[][][][] accs = col.retrieveAccuracies(); System.out.println(accs.length); System.out.println(accs[0].length); System.out.println(accs[0][0].length); System.out.println(accs[0][0][0].length); System.out.println(accs[0][0][0][0]); System.out.println(""); EstimatorResultsCollection subcol = col.sliceEstimator("Logistic"); EstimatorResults[][][][] subres = subcol.retrieveResults(); System.out.println(subres.length); System.out.println(subres[0].length); System.out.println(subres[0][0].length); System.out.println(subres[0][0][0].length); System.out.println(subres[0][0][0][0].getAcc()); System.out.println(""); subcol = col.sliceDataset(DatasetLists.ReducedUCI[0]); subres = subcol.retrieveResults(); System.out.println(subres.length); System.out.println(subres[0].length); System.out.println(subres[0][0].length); System.out.println(subres[0][0][0].length); System.out.println(subres[0][0][0][0].getAcc()); System.out.println(""); subcol = col.sliceFolds(new int[] { 0, 3 }); subres = subcol.retrieveResults(); System.out.println(subres.length); System.out.println(subres[0].length); System.out.println(subres[0][0].length); System.out.println(subres[0][0][0].length); System.out.println(subres[0][0][0][0].getAcc()); 
System.out.println(""); } }
50,481
43.952805
157
java
tsml-java
tsml-java-master/src/main/java/evaluation/storage/RegressorResults.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package evaluation.storage;

import fileIO.OutFile;
import utilities.DebugPrinting;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.Serializable;
import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;

/**
 * This is a container class for the storage of predictions and meta-info of a
 * regressor on a single set of instances (for example, the test set of a particular
 * resample of a particular dataset).
 *
 * Predictions can be stored via addPrediction(...) or addAllPredictions(...)
 * Currently, the information stored about each prediction is:
 *    - The true label value                       (double getTrueClassValue(index))
 *    - The predicted label value                  (double getPredClassValue(index))
 *    - The time taken to predict this instance id (long   getPredictionTime(index))
 *    - An optional description of the prediction  (String getPredDescription(index))
 *
 * The meta info stored is:
 *  [LINE 1 OF FILE]
 *    - get/setDatasetName(String)
 *    - get/setRegressorName(String)
 *    - get/setSplit(String)
 *    - get/setFoldId(String)
 *    - get/setTimeUnit(TimeUnit)
 *    - get/setDescription(String)
 *  [LINE 2 OF FILE]
 *    - get/setParas(String)
 *  [LINE 3 OF FILE]
 *    1 - getAccuracy() (calculated from predictions, only settable with a suitably annoying message)
 *    2 - get/setBuildTime(long)
 *    3 - get/setTestTime(long)
 *    4 - get/setBenchmarkTime(long)
 *    5 - get/setMemory(long)
 *    6 - get/setErrorEstimateMethod(String) (loosely formed, e.g. cv_10)
 *    7 - get/setErrorEstimateTime(long) (time to form an estimate from scratch, e.g. time of cv_10)
 *    8 - get/setBuildPlusEstimateTime(long) (time to train on full data, AND estimate error on it)
 *
 *  [REMAINING LINES: PREDICTIONS]
 *    - trueLabelVal, predLabelVal, [empty], predTime, [empty], predDescription
 *
 * Supports reading/writing of results from/to file, in the 'classifierResults file-format'
 *    - loadResultsFromFile(String path)
 *    - writeFullResultsToFile(String path)
 *
 * Supports recording of timings in different time units. Nanoseconds is the default.
 * Also supports the calculation of various evaluative performance metrics based on the predictions (MSE, MAE, R2 etc.)
 *
 * EXAMPLE USAGE:
 *          RegressorResults res = new RegressorResults();
 *          //set a particular timeunit, if using something other than nanos. Nanos recommended
 *          //set any meta info you want to keep, e.g regressorname, datasetname...
 *
 *          for (Instance inst : test) {
 *              long startTime = //time
 *              double dist = regressor.classifyInstance(inst);
 *              long predTime = //time - startTime
 *
 *              res.addPrediction(inst.classValue(), pred, predTime, ""); //description is optional
 *          }
 *
 *          res.finaliseResults(); //performs some basic validation, and calcs some relevant internal info
 *
 *          //can now find summary scores for these predictions
 *          //stats stored in simple public members for now
 *          res.findAllStats();
 *
 *          //and/or save to file
 *          res.writeFullResultsToFile(path);
 *
 *          //and could then load them back in
 *          ClassifierResults res2 = new ClassifierResults(path);
 *
 *          //they are automatically finalised, however the stats are not automatically found
 *          res2.findAllStats();
 *
 * @author Matthew Middlehurst, adapted from ClassifierResults (James Large)
 */
public class RegressorResults extends EstimatorResults implements DebugPrinting, Serializable {

    /**
     * Print a message with the filename to stdout when a file cannot be loaded.
     * Can get very tiresome if loading thousands of files with some expected failures,
     * and a higher level process already summarises them, thus this option to
     * turn off the messages
     */
    public static boolean printOnFailureToLoad = true;

    /**
     * Print a message when result file MSE does not match calculated MSE.
     * Setting this to false will stop print outs from this check.
     */
    public static boolean mseTestPrint = true;

    //LINE 1: meta info, set by user (fields inherited from EstimatorResults)
    // estimatorName
    // datasetName
    // split
    // foldID
    // timeUnit
    // description

    //LINE 2: classifier setup/info, parameters. precise format is up to user.

    /**
     * For now, user dependent on the formatting of this string, and really, the contents of it.
     * It is notionally intended to contain the parameters of the regressor used to produce the
     * attached predictions, but could also store other things as well.
     */
    private String paras = "No parameter info";

    //LINE 3: acc, buildTime, testTime, memoryUsage
    //simple summarative performance stats.

    /**
     * Mean squared error over the stored predictions. Calculated from the stored
     * predictions, cannot be explicitly set by user. -1 acts as the "not yet
     * calculated" sentinel (see getAcc()/calculateMSE()).
     */
    public double mse = -1;

    // buildTime       (inherited)
    // testTime        (inherited)
    // benchmarkTime   (inherited)
    // memoryUsage     (inherited)

    /**
     * todo initially intended as a temporary measure, but might stay here until a switch
     * over to json etc is made
     *
     * See the experiments parameter trainEstimateMethod
     *
     * This defines the method and parameter of train estimate used, if one was done
     */
    private String errorEstimateMethod = "";

    /**
     * todo initially intended as a temporary measure, but might stay here until a switch
     * over to json etc is made
     *
     * This defines the total time taken to estimate the classifier's error. This currently
     * does not mean anything for classifiers implementing the TrainAccuracyEstimate interface,
     * and as such would need to set this themselves (but likely do not)
     *
     * For those classifiers that do not implement that, ClassifierExperiments.findOrSetupTrainEstimate(...) will set this value
     * as a wrapper around the entire evaluate call for whichever errorEstimateMethod is being used
     */
    private long errorEstimateTime = -1;

    /**
     * This measures the total time to build the classifier on the train data
     * AND to estimate the classifier's error on the same train data. For classifiers
     * that do not estimate their own error in some way during the build process,
     * this will simply be the buildTime and the errorEstimateTime added together.
     *
     * For classifiers that DO estimate their own error, buildPlusEstimateTime may
     * be anywhere between buildTime and buildTime+errorEstimateTime. Some or all of
     * the work needed to form an estimate (which the field errorEstimateTime measures from scratch)
     * may have already been accounted for by the buildTime
     */
    private long buildPlusEstimateTime = -1;

    //REMAINDER OF THE FILE - 1 prediction per line
    //raw performance data. currently just give parallel arrays
    //(kept index-aligned: element i of each list describes prediction i)
    private ArrayList<Double> trueLabelValues;
    private ArrayList<Double> predLabelValues;
    private ArrayList<Long> predTimes;
    private ArrayList<String> predDescriptions;

    //inferred/supplied dataset meta info
    private int numInstances;

    //calculated performance metrics
    //accuracy can be re-calced, as well as stored on line three in files
    public double rmse;
    public double mae;
    public double r2;
    public double mape;

    //self-management flags
    /**
     * essentially controls whether a RegressorResults object can have finaliseResults(trueLabelVals)
     * called upon it. In theory, every class using the RegressorResults object should make new
     * instantiations of it each time a set of results is being computed, and so this is not needed
     */
    private boolean finalised = false;
    private boolean allStatsFound = false;

    /**
     * System.nanoTime() can STILL return zero on some tiny datasets with simple classifiers,
     * because it does not have enough precision. If this flag is false (the default),
     * timings of zero are allowed, under the partial assumption/understanding from the user
     * that times under ~200 nanoseconds can be equated to 0. If true, the timing setters
     * throw on values < 1.
     */
    private boolean errorOnTimingOfZero = false;

    //functional getters to retrieve info from a regressorresults object, initialised/stored here for convenience.
    //these are currently only used in PerformanceMetric.java, can take any results type as a hack to allow other
    //results in evaluation.
    public static final Function<EstimatorResults, Double> GETTER_MSE = (EstimatorResults cr) -> ((RegressorResults)cr).mse;
    public static final Function<EstimatorResults, Double> GETTER_RMSE = (EstimatorResults cr) -> ((RegressorResults)cr).rmse;
    public static final Function<EstimatorResults, Double> GETTER_MAE = (EstimatorResults cr) -> ((RegressorResults)cr).mae;
    public static final Function<EstimatorResults, Double> GETTER_R2 = (EstimatorResults cr) -> ((RegressorResults)cr).r2;
    public static final Function<EstimatorResults, Double> GETTER_MAPE = (EstimatorResults cr) -> ((RegressorResults)cr).mape;

    /*********************************
     *
     *       CONSTRUCTORS
     *
     */

    /**
     * Create an empty RegressorResults object.
     */
    public RegressorResults() {
        trueLabelValues = new ArrayList<>();
        predLabelValues = new ArrayList<>();
        predTimes = new ArrayList<>();
        predDescriptions = new ArrayList<>();

        finalised = false;
    }

    /**
     * Load a RegressorResults object from the file at the specified path
     */
    public RegressorResults(String filePathAndName) throws Exception {
        loadResultsFromFile(filePathAndName);
    }

    /**
     * Create a RegressorResults object with complete predictions (equivalent to addAllPredictions()). The results are
     * FINALISED after initialisation. Meta info such as regressor name, datasetname... can still be set after construction.
     *
     * The descriptions array argument may be null, in which case the descriptions are stored as empty strings.
     *
     * All other arguments are required in full, however
     */
    public RegressorResults(double[] trueLabelVals, double[] predictions, long[] predTimes, String[] descriptions) throws Exception {
        this.trueLabelValues = new ArrayList<>();
        this.predLabelValues = new ArrayList<>();
        this.predTimes = new ArrayList<>();
        this.predDescriptions = new ArrayList<>();

        addAllPredictions(trueLabelVals, predictions, predTimes, descriptions);
        finaliseResults();
    }

    /***********************
     *
     *      DATASET META INFO
     *
     */

    // Lazily inferred from the number of stored predictions if not already set.
    public int numInstances() {
        if (numInstances <= 0)
            inferNumInstances();
        return numInstances;
    }

    private void inferNumInstances() {
        this.numInstances = predLabelValues.size();
    }

    public void turnOffZeroTimingsErrors() {
        errorOnTimingOfZero = false;
    }

    public void turnOnZeroTimingsErrors() {
        errorOnTimingOfZero = true;
    }

    /*****************************
     *
     *     LINE 2 GETS/SETS
     *
     */

    public String getParas() { return paras; }

    public void setParas(String paras) { this.paras = paras; }

    /*****************************
     *
     *     LINE 3 GETS/SETS
     *
     */

    /**
     * NOTE(review): for regression results, "acc" is overloaded to mean MSE — this
     * returns the mean squared error (lazily calculated on first call), mirroring
     * ClassifierResults.getAcc() for generic-code compatibility.
     */
    @Override
    public double getAcc() {
        if (mse < 0)
            calculateMSE();
        return mse;
    }

    // Computes MSE from the parallel true/pred lists; silently no-ops (with a printout)
    // if true values are absent. NOTE(review): uses trueLabelValues.get(0) == -1 as a
    // "no true values" sentinel — a legitimate first label of -1 would be misread; confirm.
    private void calculateMSE() {
        if (trueLabelValues == null || trueLabelValues.isEmpty() || trueLabelValues.get(0) == -1) {
            System.out.println("**getAcc():calculateAcc() no true class values supplied yet, cannot calculate accuracy");
            return;
        }

        int size = predLabelValues.size();
        double sum = 0;
        for (int i = 0; i < size; i++) {
            sum += Math.abs(Math.pow(trueLabelValues.get(i) - predLabelValues.get(i), 2));
        }

        mse = sum / size;
    }

    public long getBuildTime() { return buildTime; }

    public long getBuildTimeInNanos() { return timeUnit.toNanos(buildTime); }

    /**
     * Sets the build time, in this object's timeUnit.
     *
     * @throws RuntimeException if errorOnTimingOfZero is set and buildTime is less than 1
     */
    public void setBuildTime(long buildTime) {
        if (errorOnTimingOfZero && buildTime < 1)
            throw new RuntimeException("Build time passed has invalid value, " + buildTime + ". If greater resolution" + " is needed, "
                    + "use nano seconds (e.g System.nanoTime()) and set the TimeUnit of the classifierResults object to nanoseconds.\n\n"
                    + "If you are using nanoseconds but STILL getting this error, read the javadoc for and use turnOffZeroTimingsErrors() "
                    + "for this call");
        this.buildTime = buildTime;
    }

    public long getTestTime() { return testTime; }

    public long getTestTimeInNanos() { return timeUnit.toNanos(testTime); }

    /**
     * Sets the total test time, in this object's timeUnit.
     * NOTE(review): declared `throws Exception` while setBuildTime throws an unchecked
     * RuntimeException — an inherited inconsistency, left as-is for caller compatibility.
     *
     * @throws Exception if errorOnTimingOfZero is set and testTime is less than 1
     */
    public void setTestTime(long testTime) throws Exception {
        if (errorOnTimingOfZero && testTime < 1)
            throw new Exception("Test time passed has invalid value, " + testTime + ". If greater resolution is needed, "
                    + "use nano seconds (e.g System.nanoTime()) and set the TimeUnit of the classifierResults object to nanoseconds.\n\n"
                    + "If you are using nanoseconds but STILL getting this error, read the javadoc for and use turnOffZeroTimingsErrors() "
                    + "for this call");
        this.testTime = testTime;
    }

    public long getMemory() { return memoryUsage; }

    public void setMemory(long memory) { this.memoryUsage = memory; }

    public long getBenchmarkTime() { return benchmarkTime; }

    public void setBenchmarkTime(long benchmarkTime) { this.benchmarkTime = benchmarkTime; }

    public String getErrorEstimateMethod() { return errorEstimateMethod; }

    public void setErrorEstimateMethod(String errorEstimateMethod) { this.errorEstimateMethod = errorEstimateMethod; }

    public long getErrorEstimateTime() { return errorEstimateTime; }

    public long getErrorEstimateTimeInNanos() { return timeUnit.toNanos(errorEstimateTime); }

    public void setErrorEstimateTime(long errorEstimateTime) { this.errorEstimateTime = errorEstimateTime; }

    public long getBuildPlusEstimateTime() { return buildPlusEstimateTime; }

    public long getBuildPlusEstimateTimeInNanos() { return timeUnit.toNanos(buildPlusEstimateTime); }

    public void setBuildPlusEstimateTime(long buildPlusEstimateTime) { this.buildPlusEstimateTime = buildPlusEstimateTime; }
    /****************************
     *
     *    PREDICTION STORAGE
     *
     */

    /**
     * Will update the internal prediction info using the values passed.
     *
     * The description argument may be null, however all other arguments are required in full
     *
     * The true label is missing, however can be added in one go later with the
     * method finaliseResults(double[] trueClassVals)
     *
     * Note: also accumulates predictionTime into the running testTime total, and
     * increments numInstances.
     */
    public void addPrediction(double predictedClass, long predictionTime, String description) throws RuntimeException {
        predLabelValues.add(predictedClass);
        predTimes.add(predictionTime);

        //testTime starts at -1 ("unset"); the first prediction replaces the sentinel,
        //subsequent ones accumulate
        if (testTime == -1)
            testTime = predictionTime;
        else
            testTime += predictionTime;

        if (description == null)
            predDescriptions.add("");
        else
            predDescriptions.add(description);

        numInstances++;
    }

    /**
     * Will update the internal prediction info using the values passed.
     *
     * The description argument may be null, however all other arguments are required in full
     */
    public void addPrediction(double trueClassVal, double predictedClass, long predictionTime, String description) throws RuntimeException {
        addPrediction(predictedClass,predictionTime,description);
        trueLabelValues.add(trueClassVal);
    }

    /**
     * Adds all the prediction info onto this RegressorResults object. Does NOT finalise the results,
     * such that (e.g) predictions from multiple dataset splits can be added to the same object if wanted
     *
     * The description argument may be null, however all other arguments are required in full
     *
     * NOTE(review): length validation is done with `assert`, which is disabled unless the
     * JVM runs with -ea — mismatched array lengths would otherwise fail later or silently.
     */
    public void addAllPredictions(double[] trueLabelVals, double[] predictions, long[] predTimes, String[] descriptions) throws RuntimeException {
        assert(trueLabelVals.length == predictions.length);
        assert(trueLabelVals.length == predTimes.length);

        if (descriptions != null)
            assert(trueLabelVals.length == descriptions.length);

        for (int i = 0; i < trueLabelVals.length; i++) {
            if (descriptions == null)
                addPrediction(trueLabelVals[i], predictions[i], predTimes[i], null);
            else
                addPrediction(trueLabelVals[i], predictions[i], predTimes[i], descriptions[i]);
        }
    }

    /**
     * Adds all the prediction info onto this RegressorResults object. Does NOT finalise the results,
     * such that (e.g) predictions from multiple dataset splits can be added to the same object if wanted
     *
     * True label values can later be supplied (ALL IN ONE GO, if working to the above example usage..) using
     * finaliseResults(double[] testClassVals)
     *
     * The description argument may be null, however all other arguments are required in full
     */
    public void addAllPredictions(double[] predictions,long[] predTimes, String[] descriptions ) throws RuntimeException {
        assert(predictions.length == predTimes.length);

        if (descriptions != null)
            assert(predictions.length == descriptions.length);

        for (int i = 0; i < predictions.length; i++) {
            if (descriptions == null)
                addPrediction(predictions[i], predTimes[i], "");
            else
                addPrediction(predictions[i], predTimes[i], descriptions[i]);
        }
    }

    /**
     * Will perform some basic validation to make sure that everything is here
     * that is expected, and compute the accuracy etc ready for file writing.
     *
     * Typical usage: results.finaliseResults(instances.attributeToDoubleArray(instances.classIndex()))
     *
     * Replaces any previously stored true label values with the array passed.
     */
    public void finaliseResults(double[] testLabelVals) throws Exception {
        if (finalised) {
            System.out.println("finaliseResults(double[] testLabelVals): Results already finalised, skipping re-finalisation");
            return;
        }

        if (testLabelVals.length != predLabelValues.size())
            throw new Exception("finaliseTestResults(double[] testLabelVals): Number of predictions "
                    + "made and number of true class values passed do not match");

        trueLabelValues = new ArrayList<>();
        for(double d:testLabelVals)
            trueLabelValues.add(d);

        finaliseResults();
    }

    /**
     * Will perform some basic validation to make sure that everything is here
     * that is expected, and compute the accuracy etc ready for file writing.
     *
     * You can use this method, instead of the version that takes the double[] testLabelVals
     * as an argument, if you've been storing predictions via the addPrediction overload
     * that takes the true class value of each prediction.
     */
    public void finaliseResults() throws Exception {
        if (finalised) {
            printlnDebug("finaliseResults(): Results already finalised, skipping re-finalisation");
            return;
        }

        if (numInstances <= 0)
            inferNumInstances();

        if (predLabelValues == null || predLabelValues.isEmpty())
            throw new Exception("finaliseTestResults(): no predictions stored for this module");

        calculateMSE();

        finalised = true;
    }

    /******************************
    *
    *          RAW DATA ACCESSORS
    *
    *     getAsList, getAsArray, and getSingleElement of the four lists describing predictions
    *
    */

    /**
     * Returns the internal (live, not copied) list of true label values.
     */
    public ArrayList<Double> getTrueLabelVals() {
        return trueLabelValues;
    }

    public double[] getTrueLabelValsAsArray(){
        double[] d=new double[trueLabelValues.size()];
        int i=0;
        for(double x: trueLabelValues)
            d[i++]=x;
        return d;
    }

    public double getTrueLabelValue(int index){
        return trueLabelValues.get(index);
    }

    public ArrayList<Double> getPredLabelVals(){
        return predLabelValues;
    }

    public double[] getPredLabelValsAsArray(){
        double[] d=new double[predLabelValues.size()];
        int i=0;
        for(double x: predLabelValues)
            d[i++]=x;
        return d;
    }

    public double getPredLabelValue(int index){
        return predLabelValues.get(index);
    }

    public ArrayList<Long> getPredictionTimes() {
        return predTimes;
    }

    public long[] getPredictionTimesAsArray() {
        long[] l=new long[predTimes.size()];
        int i=0;
        for(long x:predTimes)
            l[i++]=x;
        return l;
    }

    public long getPredictionTime(int index) {
        return predTimes.get(index);
    }

    public long getPredictionTimeInNanos(int index) {
        return timeUnit.toNanos(getPredictionTime(index));
    }

    public ArrayList<String> getPredDescriptions() {
        return predDescriptions;
    }

    public String[] getPredDescriptionsAsArray() {
        String[] ds=new String[predDescriptions.size()];
        int i=0;
        for(String d:predDescriptions)
            ds[i++]=d;
        return ds;
    }

    public String getPredDescription(int index) {
        return predDescriptions.get(index);
    }

    // Drops all per-prediction data, keeping only summary stats/meta info (see class javadoc on cleanResults).
    @Override
    public void cleanPredictionInfo() {
        predLabelValues = null;
        trueLabelValues = null;
        predTimes = null;
        predDescriptions = null;
    }
/******************************** * * FILE READ/WRITING * */ public static boolean exists(File file) { return file.exists() && file.length() > 0; } public static boolean exists(String path) { return exists(new File(path)); } /** * Reads and STORES the prediction in this RegressorResults object * * INCREMENTS NUMINSTANCES * * [true],[pred], ,[predTime], ,[description until end of line, may have commas in it] */ private void instancePredictionFromString(String predLine) { String[] split=predLine.split(","); //collect actual/predicted label double trueLabelVal = Double.parseDouble(split[0].trim()); double predLabelVal = Double.parseDouble(split[1].trim()); //collect timings long predTime = -1; final int timingInd = 3; //actual, predicted, space, timing if (split.length > timingInd) predTime = Long.parseLong(split[timingInd].trim()); //collect description String description = ""; final int descriptionInd = timingInd + 1 + 1; //actual, predicted, space, timing, space, description if (split.length > descriptionInd) { description = split[descriptionInd]; //no reason currently why the description passed cannot have commas in it, //might be a natural way to separate it in to different parts. //description reall just fills up the remainder of the line. 
for (int i = descriptionInd+1; i < split.length; i++) description += "," + split[i]; } addPrediction(trueLabelVal, predLabelVal, predTime, description); } private void instancePredictionsFromScanner(Scanner in) throws Exception { while (in.hasNext()) { String line = in.nextLine(); //may be trailing empty lines at the end of the file if (line == null || line.equals("")) break; instancePredictionFromString(line); } calculateMSE(); } /** * [true],[pred], ,[predTime], ,[description until end of line, may have commas in it] */ private String instancePredictionToString(int i) { StringBuilder sb = new StringBuilder(); sb.append(trueLabelValues.get(i).intValue()).append(","); sb.append(predLabelValues.get(i).intValue()); //timing sb.append(",,").append(predTimes.get(i)); //<empty space>, timing //description sb.append(",,").append(predDescriptions.get(i)); //<empty space>, description return sb.toString(); } public String instancePredictionsToString() throws Exception{ if (trueLabelValues == null || trueLabelValues.size() == 0 || trueLabelValues.get(0) == -1) throw new Exception("No true class value stored, call finaliseResults(double[] trueClassVal)"); if(numInstances() > 0 && (predLabelValues.size() == trueLabelValues.size())){ StringBuilder sb=new StringBuilder(""); for(int i=0;i<numInstances();i++){ sb.append(instancePredictionToString(i)); if(i<numInstances()-1) sb.append("\n"); } return sb.toString(); } else return "No Instance Prediction Information"; } @Override public String toString() { return generateFirstLine(); } public String statsToString() { String s = ""; s += "Mean Squared Error: " + mse; s += "\nMean Absolute error: " + mae; s += "\nR² Score: " + r2; s += "\nMean Absolute Percentage Error: " + mape; return s; } public String writeFullResultsToString() throws Exception { finaliseResults(); StringBuilder st = new StringBuilder(); st.append(generateFirstLine()).append("\n"); st.append(generateSecondLine()).append("\n"); 
st.append(generateThirdLine()).append("\n"); st.append(instancePredictionsToString()); return st.toString(); } public void writeFullResultsToFile(String path) throws Exception { OutFile out = null; try { out = new OutFile(path); out.writeString(writeFullResultsToString()); } catch (Exception e) { throw new Exception("Error writing results file.\n" + "Outfile most likely didnt open successfully, probably directory doesnt exist yet.\n" + "Path: " + path +"\nError: "+ e); } finally { if (out != null) out.closeFile(); } } private void parseFirstLine(String line) { String[] parts = line.split(","); if (parts.length == 0) return; datasetName = parts[0]; estimatorName = parts[1]; split = parts[2]; foldID = Integer.parseInt(parts[3]); setTimeUnitFromString(parts[4]); //nothing stopping the description from having its own commas in it, just read until end of line for (int i = 5; i < parts.length; i++) description += "," + parts[i]; } private String generateFirstLine() { return datasetName + "," + estimatorName + "," + split + "," + foldID + "," + getTimeUnitAsString() + "," + description; } private void parseSecondLine(String line) { paras = line; } private String generateSecondLine() { return paras; } /** * Returns the test acc reported on this line, for comparison with acc * computed later to assert they align. 
Accuracy has always been reported * on this line in this file format, so fair to assume if this fails * then the file is simply malformed */ private double parseThirdLine(String line) { String[] parts = line.split(","); mse = Double.parseDouble(parts[0]); buildTime = Long.parseLong(parts[1]); testTime = Long.parseLong(parts[2]); benchmarkTime = Long.parseLong(parts[3]); memoryUsage = Long.parseLong(parts[4]); errorEstimateMethod = parts[5]; errorEstimateTime = Long.parseLong(parts[6]); buildPlusEstimateTime = Long.parseLong(parts[7]); return mse; } private String generateThirdLine() { String res = mse + "," + buildTime + "," + testTime + "," + benchmarkTime + "," + memoryUsage + "," + errorEstimateMethod + "," + errorEstimateTime + "," + buildPlusEstimateTime; return res; } private String getTimeUnitAsString() { return timeUnit.name(); } private void setTimeUnitFromString(String str) { timeUnit = TimeUnit.valueOf(str); } public void loadResultsFromFile(String path) throws FileNotFoundException, Exception { try { //init trueLabelValues = new ArrayList<>(); predLabelValues = new ArrayList<>(); predTimes = new ArrayList<>(); predDescriptions = new ArrayList<>(); numInstances = 0; mse = -1; buildTime = -1; testTime = -1; memoryUsage = -1; //check file exists File f = new File(path); if (!(f.exists() && f.length() > 0)) throw new FileNotFoundException("File " + path + " NOT FOUND"); Scanner inf = new Scanner(f); //parse meta infos parseFirstLine(inf.nextLine()); parseSecondLine(inf.nextLine()); double reportedTestMSE = parseThirdLine(inf.nextLine()); //parse predictions instancePredictionsFromScanner(inf); //acts as a basic form of verification, does the mse reported on line 3 align with //the mse calculated while reading predictions double eps = 1.e-8; if (mseTestPrint && Math.abs(reportedTestMSE - mse) > eps) { System.out.println("Calculated MSE (" + mse + ") differs from written MSE (" + reportedTestMSE + ") " + "by more than eps (" + eps + "). File = " + path + ". 
numinstances = " + numInstances + "."); } finalised = true; inf.close(); } catch (FileNotFoundException fnf) { if (printOnFailureToLoad) System.out.println("File " + path + " NOT FOUND"); throw fnf; } catch (Exception ex) { if (printOnFailureToLoad) System.out.println("File " + path + " FAILED TO LOAD"); throw ex; } } /****************************************** * * METRIC CALCULATIONS * */ /** * Will calculate all the metrics that can be found from the prediction information * stored in this object. Will NOT call finaliseResults(..), and finaliseResults(..) * not have been called elsewhere, however if it has not been called then true * class values must have been supplied while storing predictions. * * This is to allow iterative calculation of the metrics (in e.g. batches * of added predictions) */ public void findAllStats(){ //meta info if (numInstances <= 0) inferNumInstances(); if (mse < 0) calculateMSE(); rmse = findRMSE(); mae = findMAE(); r2 = findR2(); mape = findMAPE(); medianPredTime = findMedianPredTime(predTimes); allStatsFound = true; } public double findRMSE() { if (mse < 0) calculateMSE(); return Math.sqrt(mse); } public double findMAE() { int size = predLabelValues.size(); double sum = 0; for (int i = 0; i < size; i++) { sum += Math.abs(trueLabelValues.get(i) - predLabelValues.get(i)); } return sum / size; } public double findR2() { int size = predLabelValues.size(); double labelAverage = 0; for (int i = 0; i < size; i++) { labelAverage += trueLabelValues.get(i); } labelAverage /= size; double sum1 = 0; for (int i = 0; i < size; i++) { sum1 += Math.pow(trueLabelValues.get(i) - predLabelValues.get(i), 2); } double sum2 = 0; for (int i = 0; i < size; i++) { sum2 += Math.pow(trueLabelValues.get(i) - labelAverage, 2); } if (sum2 == 0) sum2 = 2.22044605e-16; return 1 - sum1 / sum2; } public double findMAPE() { int size = predLabelValues.size(); double sum = 0; for (int i = 0; i < size; i++) { sum += Math.abs(trueLabelValues.get(i) - 
predLabelValues.get(i)) / Math.max(2.22044605e-16, Math.abs(trueLabelValues.get(i))); } return sum / size; } /** * Will calculate all the metrics that can be found from the prediction information * stored in this object, UNLESS this object has been finalised (finaliseResults(..)) AND * has already had it's stats found (findAllStats()), e.g. if it has already been called * by another process. * * In this latter case, this method does nothing. */ @Override public void findAllStatsOnce(){ if (finalised && allStatsFound) { printlnDebug("Stats already found, ignoring findAllStatsOnce()"); return; } else { findAllStats(); } } /** * Concatenates the predictions of regressors made on different folds on the data * into one results object * * If RegressorResults ever gets split into separate classes for prediction and meta info, * this obviously gets cleaned up a lot * * @param rresults RegressorResults[fold] * @return single RegressorResults object */ public static RegressorResults concatenateRegressorResults( /*fold*/ RegressorResults[] rresults) throws Exception { return concatenateRegressorResults(new RegressorResults[][]{rresults})[0]; } /** * Concatenates the predictions of regressors made on different folds on the data * into one results object per regressor. 
* * If RegressorResults ever gets split into separate classes for prediction and meta info, * this obviously gets cleaned up a lot * * @param rresults RegressorResults[regressor][fold] * @return RegressorResults[regressor] */ public static RegressorResults[] concatenateRegressorResults( /*regressor*/ /*fold*/ RegressorResults[][] rresults) throws Exception { RegressorResults[] concatenatedResults = new RegressorResults[rresults.length]; for (int regressorid = 0; regressorid < rresults.length; regressorid++) { if (rresults[regressorid].length == 1) { concatenatedResults[regressorid] = rresults[regressorid][0]; } else { RegressorResults newCres = new RegressorResults(); for (int foldid = 0; foldid < rresults[regressorid].length; foldid++) { RegressorResults foldCres = rresults[regressorid][foldid]; for (int predid = 0; predid < foldCres.numInstances(); predid++) { newCres.addPrediction(foldCres.getTrueLabelValue(predid), foldCres.getPredLabelValue(predid), foldCres.getPredictionTime(predid), foldCres.getPredDescription(predid)); } } concatenatedResults[regressorid] = newCres; } } return concatenatedResults; } public static void main(String[] args) throws Exception { RegressorResults cr = new RegressorResults(); Collections.addAll(cr.trueLabelValues, 3., -0.5, 2., 7., -2., -2., -2., 1., 10., 1e6); Collections.addAll(cr.predLabelValues, 2.5, 0.0, 2., 8., -2., -2., -2., 0.9, 15., 1.2e6); Collections.addAll(cr.predTimes, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L); cr.numInstances = 15; cr.findAllStats(); System.out.println(cr.statsToString()); } }
37,284
33.395756
191
java
tsml-java
tsml-java-master/src/main/java/evaluation/tuning/ParameterResults.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package evaluation.tuning;

import evaluation.storage.ClassifierResults;

/**
 * Pairs a {@link ParameterSet} with the {@link ClassifierResults} produced
 * when that parameter set was evaluated, together with a numeric score used
 * to rank {@code ParameterResults} instances against one another.
 *
 * If no score is supplied explicitly, the accuracy recorded in the results
 * object is used as the score.
 *
 * @author James Large (james.large@uea.ac.uk)
 */
public class ParameterResults implements Comparable<ParameterResults> {
    public ParameterSet paras;
    public ClassifierResults results;
    public double score;

    /**
     * Builds a result scored by the accuracy stored in {@code results}.
     */
    public ParameterResults(ParameterSet parameterSet, ClassifierResults results) {
        // Delegate to the full constructor, defaulting the score to accuracy.
        this(parameterSet, results, results.getAcc());
    }

    /**
     * Builds a result scored by an arbitrary caller-supplied value.
     */
    public ParameterResults(ParameterSet parameterSet, ClassifierResults results, double score) {
        this.paras = parameterSet;
        this.results = results;
        this.score = score;
    }

    /**
     * Orders instances ascending by score (higher score sorts later).
     */
    @Override
    public int compareTo(ParameterResults other) {
        return Double.compare(score, other.score);
    }
}
1,894
34.092593
97
java
tsml-java
tsml-java-master/src/main/java/evaluation/tuning/ParameterSet.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;

/**
 * A single assignment of values to named parameters, stored as a flat
 * name -> value map (both kept as strings). Parameter names are expected to
 * align with the flags understood by a classifier's setOptions(String[]).
 *
 * Provides conversion to/from the Weka-style options list format
 * ([-flag, value, -flag, value, ...]) and the comma-separated "paras" line
 * stored in ClassifierResults files, optionally delimited by the
 * "parasStart"/"parasEnd" markers.
 *
 * @author James Large (james.large@uea.ac.uk)
 */
public class ParameterSet {
    public Map<String, String> parameterSet = new HashMap<>();

    // Markers used to delimit the parameter section inside a larger
    // comma-separated results line (see readClassifierResultsParaLine(String, boolean)).
    private static String startParaLineDelimiter = "parasStart";
    private static String endParaLineDelimiter = "parasEnd";

    /** @return the number of parameters held. */
    public int size() {
        return parameterSet.size();
    }

    /** @return true if a value is stored under the given parameter name. */
    public boolean contains(String paraName) {
        return parameterSet.containsKey(paraName);
    }

    /** @return a live view of all name/value pairs. */
    public Set<Entry<String, String>> getAllParameters() {
        return parameterSet.entrySet();
    }

    /** @return the value stored for paraName, or null if absent. */
    public String getParameterValue(String paraName) {
        return parameterSet.get(paraName);
    }

    /** Stores (or overwrites) the value for the given parameter name. */
    public void addParameter(String paraName, String paraValue) {
        parameterSet.put(paraName, paraValue);
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("{");
        for (Map.Entry<String, String> para : parameterSet.entrySet())
            sb.append(para.getKey()).append(": ").append(para.getValue()).append(", ");
        sb.append("}");
        return sb.toString();
    }

    /**
     * Converts this set to a Weka-style options array:
     * [-name1, value1, -name2, value2, ...].
     *
     * Iteration order follows the backing map, so it is not guaranteed stable
     * across parameter sets with different contents.
     */
    public String[] toOptionsList() {
        String[] ps = new String[parameterSet.size() * 2];
        int i = 0;
        for (Map.Entry<String, String> entry : parameterSet.entrySet()) {
            ps[i] = "-" + entry.getKey();
            ps[i + 1] = entry.getValue();
            i += 2;
        }
        return ps;
    }

    /**
     * Populates this set from a flat list of flag/value pairs,
     * e.g. [flag1, value1, flag2, value2, ...].
     *
     * Assumes the list contains no standalone flags (e.g. boolean switches
     * with no value) and that every pair is a parameter to be read in
     * (e.g. no debug flags).
     */
    public void readOptionsList(String[] options) {
        parameterSet = new HashMap<String, String>();
        for (int i = 0; i + 1 < options.length; i += 2)
            parameterSet.put(options[i], options[i + 1]);
    }

    /** Joins the given indices with underscores, e.g. [1,2,3] -> "1_2_3". */
    public static String toFileNameString(int[] inds) {
        StringBuilder sb = new StringBuilder();
        sb.append(inds[0]);
        for (int i = 1; i < inds.length; i++)
            sb.append("_").append(inds[i]);
        return sb.toString();
    }

    /** As {@link #toClassifierResultsParaLine(boolean)} without the start/end markers. */
    public String toClassifierResultsParaLine() {
        return toClassifierResultsParaLine(false);
    }

    /**
     * Serialises this set to the comma-separated form stored on the "paras"
     * line of a ClassifierResults file: name1,value1,name2,value2,...
     * optionally wrapped in the parasStart/parasEnd markers.
     */
    public String toClassifierResultsParaLine(boolean includeStartEndMarkers) {
        StringBuilder sb = new StringBuilder();

        if (includeStartEndMarkers)
            sb.append(startParaLineDelimiter).append(",");

        boolean first = true;
        for (Map.Entry<String, String> entry : parameterSet.entrySet()) {
            if (first) { // no initial comma
                sb.append(entry.getKey()).append(",").append(entry.getValue());
                first = false;
            }
            else
                sb.append(",").append(entry.getKey()).append(",").append(entry.getValue());
        }

        if (includeStartEndMarkers)
            sb.append(",").append(endParaLineDelimiter);

        return sb.toString();
    }

    /**
     * Populates this set from a comma-separated list of flag/value pairs,
     * e.g. "flag1,value1,flag2,value2".
     *
     * A trailing unpaired token (odd number of parts) is ignored rather than
     * read past the end of the array.
     */
    public void readClassifierResultsParaLine(String line) {
        parameterSet = new HashMap<>();
        String[] parts = line.split(",");

        int length = parts.length;
        if (length % 2 == 1) {
            // Odd number of tokens: drop the final unpaired one.
            // (String.split already removes trailing empty strings, so a plain
            // trailing comma never reaches this branch — this guards against
            // genuinely malformed lines.)
            // BUG FIX: the previous version computed this truncated length but
            // still looped to parts.length, throwing
            // ArrayIndexOutOfBoundsException on odd-length input.
            length -= 1;
        }

        for (int i = 0; i < length; i += 2)
            parameterSet.put(parts[i], parts[i + 1]);
    }

    /**
     * As {@link #readClassifierResultsParaLine(String)}, but if
     * startEndMarkersIncluded is true the line is first stripped to only the
     * tokens between the parasStart and parasEnd delimiters — useful when the
     * results parameter line carries other info (build times, accs, etc.).
     */
    public void readClassifierResultsParaLine(String line, boolean startEndMarkersIncluded) {
        if (startEndMarkersIncluded) {
            String[] parts = line.split(",");

            StringBuilder sb = new StringBuilder();
            boolean reading = false;
            for (String part : parts) {
                if (part.equals(startParaLineDelimiter)) {
                    reading = true;
                    continue; // marker itself is not a parameter token
                }
                if (part.equals(endParaLineDelimiter))
                    break;
                if (reading)
                    sb.append(part + ",");
                // else: token precedes the start marker — ignore.
            }

            line = sb.toString();
        }

        readClassifierResultsParaLine(line);
    }
}
6,057
30.717277
94
java
tsml-java
tsml-java-master/src/main/java/evaluation/tuning/ParameterSpace.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

/**
 * Wraps/contains what is essentially a Map&lt;String, List&lt;String&gt;&gt;, mapping
 * parameter names to lists of possible values (stored as strings). The names
 * should align to the names of the set-able options of the classifier to be
 * tuned, via its setOptions(String[]) method.
 *
 * All values are normalised to String on insertion; adders are provided for
 * lists of arbitrary objects, String arrays, and all primitive array types.
 *
 * @author James Large (james.large@uea.ac.uk)
 */
public class ParameterSpace implements Iterable<Entry<String, List<String>>>{

    public Map<String, List<String>> parameterLists = new HashMap<>();

    /** @return the number of distinct parameter names in the space. */
    public int numParas() {
        return parameterLists.size();
    }

    /**
     * @return the size of the full cross-product of the space, i.e. the number
     * of unique parameter sets a grid search would evaluate.
     * NOTE(review): computed in int arithmetic — could overflow for extremely
     * large spaces; confirm acceptable for intended use.
     */
    public int numUniqueParameterSets() {
        int total = 1;
        for (Map.Entry<String, List<String>> entry : parameterLists.entrySet())
            total *= entry.getValue().size();
        return total;
    }

    /** @return the candidate values stored for the given parameter name, or null. */
    public List<String> getValues(String key) {
        return parameterLists.get(key);
    }

    /**
     * Adder for a *list* of any object (including String); each element is
     * converted via toString(). *Arrays* of objects can use this method via
     * space.addParameter(paraName, Arrays.asList(values));
     */
    public void addParameter(String paraName, List<? extends Object> paraValues) {
        List<String> stringValues = new ArrayList<>(paraValues.size());
        for (int i = 0; i < paraValues.size(); i++)
            stringValues.add(paraValues.get(i).toString());
        parameterLists.put(paraName, stringValues);
    }

    /**
     * Adder for an *array* of Strings.
     */
    public void addParameter(String paraName, String[] paraValues) {
        // BUG FIX: previously stored Arrays.asList(paraValues) directly, which
        // is a fixed-size view backed by the caller's array — later mutation
        // of that array silently changed the stored values, and the list could
        // not be resized. Copy into an independent list instead, consistent
        // with every other adder.
        parameterLists.put(paraName, new ArrayList<>(Arrays.asList(paraValues)));
    }

    // Adders for the primitive array types. These cannot share a generic
    // implementation since primitives do not autobox at the array level; each
    // converts elementwise to String.

    public void addParameter(String paraName, int[] paraValues) {
        List<String> stringValues = new ArrayList<>(paraValues.length);
        for (int i = 0; i < paraValues.length; i++)
            stringValues.add(paraValues[i]+"");
        parameterLists.put(paraName, stringValues);
    }

    public void addParameter(String paraName, double[] paraValues) {
        List<String> stringValues = new ArrayList<>(paraValues.length);
        for (int i = 0; i < paraValues.length; i++)
            stringValues.add(paraValues[i]+"");
        parameterLists.put(paraName, stringValues);
    }

    public void addParameter(String paraName, float[] paraValues) {
        List<String> stringValues = new ArrayList<>(paraValues.length);
        for (int i = 0; i < paraValues.length; i++)
            stringValues.add(paraValues[i]+"");
        parameterLists.put(paraName, stringValues);
    }

    public void addParameter(String paraName, boolean[] paraValues) {
        List<String> stringValues = new ArrayList<>(paraValues.length);
        for (int i = 0; i < paraValues.length; i++)
            stringValues.add(paraValues[i]+"");
        parameterLists.put(paraName, stringValues);
    }

    public void addParameter(String paraName, long[] paraValues) {
        List<String> stringValues = new ArrayList<>(paraValues.length);
        for (int i = 0; i < paraValues.length; i++)
            stringValues.add(paraValues[i]+"");
        parameterLists.put(paraName, stringValues);
    }

    public void addParameter(String paraName, short[] paraValues) {
        List<String> stringValues = new ArrayList<>(paraValues.length);
        for (int i = 0; i < paraValues.length; i++)
            stringValues.add(paraValues[i]+"");
        parameterLists.put(paraName, stringValues);
    }

    /** Iterates over the (name, candidate values) entries of the space. */
    @Override
    public Iterator<Entry<String, List<String>>> iterator() {
        return parameterLists.entrySet().iterator();
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("{\n");
        for (Map.Entry<String, List<String>> paras : parameterLists.entrySet()) {
            sb.append("\t").append(paras.getKey()).append(": [ ");
            for (String val : paras.getValue())
                sb.append(val).append(", ");
            sb.append("]\n");
        }
        sb.append("}");
        return sb.toString();
    }
}
5,026
36.514925
101
java
tsml-java
tsml-java-master/src/main/java/evaluation/tuning/Tuner.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package evaluation.tuning; import evaluation.evaluators.CrossValidationEvaluator; import evaluation.storage.ClassifierResults; import evaluation.evaluators.Evaluator; import evaluation.storage.EstimatorResults; import evaluation.tuning.searchers.GridSearcher; import evaluation.tuning.searchers.ParameterSearcher; import experiments.data.DatasetLoading; import java.io.File; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.Random; import java.util.function.Function; import utilities.FileHandlingTools; import utilities.InstanceTools; import machine_learning.classifiers.SaveEachParameter; import weka.classifiers.AbstractClassifier; import weka.classifiers.functions.SMO; import weka.classifiers.functions.supportVector.PolyKernel; import weka.core.Instances; import tsml.classifiers.Checkpointable; import tsml.classifiers.TrainTimeContractable; /** * * @author James Large (james.large@uea.ac.uk) */ public class Tuner implements SaveEachParameter,Checkpointable, TrainTimeContractable { //Main 3 design choices. 
private ParameterSearcher searcher; //default = new GridSearcher(); private Evaluator evaluator; //default = new CrossValidationEvaluator(); private Function<EstimatorResults, Double> evalMetric; //default = ClassifierResults.GETTER_Accuracy; private ParameterResults bestParaSetAndResults = null; private int seed; private String classifierName; //interpreted from simpleClassName(), maybe have getter setter later private String datasetName; //interpreted from relationName(), maybe have getter setter later ////////// start interface variables //we're implementing CheckpointClassifier AND SaveEachParameter for now, however for this class checkpointing is //identical to SaveEachParamter, we just implicitely checkpoint after each parameterSet evaluation private String parameterSavingPath = null; //SaveEachParameter //CheckpointClassifier private boolean saveParameters = false; //SaveEachParameter //CheckpointClassifier long trainContractTimeNanos; //TrainTimeContractClassifier //note, leaving in nanos for max fidelity, max val of long = 2^64-1 = 586 years in nanoseconds boolean trainTimeContract = false; //TrainTimeContractClassifier ////////// end interface variables private boolean includeMarkersInParaLine = true; /** * if true, the base classifier will be cloned in order to evaluate each parameter set * this will prevent any potentially un-handled changes to the classifiers' state after * the previous parameter build/eval affecting the next one. 
* * if you know that the classifier either has no or correctly re-instantiates any data * that would effect consecutive builds on the same classifier instance, just leave this as * false to save mem/time */ boolean cloneClassifierForEachParameterEval = false; /** * if true, the dataset will be cloned in order to evaluate each parameter set * this will prevent any potentially un-handled changes to the dataset caused by the classifier * after each parameter build/eval * * if you know that the classifier does not edit the original data (as every classifier should not...) * just leave this as false to save mem/time */ boolean cloneTrainSetForEachParameterEval = false; public Tuner() { this(new CrossValidationEvaluator()); } public Tuner(Evaluator evaluator) { this.searcher = new GridSearcher(); this.evaluator = evaluator; this.evalMetric = ClassifierResults.GETTER_Accuracy; setSeed(0); } //handled by the interface methods now // public String getParameterSavingPath() { // return parameterSavingPath; // } // // public void setParameterSavingPath(String parameterSavingPath) { // this.parameterSavingPath = parameterSavingPath; // } public boolean getCloneTrainSetForEachParameterEval() { return cloneTrainSetForEachParameterEval; } public void setCloneTrainSetForEachParameterEval(boolean cloneTrainSetForEachParameterEval) { this.cloneTrainSetForEachParameterEval = cloneTrainSetForEachParameterEval; } public Instances cloneDataIfNeeded(Instances data) { if (cloneTrainSetForEachParameterEval) return new Instances(data); else return data; } public boolean getCloneClassifierForEachParameterEval() { return cloneClassifierForEachParameterEval; } public void setCloneClassifierForEachParameterEval(boolean cloneClassifierForEachParameterEval) { this.cloneClassifierForEachParameterEval = cloneClassifierForEachParameterEval; } public AbstractClassifier cloneClassifierIfNeeded(AbstractClassifier classifier) throws Exception { if (cloneClassifierForEachParameterEval) { //for some 
reason, the (abstract classifiers)' copy method returns a (classifier interface) reference... return (AbstractClassifier)AbstractClassifier.makeCopy(classifier); } else { //just reuse the same instance of the classifier, assume that no info //that from the previous build/eval affects this one. //potentially saves a lot of memory/time etc. return classifier; } } public int getSeed() { return seed; } public void setSeed(int seed) { this.seed = seed; searcher.setSeed(seed); evaluator.setSeed(seed); } public boolean getSaveParameters() { return saveParameters; } public void setSaveParameters(boolean saveParameters) { this.saveParameters = saveParameters; } public boolean getIncludeMarkersInParaLine() { return includeMarkersInParaLine; } public void setIncludeMarkersInParaLine(boolean includeMarkersInParaLine) { this.includeMarkersInParaLine = includeMarkersInParaLine; } public ParameterSearcher getSearcher() { return searcher; } public void setSearcher(ParameterSearcher searcher) { this.searcher = searcher; } public Evaluator getEvaluator() { return evaluator; } public void setEvaluator(Evaluator evaluator) { this.evaluator = evaluator; } public Function<EstimatorResults, Double> getEvalMetric() { return evalMetric; } public void setEvalMetric(Function<EstimatorResults, Double> evalMetric) { this.evalMetric = evalMetric; } public ClassifierResults evaluateParameterSetByIndex(AbstractClassifier baseClassifier, Instances trainSet, ParameterSpace parameterSpace, int parameterIndex) throws Exception { classifierName = baseClassifier.getClass().getSimpleName(); datasetName = trainSet.relationName(); searcher.setParameterSpace(parameterSpace); Iterator<ParameterSet> iter = searcher.iterator(); //iterate up to the specified parameter int id = 0; while (iter.hasNext()) { ParameterSet pset = iter.next(); if (id++ == parameterIndex) { //para found, evaluate it and return the results ClassifierResults results = evaluateParameterSet(baseClassifier, trainSet, pset); return results; 
} } return null; //todo, this should probs be an exception throw instead, tbd } public ClassifierResults evaluateParameterSet(AbstractClassifier baseClassifier, Instances trainSet, ParameterSet parameterSet) throws Exception { Instances data = cloneDataIfNeeded(trainSet); AbstractClassifier classifier = cloneClassifierIfNeeded(baseClassifier); String[] options = parameterSet.toOptionsList(); classifier.setOptions(options); ClassifierResults results = evaluator.evaluate(classifier, data); results.setEstimatorName("TunedClassifier:"+classifierName); results.setDatasetName(datasetName); results.setFoldID(seed); results.setSplit("train"); results.setParas(parameterSet.toClassifierResultsParaLine(includeMarkersInParaLine)); return results; } public ParameterResults tune(AbstractClassifier baseClassifier, Instances trainSet, ParameterSpace parameterSpace) throws Exception { //System.out.println("Evaluating para space: " + parameterSpace); //for contracting long startTime = System.nanoTime(); long maxParaEvalTime = 0; //meta info in case we're saving para files classifierName = baseClassifier.getClass().getSimpleName(); datasetName = trainSet.relationName(); //init the space searcher searcher.setParameterSpace(parameterSpace); Iterator<ParameterSet> iter = searcher.iterator(); //for resolving ties for the best paraset List<ParameterResults> tiesBestSoFar = new ArrayList<>(); //iterate over the space int parameterSetID = -1; while (iter.hasNext()) { parameterSetID++; ParameterSet pset = iter.next(); long thisParaStartTime = System.nanoTime(); if (saveParameters && parametersAlreadyEvaluated(parameterSetID)) continue; // THE WORK ClassifierResults results = evaluateParameterSet(baseClassifier, trainSet, pset); if (saveParameters) saveParaResults(parameterSetID, results); else storeParaResult(pset, results, tiesBestSoFar); if (trainTimeContract) { long thisParaTime = System.nanoTime() - thisParaStartTime; if (thisParaTime > maxParaEvalTime) maxParaEvalTime = thisParaTime; 
                // running total of tuning time, used to decide whether another evaluation fits in the contract
                long totalTimeSoFar = System.nanoTime() - startTime;
//                int numParasEvald = parameterSetID + 1;
//                long avgTimePerPara = totalTimeSoFar / numParasEvald;

                // NOTE(review): this breaks out of the evaluation loop when the projected time is
                // still *within* the contract (withinTrainContract(t) == t < trainContractTimeNanos),
                // which looks inverted (expected: break when the next eval would exceed the budget).
                // Confirm against the start of tune(), which is outside this view.
                if (withinTrainContract(totalTimeSoFar+maxParaEvalTime))
                    break;
            }

            //System.out.println("Score: " + String.format("%5f", score) + "\tParas: " + pset);
        }

        if (saveParameters) {
            // if we're contracting, (but also saving parasets)
            // we might not have had time to eval ALL the psets, just find the best so far
            // if we're contracting but not saving each paraset, we'll have been using
            // storeParaResult() and have them in memory currently anyway
            if (trainTimeContract)
                tiesBestSoFar = loadBestOfSavedParas_SoFar();
            else
                tiesBestSoFar = loadBestOfSavedParas_All(parameterSpace.numUniqueParameterSets());
            //conversely if we're NOT contracting, we have the strict requirement that
            //the entire space has been evaluated (or at least has been fully iterated over as defined by the
            //searcher, e.g RandomSearcher has searched it's full 1000 times etc)
        }

        bestParaSetAndResults = resolveTies(tiesBestSoFar);
        //System.out.println("Best parameter set was: " + bestSet);

        return bestParaSetAndResults;
    }

    /** True iff there is enough contract time left to afford one more evaluation of (at most) maxParaEvalTime. */
    private boolean canWeEvaluateAnotherParaSet(long maxParaEvalTime, long totalTimeSoFar) {
        return trainContractTimeNanos - totalTimeSoFar > maxParaEvalTime;
    }

    /** True iff a results file for this parameter set id has already been written under parameterSavingPath. */
    private boolean parametersAlreadyEvaluated(int paraID) {
        return ClassifierResults.exists(parameterSavingPath + buildParaFilename(paraID));
    }

    /** Builds the filename suffix for a parameter set's saved results. */
    private String buildParaFilename(int paraID) {
//        return "fold" + seed + "_" +paraID + ".csv";
        //experiments passes us /path/[classifier]/predictions/[dataset]/fold[seed]_
        //so the fold prefix is already part of parameterSavingPath; just append the id
        return paraID + ".csv";
    }

    /**
     * Scores the given results with evalMetric and folds them into the running list of
     * tied-best parameter sets: appends on an exact score tie, replaces the whole list
     * on a strictly better score, otherwise leaves the list unchanged.
     */
    private void storeParaResult(ParameterSet pset, ClassifierResults results, List<ParameterResults> tiesBestSoFar) {
        double score = evalMetric.apply(results);
        ParameterResults paraScore = new ParameterResults(pset, results, score);

        if (tiesBestSoFar.isEmpty()) //first time around loop
            tiesBestSoFar.add(paraScore);
        else {
            if (score == tiesBestSoFar.get(0).score) { //another tie
                tiesBestSoFar.add(paraScore);
            }
            else if (score > tiesBestSoFar.get(0).score) { //new best so far
                tiesBestSoFar.clear();
                tiesBestSoFar.add(paraScore);
            }
        }
    }

    /** Writes the full ClassifierResults for this parameter set id to parameterSavingPath. */
    public void saveParaResults(int paraID, ClassifierResults results) throws Exception {
//        File f = new File(parameterSavingPath);
//        if (!f.exists()){
//            System.out.println("Creating directory " + parameterSavingPath);
//            f.mkdirs();
//        }
        //experiments passes us /path/[classifier]/predictions/[dataset]/fold[seed]_
        //so no need to make dir, just add on para id and write
        results.writeFullResultsToFile(parameterSavingPath + buildParaFilename(paraID));
    }

    /**
     * Loads all the saved parameter results files with the expectation that every parameter set
     * up to the id# passed has been evaluated (intended usage being that numParasExpected = parameterSpace.numUniqueParameterSets())
     *
     * populates and returns a list of the ties for best parameterSet
     *
     * @throws Exception if any expected results file is missing
     */
    private List<ParameterResults> loadBestOfSavedParas_All(int numParasExpected) throws Exception {
        List<ParameterResults> tiesBestSoFar = new ArrayList<>();

        for (int paraID = 0; paraID < numParasExpected; paraID++) {
            String path = parameterSavingPath + buildParaFilename(paraID);
            if (ClassifierResults.exists(path)) {
                ClassifierResults tempResults = new ClassifierResults(path);
                ParameterSet pset = new ParameterSet();
                pset.readClassifierResultsParaLine(tempResults.getParas(), includeMarkersInParaLine);
                storeParaResult(pset, tempResults, tiesBestSoFar);
            }
            else {
                throw new Exception("Trying to load paras back in, but missing expected parameter set ID: " + paraID + ", numParasExpected: " + numParasExpected);
            }
        }

        return tiesBestSoFar;
    }

    /**
     * Loads all the saved parameter results files that have been written 'so far',
     * using parameterSavingPath as a search term to look for saved files
     *
     * populates and returns a list of the ties for best parameterSet
     */
    private List<ParameterResults> loadBestOfSavedParas_SoFar() throws Exception {
        List<ParameterResults> tiesBestSoFar = new ArrayList<>();

        //assumption, parameterSavingPath is of form some/long/path/fold[seed]_
        File f = new File(parameterSavingPath);
        String filenamePrefix = f.getName(); // fold[seed]_
        String dir = f.getParent(); // some/long/path/

        File[] files = FileHandlingTools.listFilesContaining(dir, filenamePrefix);

        for (File file : files) {
            ClassifierResults tempResults = new ClassifierResults(file.getAbsolutePath());
            ParameterSet pset = new ParameterSet();
            pset.readClassifierResultsParaLine(tempResults.getParas(), includeMarkersInParaLine);
            storeParaResult(pset, tempResults, tiesBestSoFar);
        }

        return tiesBestSoFar;
    }

    /**
     * Picks the winner from the tied-best list: a single entry wins outright,
     * otherwise ties are broken uniformly at random using the configured seed.
     */
    private ParameterResults resolveTies(List<ParameterResults> tiesBestSoFar) {
        if (tiesBestSoFar.size() == 1) {
            //clear winner
            return tiesBestSoFar.get(0);
        }
        else {
            //resolve ties randomly: todo future, maybe allow for some other method of resolving ties,
            //e.g choose 'least complex' parameter set of the ties
            Random rand = new Random(seed);
            return tiesBestSoFar.get(rand.nextInt(tiesBestSoFar.size()));
        }
    }

    /** Usage example: tunes an SMO's C parameter over a log scale on a UCI dataset (hard-coded local paths). */
    public static void main(String[] args) throws Exception {
        int seed = 0;

        SMO svm = new SMO();
        PolyKernel p=new PolyKernel();
        p.setExponent(2);
        svm.setKernel(p);
        svm.setRandomSeed(seed);

        // C values 10^-2 .. 10^2
        int size = 5;
        double[] cs = new double[size];
        for (int i = 0; i < cs.length; i++)
            cs[i] = Math.pow(10.0, (i-size/2));

        ParameterSpace space = new ParameterSpace();
        space.addParameter("C", cs);

        Tuner tuner = new Tuner();
        tuner.setPathToSaveParameters("C:/Temp/TunerTests/first/");
        tuner.setSeed(seed);

        String dataset = "hayes-roth";
        Instances all = DatasetLoading.loadDataNullable("Z:\\Data\\UCIDelgado\\"+dataset+"\\"+dataset+".arff");
        Instances[] data = InstanceTools.resampleInstances(all, seed, 0.5);

        System.out.println(tuner.tune(svm, data[0], space));
    }

    // METHODS FOR: SaveEachParameter,CheckpointClassifier,TrainTimeContractClassifier

    @Override //SaveEachParameter
    public void setPathToSaveParameters(String r) {
        this.parameterSavingPath = r;
        this.saveParameters = true;
    }

    @Override //SaveEachParameter
    public void setSaveEachParaAcc(boolean b) {
        this.saveParameters = b;
        //does anywhere set this to true but not give the path?. part of interface cleanup tests
    }

    @Override //Checkpointable
    public boolean setCheckpointPath(String path) {
        // only adopt the path (and enable parameter saving) if the directories could be created
        boolean validPath=Checkpointable.super.createDirectories(path);
        if(validPath){
            this.parameterSavingPath = path;
            this.saveParameters = true;
        }
        return validPath;
    }

    @Override //CheckpointClassifier
    public void copyFromSerObject(Object obj) throws Exception {
        throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
    }

    public void setTrainTimeLimit(long amount) {
        trainTimeContract = true;
        trainContractTimeNanos =amount;
    }

    @Override
    public boolean withinTrainContract(long start) {
        // caller passes an elapsed time, compared against the contract budget in nanoseconds
        return start<trainContractTimeNanos;
    }
}
19,574
37.533465
182
java
tsml-java
tsml-java-master/src/main/java/evaluation/tuning/searchers/BayesianSearcher.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
package evaluation.tuning.searchers;

import evaluation.tuning.ParameterSet;
import statistics.distributions.NormalDistribution;
import weka.classifiers.functions.GaussianProcesses;
import weka.classifiers.functions.supportVector.Kernel;
import weka.classifiers.functions.supportVector.RBFKernel;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;

import java.util.*;
import java.util.function.Function;

/**
 * EXPERIMENTAL Bayesian-optimisation parameter searcher: fits a Gaussian process
 * to (parameter set, objective score) pairs and repeatedly proposes the candidate
 * with the highest expected improvement.
 *
 * Explicitly not usable yet: the constructor unconditionally throws to flag that
 * the implementation is "in progress, most likely broken".
 */
public class BayesianSearcher extends ParameterSearcher {

    // surrogate model fit over previously evaluated parameter sets
    private GaussianProcesses gp = new GaussianProcesses();

    // maps a candidate ParameterSet to its score; higher is treated as better
    private Function<ParameterSet, Double> objectiveFunction;

    private int maxIterations = 500;
    // number of purely random evaluations before the GP is consulted
    private int numSeedPoints = 50;

    // parallel arrays mapping attribute index -> parameter name / its value list
    private String[] keys;
    private List<String>[] values;

    // best-scoring encoded parameter set seen so far (class attribute holds its score)
    private Instance bestParameters;

    /**
     * @param objectiveFunction scoring function for candidate parameter sets
     * @throws Exception always, while the implementation is unfinished
     */
    public BayesianSearcher(Function<ParameterSet, Double> objectiveFunction) throws Exception {
        this.objectiveFunction = objectiveFunction;
        gp.setKernel(new RBFKernel());
        gp.setNoise(2);
        throw new Exception("Currently \"in progress\", most likely broken.");
    }

    /** Decodes and returns the best parameter set found so far. */
    public ParameterSet getBestParameters(){ return instanceToParameterSet(bestParameters); }

    /** Replaces the GP kernel (default: RBF). */
    public void setKernel(Kernel kernel){
        gp.setKernel(kernel);
    }

    /** Sets the GP noise level (default: 2). */
    public void setNoise(double noise) {
        gp.setNoise(noise);
    }

    public void setMaxIterations(int max) {
        maxIterations = max;
    }

    @Override
    public Iterator<ParameterSet> iterator() {
        return new BayesianSearchIterator();
    }

    /**
     * Decodes an encoded candidate: each non-class attribute value is an index
     * into the corresponding parameter's value list.
     */
    private ParameterSet instanceToParameterSet(Instance inst){
        ParameterSet pset = new ParameterSet();

        for (int i = 0; i < inst.numAttributes()-1; i++) {
            pset.parameterSet.put(keys[i], values[i].get((int)inst.value(i)));
        }

        return pset;
    }

    private class BayesianSearchIterator implements Iterator<ParameterSet> {

        // set to false once expected improvement drops to zero or maxIterations is hit
        private boolean improvementExpected = true;

        // parameterPool: encoded candidates not yet evaluated; pastParameters: evaluated ones (class = score)
        private Instances parameterPool;
        private Instances pastParameters;

        private double maxObjVal = 0;
        private int numIterations = 0;

        private Random rand;

        public BayesianSearchIterator(){
            rand = new Random(seed);

            keys = new String[space.numParas()];
            values = new List[space.numParas()]; // raw List[]: generic arrays cannot be created directly
            bestParameters = null;

            int n = 0;
            for (Map.Entry<String, List<String>> entry : space.parameterLists.entrySet()) {
                keys[n] = entry.getKey();
                values[n] = entry.getValue();
                n++;
            }

            // one numeric attribute per parameter, plus a class attribute holding the objective score
            int numAtts = keys.length+1;
            ArrayList<Attribute> atts = new ArrayList<>(numAtts);
            for (int i = 0; i < numAtts; i++){
                atts.add(new Attribute("att" + i));
            }

            parameterPool = new Instances("Parameters", atts, 0);
            parameterPool.setClassIndex(parameterPool.numAttributes()-1);
            pastParameters = new Instances(parameterPool, 0);
            pastParameters.setClassIndex(pastParameters.numAttributes()-1);

            // enumerate the full space via a GridSearcher and encode each combination
            // as value-list indices
            GridSearcher gs = new GridSearcher();
            gs.space = space;

            for (ParameterSet p : gs) {
                double[] idx = new double[keys.length + 1];
                int i = 0;
                // NOTE(review): assumes the entrySet iteration order of p.parameterSet matches
                // the order keys[]/values[] were captured in -- confirm both use the same map ordering
                for (Map.Entry<String, String> entry : p.parameterSet.entrySet()) {
                    idx[i] = values[i].indexOf(entry.getValue());
                    i++;
                }

                DenseInstance inst = new DenseInstance(1, idx);
                parameterPool.add(inst);
            }
        }

        @Override
        public boolean hasNext() {
            return improvementExpected;
        }

        /**
         * Returns the next candidate: random draws for the first numSeedPoints calls,
         * then GP-guided expected-improvement proposals. Evaluates the objective as a
         * side effect and tracks the best result.
         */
        @Override
        public ParameterSet next() {
            ParameterSet pset = null;
            Instance chosenParameters = null;

            if (numIterations < numSeedPoints){
                // seeding phase: evaluate a uniformly random unevaluated candidate
                chosenParameters = parameterPool.remove(rand.nextInt(parameterPool.size()));
                pset = instanceToParameterSet(chosenParameters);

                double objVal = objectiveFunction.apply(pset);
                chosenParameters.setValue(chosenParameters.classIndex(), objVal);
                pastParameters.add(chosenParameters);

                if (objVal > maxObjVal){
                    maxObjVal = objVal;
                    bestParameters = chosenParameters;
                }
            }
            else{
                try {
                    gp.buildClassifier(pastParameters);

                    double maxVal = 0;

                    //Expected improvement, probably broken
                    // NOTE(review): standard EI with exploration margin 0.01; relies on
                    // GaussianProcesses.getStandardDeviation and NormalDistribution CDF/density -- verify
                    for (Instance inst: parameterPool){
                        double mean = gp.classifyInstance(inst);
                        double std = gp.getStandardDeviation(inst);

                        if (std != 0){
                            NormalDistribution n = new NormalDistribution();
                            double imp = (mean - maxObjVal - 0.01);
                            double z = imp / std;
                            double ei = imp * n.getCDF(z) + std * n.getDensity(z);

                            if (ei > maxVal){
                                maxVal = ei;
                                chosenParameters = inst;
                            }
                        }
                    }

                    if (maxVal == 0 || numIterations == maxIterations){
                        // no expected improvement anywhere (or iteration cap hit): stop,
                        // emitting the best set found as the final element
                        improvementExpected = false;
                        chosenParameters = bestParameters;
                        pset = instanceToParameterSet(chosenParameters);
                    }
                    else{
                        // evaluate the most promising candidate and move it from pool to history
                        pset = instanceToParameterSet(chosenParameters);
                        double objVal = objectiveFunction.apply(pset);
                        chosenParameters.setValue(chosenParameters.classIndex(), objVal);
                        pastParameters.add(chosenParameters);
                        parameterPool.remove(chosenParameters);

                        if (objVal > maxObjVal){
                            maxObjVal = objVal;
                            bestParameters = chosenParameters;
                        }
                    }
                } catch (Exception e){
                    e.printStackTrace();
                }
            }

            numIterations++;
            return pset;
        }
    }
}
7,113
35.295918
96
java
tsml-java
tsml-java-master/src/main/java/evaluation/tuning/searchers/GridSearcher.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package evaluation.tuning.searchers;

import evaluation.tuning.ParameterSet;

import java.util.Iterator;
import java.util.List;
import java.util.Map;

/**
 * Exhaustive searcher: enumerates every combination of parameter values in the
 * space, treating the per-parameter value lists as the digits of an odometer.
 *
 * @author James Large (james.large@uea.ac.uk)
 */
public class GridSearcher extends ParameterSearcher {

    @Override
    public Iterator<ParameterSet> iterator() {
        return new GridSearchIterator();
    }

    public class GridSearchIterator implements Iterator<ParameterSet> {

        int numParas;
        String[] keys;
        int[] sizes;
        int[] currentInds;

        public GridSearchIterator() {
            numParas = space.numParas();

            // odometer counters over the value lists, all starting at zero
            currentInds = new int[numParas];
            sizes = new int[numParas];
            keys = new String[numParas];

            int idx = 0;
            for (Map.Entry<String, List<String>> entry : space.parameterLists.entrySet()) {
                keys[idx] = entry.getKey();
                sizes[idx] = entry.getValue().size();
                idx++;
            }
        }

        @Override
        public boolean hasNext() {
            // the outer-most counter only ever reaches its list size once every
            // combination has been emitted
            return currentInds[0] < sizes[0];
        }

        @Override
        public ParameterSet next() {
            // emit the combination the counters currently point at
            ParameterSet pset = new ParameterSet();
            for (int i = 0; i < keys.length; i++)
                pset.parameterSet.put(keys[i], space.parameterLists.get(keys[i]).get(currentInds[i]));

            // advance the odometer: bump the inner-most counter and carry any
            // overflow outwards; a carry out of counter 0 is deliberately left
            // in place so hasNext() reports exhaustion
            for (int i = numParas - 1; i >= 0; i--) {
                currentInds[i]++;
                if (currentInds[i] < sizes[i] || i == 0)
                    break;
                currentInds[i] = 0;
            }

            return pset;
        }
    }
}
3,276
33.861702
121
java
tsml-java
tsml-java-master/src/main/java/evaluation/tuning/searchers/ParameterSearcher.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package evaluation.tuning.searchers; import evaluation.tuning.ParameterSet; import evaluation.tuning.ParameterSpace; import java.io.File; import utilities.FileHandlingTools; /** * * @author James Large (james.large@uea.ac.uk) */ public abstract class ParameterSearcher implements Iterable<ParameterSet> { protected ParameterSpace space = null; protected int seed = 0; protected String parameterSavingPath = null; public ParameterSpace getParameterSpace() { return space; } public void setParameterSpace(ParameterSpace parameterSpace) { this.space = parameterSpace; } public void setSeed(int seed) { this.seed = seed; } public void setParameterSavingPath(String path) { this.parameterSavingPath = path; } public String getParameterSavingPath() { return parameterSavingPath; } protected File[] findSavedParas() { if (parameterSavingPath == null || parameterSavingPath == "") return new File[] { }; return FileHandlingTools.listFilesContaining(parameterSavingPath, "fold" + seed + "_"); } protected int findHowManyParasAlreadySaved() { return findSavedParas().length; } }
2,033
30.292308
95
java
tsml-java
tsml-java-master/src/main/java/evaluation/tuning/searchers/RandomSearcher.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package evaluation.tuning.searchers;

import evaluation.tuning.ParameterSet;
import evaluation.tuning.searchers.GridSearcher.GridSearchIterator;

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.TreeSet;

/**
 * Samples parameter sets uniformly at random, without replacement, up to a
 * fixed budget. If the budget covers the whole space, exhaustive enumeration
 * (GridSearcher) is used instead.
 *
 * @author James Large (james.large@uea.ac.uk)
 */
public class RandomSearcher extends ParameterSearcher {

    // number of unique parameter sets to draw; default 1000
    int numParaSetsToSample;

    public RandomSearcher() {
        numParaSetsToSample = 1000;
    }

    public RandomSearcher(int numParasToSample) {
        this.numParaSetsToSample = numParasToSample;
    }

    public int getNumParaSetsToSearch() {
        return numParaSetsToSample;
    }

    public void setNumParaSetsToSearch(int numParaSetsToSearch) {
        this.numParaSetsToSample = numParaSetsToSearch;
    }

    @Override
    public Iterator<ParameterSet> iterator() {
        if (numParaSetsToSample < space.numUniqueParameterSets())
            return new RandomSearchIterator();
        else {
            System.out.println("Warning: trying to randomly sample a space more times than there are unique values in the space, just using a GridSearch");
            // bug fix: the fallback previously returned new GridSearcher().iterator()
            // without setting its space, which guaranteed an NPE in
            // GridSearchIterator's constructor; propagate our space and seed
            GridSearcher gs = new GridSearcher();
            gs.setParameterSpace(space);
            gs.setSeed(seed);
            return gs.iterator();
        }
    }

    public class RandomSearchIterator implements Iterator<ParameterSet> {

        Random rng;
        int numParaSetsSampled;

        int numParas;
        String[] keys;
        int[] sizes;

        // filename-string encodings of sets already drawn, for sampling without replacement
        Set<String> visitedParas = new TreeSet<>();

        public RandomSearchIterator() {
            numParaSetsSampled = 0;

            numParas = space.numParas();
            rng = new Random(seed);

            sizes = new int[numParas];
            keys = new String[numParas];

            int i = 0;
            for (Map.Entry<String, List<String>> entry : space.parameterLists.entrySet()) {
                keys[i] = entry.getKey();
                sizes[i] = entry.getValue().size();
                i++;
            }
        }

        @Override
        public boolean hasNext() {
            return numParaSetsSampled < numParaSetsToSample;
        }

        /**
         * Draws a not-previously-seen combination of value indices. Retries until
         * an unseen set is found; termination is guaranteed because this iterator
         * is only used when the budget is below the number of unique sets.
         */
        public int[] sampleParas(int secondSeed) {
            Random trng = new Random(secondSeed);

            int[] set = new int[numParas];
            boolean isNewSet = true;

            do {
                set[0] = trng.nextInt(sizes[0]);
                for (int i = 1; i < numParas; i++)
                    set[i] = trng.nextInt(sizes[i]);

                isNewSet = visitedParas.add(ParameterSet.toFileNameString(set));
            } while (!isNewSet);

            return set;
        }

        @Override
        public ParameterSet next() {
            //so, to replicate this parameter set (in e.g a checkpointed/para split scenario)
            //init the searcher with same fold-seed, and iterate through same number of times
            int[] psetInds = sampleParas(rng.nextInt());

            ParameterSet pset = new ParameterSet();
            for (int i = 0; i < keys.length; i++)
                pset.parameterSet.put(keys[i], space.parameterLists.get(keys[i]).get(psetInds[i]));

            numParaSetsSampled++;
            return pset;
        }
    }
}
4,170
31.585938
155
java
tsml-java
tsml-java-master/src/main/java/examples/BasicEvaluation.java
/* * Copyright (C) 2019 xmw13bzu * * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package examples; import evaluation.evaluators.CrossValidationEvaluator; import evaluation.evaluators.Evaluator; import evaluation.evaluators.MultiSamplingEvaluator; import evaluation.evaluators.SingleTestSetEvaluator; import evaluation.storage.ClassifierResults; import experiments.ClassifierLists; import experiments.data.DatasetLoading; import weka.classifiers.Classifier; import weka.core.Instances; /** * Examples to show different ways of evaluating classifiers * * @author James Large (james.large@uea.ac.uk) */ public class BasicEvaluation { public static void main(String[] args) throws Exception { // We'll use this data throughout, see Ex01_Datahandling int seed = 0; Instances[] trainTest = DatasetLoading.sampleItalyPowerDemand(seed); Instances train = trainTest[0]; Instances test = trainTest[1]; // Let's use Random Forest throughout, see Ex02_Classifiers Classifier classifier = ClassifierLists.setClassifierClassic("RandF", seed); // We saw in Ex02_Classifiers that we can build on predefined train data, and test // on predefined test data by looping over the instances // We can also evaluate using evaluation.Evaluators to get back an evaluation.storage.ClassifierResults // object, which we'll discuss 
further below. This functionality is still being expanded, // but the API is fairly set. // Here's the most basic evaluator, which replaces the looping over the test // set in the previous example // We build the classifier ourselves on the train data classifier.buildClassifier(train); // Setup the evaluator boolean cloneData = true, setClassMissing = true; Evaluator testSetEval = new SingleTestSetEvaluator(seed, cloneData, setClassMissing); // And, in this case, test on the single held-out test set. ClassifierResults testResults = testSetEval.evaluate(classifier, test); System.out.println("Random Forest accuracy on ItalyPowerDemand: " + testResults.getAcc()); // Other evaluators currently implemented are for cross validation and random stratified resamples // Instead of building the classifier before-hand and passing that to the evaluator with // the test data, these will repeatedly build the classifier on each fold or resample. // Let's generate an estimate of our error from the train data through cross validation. boolean cloneClassifier = false, maintainFoldClassifiers = false; MultiSamplingEvaluator cvEval = new CrossValidationEvaluator(seed, cloneData, setClassMissing, cloneClassifier, maintainFoldClassifiers); cvEval.setNumFolds(10); ClassifierResults trainResults = cvEval.evaluate(classifier, train); System.out.println("Random Forest average accuracy estimate on ItalyPowerDemand: " + trainResults.getAcc()); for (int i = 0; i < 10; i++) System.out.println("\tCVFold " + i + " accuracy: " + cvEval.getFoldResults()[i].getAcc()); // We've used ClassifierResults so far to retrieve the accuracy of a set of predictions // This is a general purpose predictions-storage class, which gets updated relatively // often. It stores predictions, meta info and timings, can calculate eval metrics // over them (accuracy, auroc, f1, etc.), and supports reading/writing to file. String mockFile = trainResults.writeFullResultsToString(); System.out.println("\n\n" + mockFile); } }
4,571
42.132075
145
java
tsml-java
tsml-java-master/src/main/java/examples/Classifiers.java
/* * Copyright (C) 2019 xmw13bzu * * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package examples; import experiments.ClassifierLists; import experiments.data.DatasetLoading; import weka.classifiers.Classifier; import weka.classifiers.trees.RandomForest; import weka.core.Instance; import weka.core.Instances; /** * Examples to show different ways of constructing classifiers, and basic usage * * @author James Large (james.large@uea.ac.uk) */ public class Classifiers { public static void main(String[] args) throws Exception { // We'll use this data throughout, see Ex01_Datahandling int seed = 0; Instances[] trainTest = DatasetLoading.sampleItalyPowerDemand(seed); Instances train = trainTest[0]; Instances test = trainTest[1]; // Here's the super basic workflow, this is pure weka: RandomForest randf = new RandomForest(); randf.setNumTrees(500); randf.setSeed(seed); randf.buildClassifier(train); //aka fit, train double acc = .0; for (Instance testInst : test) { double pred = randf.classifyInstance(testInst); //aka predict //double [] dist = randf.distributionForInstance(testInst); //aka predict_proba if (pred == testInst.classValue()) acc++; } acc /= test.numInstances(); System.out.println("Random Forest accuracy on ItalyPowerDemand: " + acc); // All classifiers implement the Classifier 
interface. this guarantees // the buildClassifier, classifyInstance and distributionForInstance methods, // which is mainly what we want // Most if not all classifiers should extend AbstractClassifier, which adds // on a little extra common functionality // There are also a number of classifiers listed in experiments.ClassifierLists // This class is updated over time and may eventually turn in to factories etc // on the backend, but for now what this is just a way to get a classifier // with defined settings (parameters etc). We use this to record the exact // parameters used in papers for example. We also use this to instantiate // particular classifiers from a string argument when running on clusters Classifier classifier = ClassifierLists.setClassifierClassic("RandF", seed); classifier.buildClassifier(train); classifier.distributionForInstance(test.instance(0)); } }
3,458
35.797872
91
java
tsml-java
tsml-java-master/src/main/java/examples/Clusterers.java
/*
 * Copyright (C) 2019 xmw13bzu
 *
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package examples;

import evaluation.storage.ClustererResults;
import experiments.data.DatasetLoading;
import machine_learning.clusterers.KMeans;
import tsml.clusterers.UnsupervisedShapelets;
import utilities.ClusteringUtilities;
import weka.core.Instances;

import java.util.ArrayList;
import java.util.Arrays;

/**
 * Examples to show the method for building clusterers and basic usage.
 *
 * @author Matthew Middlehurst
 */
public class Clusterers {

    public static void main(String[] args) throws Exception {

        // Data setup, as covered in Ex01_Datahandling; print the class column for reference
        Instances inst = DatasetLoading.loadChinatown();
        System.out.println(Arrays.toString(inst.attributeToDoubleArray(inst.classIndex())));

        // Time-series clusterer: construct, seed, set k (here = no. classes), build
        UnsupervisedShapelets shapeletClusterer = new UnsupervisedShapelets();
        shapeletClusterer.setSeed(0);
        shapeletClusterer.setNumClusters(inst.numClasses());
        shapeletClusterer.buildClusterer(inst);

        // getAssignments() is index-aligned with the Instances object (index 0 with
        // value 1 == first instance assigned to cluster 1); getClusters() gives each
        // cluster's member instance indices
        double[] shapeletAssignments = shapeletClusterer.getAssignments();
        ArrayList<Integer>[] shapeletClusters = shapeletClusterer.getClusters();

        System.out.println("UnsupervisedShapelets cluster assignments:");
        System.out.println(Arrays.toString(shapeletAssignments));
        System.out.println("UnsupervisedShapelets clusters:");
        System.out.println(Arrays.toString(shapeletClusters));

        // ClustererResults is our storage for completed cluster results, computing
        // popular metrics such as rand index and mutual information
        ClustererResults shapeletResults = ClusteringUtilities.getClusteringResults(shapeletClusterer, inst);
        shapeletResults.findAllStats();

        System.out.println("UnsupervisedShapelets results:");
        System.out.println(shapeletResults.statsToString());
        System.out.println();

        // Non-TSC clustering algorithms are also available in tsml; weka also
        // implements a range of clusterers, though any class value must be
        // removed before using those
        KMeans kmeans = new KMeans();
        kmeans.setSeed(0);
        kmeans.setNumClusters(inst.numClasses());
        kmeans.buildClusterer(inst);

        double[] kmeansAssignments = kmeans.getAssignments();
        ArrayList<Integer>[] kmeansClusters = kmeans.getClusters();

        System.out.println("KMeans cluster assignments:");
        System.out.println(Arrays.toString(kmeansAssignments));
        System.out.println("KMeans clusters:");
        System.out.println(Arrays.toString(kmeansClusters));

        ClustererResults kmeansResults = ClusteringUtilities.getClusteringResults(kmeans, inst);
        kmeansResults.findAllStats();

        System.out.println("KMeans results:");
        System.out.println(kmeansResults.statsToString());
    }
}
3,935
41.322581
118
java
tsml-java
tsml-java-master/src/main/java/examples/DataHandling.java
/*
 * Copyright (C) 2019 xmw13bzu
 *
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package examples;

import experiments.data.DatasetLists;
import experiments.data.DatasetLoading;
import tsml.data_containers.TimeSeries;
import tsml.data_containers.TimeSeriesInstance;
import tsml.data_containers.TimeSeriesInstances;
import tsml.data_containers.utilities.TimeSeriesResampler;
import utilities.InstanceTools;
import weka.core.Instance;
import weka.core.Instances;

import java.io.IOException;
import java.util.Arrays;

/**
 * Examples to show different ways of loading and basic handling of datasets
 *
 * @author James Large (james.large@uea.ac.uk)
 */
public class DataHandling {

    public static void main(String[] args) throws Exception {
        /*
         * Uncomment which function is needed depending on data file type.
         */
//        dataHandlingWithARFF(); // .arff
//        dataHandlingWithTS(); // .ts

        String whereTheDataIs = DatasetLoading.BAKED_IN_TSC_DATA_PATH;
        String whereToPutTheData = "C:\\Temp\\";

        String[] problems = DatasetLists.tscProblems112;
        problems = new String[]{"Chinatown"};

        int resamples = 30;

        for (String str : problems) {
            // bug fix: the loop bound was hard-coded to 30, leaving the
            // 'resamples' variable dead; use it so the count is configurable in one place
            for (int i = 0; i < resamples; i++)
                resamplingData(whereTheDataIs, whereToPutTheData, str, i);
        }
    }

    /**
     * Loads the predefined train/test split of a problem from 'source', resamples it
     * (maintaining the original train/test class distributions) with the given seed,
     * and writes the resampled split to 'dest'.
     *
     * @throws IOException if loading or saving fails
     */
    public static void resamplingData(String source, String dest, String problem, int resample) throws IOException {
        Instances train = DatasetLoading.loadData(source + problem + "/" + problem + "_TRAIN.arff");
        Instances test = DatasetLoading.loadData(source + problem + "/" + problem + "_TEST.arff");

        // We could then resample these, while maintaining train/test distributions, using this
        Instances[] trainTest = InstanceTools.resampleTrainAndTestInstances(train, test, resample);
        train = trainTest[0];
        test = trainTest[1];

        DatasetLoading.saveDataset(train, dest + problem + "/" + problem + "_" + resample + "_TRAIN" + ".arff");
        DatasetLoading.saveDataset(test, dest + problem + "/" + problem + "_" + resample + "_TEST" + ".arff");
    }

    /** Walkthrough of the three .arff loading routes plus basic Instances inspection. */
    private static void dataHandlingWithARFF() throws Exception {
        // We'll be loading the ItalyPowerDemand dataset which is distributed with this codebase
        String basePath = "src/main/java/experiments/data/tsc/";
        String dataset = "ItalyPowerDemand";
        int seed = 1;

        Instances train;
        Instances test;
        Instances[] trainTest;

        ///////////// Loading method 1: loading individual files
        // DatasetLoading.loadData...(...)
        // For loading in a single arff without performing any kind of sampling.
        // Class value is assumed to be the last attribute
        train = DatasetLoading.loadDataThrowable(basePath + dataset + "/" + dataset + "_TRAIN.arff");
        test = DatasetLoading.loadDataThrowable(basePath + dataset + "/" + dataset + "_TEST.arff");

        // We could then resample these, while maintaining train/test distributions, using this
        trainTest = InstanceTools.resampleTrainAndTestInstances(train, test, seed);
        train = trainTest[0];
        test = trainTest[1];

        ///////////// Loading method 2: sampling directly
        // DatasetLoading.sampleDataset(...)
        // Wraps the data loading and sampling performed above. Read in a dataset either
        // from a single complete file (e.g. uci data) or a predefined split (e.g. ucr/tsc data)
        // and resamples it according to the seed given. If the resampled fold can already
        // be found in the read location ({dsetname}{foldid}_TRAIN and _TEST) then it will
        // load those. See the sampleDataset(...) javadoc
        trainTest = DatasetLoading.sampleDataset(basePath, dataset, seed);
        train = trainTest[0];
        test = trainTest[1];

        ///////////// Loading method 3: sampling the built in dataset
        // DatasetLoading.sampleDataset(...)
        // Because ItalyPowerDemand is distributed with the codebase, there's a wrapper
        // to sample it directly for quick testing
        trainTest = DatasetLoading.sampleItalyPowerDemand(seed);
        train = trainTest[0];
        test = trainTest[1];

        //////////// Data inspection and handling:
        // We can look at the basic meta info
        System.out.println("train.relationName() = " + train.relationName());
        System.out.println("train.numInstances() = " + train.numInstances());
        System.out.println("train.numAttributes() = " + train.numAttributes());
        System.out.println("train.numClasses() = " + train.numClasses());

        // And the individual instances
        for (Instance inst : train)
            System.out.print(inst.classValue() + ", ");
        System.out.println("");

        // Often for speed we just want the data in a primitive array
        // We can go to and from them using this sort of procedure
        // Lets keeps the class labels separate in this example
        double[] classLabels = train.attributeToDoubleArray(train.classIndex()); // aka y_train
        boolean removeLastVal = true;
        double[][] data = InstanceTools.fromWekaInstancesArray(train, removeLastVal); // aka X_train

        // We can then do whatever fast array-optimised stuff, and shove it back into an instances object
        Instances reformedTrain = InstanceTools.toWekaInstances(data, classLabels);
    }

    /** Walkthrough of the three .ts loading routes plus basic TimeSeriesInstances inspection. */
    private static void dataHandlingWithTS() throws IOException {
        // We'll be loading the ItalyPowerDemand dataset which is distributed with this codebase
        String basePath = "src/main/java/experiments/data/tsc/";
        String dataset = "ItalyPowerDemand";
        int seed = 1;

        TimeSeriesInstances train;
        TimeSeriesInstances test;
        TimeSeriesResampler.TrainTest trainTest;
        TimeSeriesInstances[] trainTestSplit;

        /*
         * Loading method 1: loading individual files.
         *
         * For loading in a single ts file without performing any kind of sampling.
         */
        train = DatasetLoading.loadTSData(basePath + dataset + "/" + dataset + "_TRAIN.ts");
        test = DatasetLoading.loadTSData(basePath + dataset + "/" + dataset + "_TEST.ts");

        // We could then resample these, while maintaining train/test distributions, using this
        trainTest = TimeSeriesResampler.resampleTrainTest(train, test, seed);
        train = trainTest.train;
        test = trainTest.test;

        /*
         * Loading method 2: sampling directly.
         *
         * For loading in a single ts file and resampling the data
         */
        trainTestSplit = DatasetLoading.sampleTSDataset(basePath, dataset, seed);
        train = trainTestSplit[0];
        test = trainTestSplit[1];

        /*
         * Loading method 3: sampling the build in dataset.
         *
         * Because ItalyPowerDemand is distributed with the codebase, there's a wrapper
         * to sample it directly for quick testing
         */
        trainTestSplit = DatasetLoading.sampleItalyPowerDemandTS(seed);
        train = trainTestSplit[0];
        test = trainTestSplit[1];

        /*
         * Data inspection and handling. We can look at the basic meta info.
         */
        System.out.println("train.getProblemName() = " + train.getProblemName());
        System.out.println("train.getDescription() = " + train.getDescription());
        System.out.println("train.numInstances() = " + train.numInstances());
        System.out.println("train.numClasses() = " + train.numClasses());
        System.out.println("train.getClassLabels() = " + Arrays.toString(train.getClassLabels()));
        System.out.println("train.getClassCounts() = " + Arrays.toString(train.getClassCounts()));
        System.out.println("train.getClassIndexes() = " + Arrays.toString(train.getClassIndexes()));

        /*
         * Example of the data structure format
         */
        // for each instance
        for (TimeSeriesInstance instance : train) {
            // for each dimension in the instance (multivariate support)
            for (TimeSeries series : instance) {
                System.out.println(Arrays.toString(series.toValueArray()));
            }
        }
    }
}
9,064
35.26
116
java
tsml-java
tsml-java-master/src/main/java/examples/HiveCote1Examples.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package examples;

import experiments.ExperimentalArguments;
import experiments.ClassifierExperiments;
import experiments.data.DatasetLoading;
import tsml.classifiers.EnhancedAbstractClassifier;
import tsml.classifiers.dictionary_based.cBOSS;
import tsml.classifiers.interval_based.RISE;
import tsml.classifiers.hybrids.HIVE_COTE;
import tsml.classifiers.interval_based.TSF;
import weka.core.Instance;
import weka.core.Instances;

import java.util.concurrent.TimeUnit;

/**
 * This class demonstrates how to use the HIVE-COTE classifier:
 * basic build/predict, running through the experiments pipeline, assembling the
 * ensemble from stored component results, swapping components, threading and
 * contracting.
 *
 * HIVE-COTE version 0.1: published here
 * HIVE-COTE version 1.0: published here
 */
public class HiveCote1Examples {

    /** Loads ItalyPowerDemand, builds a default (v1.0) HIVE-COTE and reports test accuracy. */
    public static void basicUsage() throws Exception {
        System.out.println(" Basic Usage: load a dataset, build a default config classifier, make some predictions:");
        HIVE_COTE hc = new HIVE_COTE();
        System.out.println(" Current default for HC is to use version 1.0: RISE, TSF, cBOSS and STC");
//        hc.setupHIVE_COTE_1_0(); //called in default constructor
//        hc.setupHIVE_COTE_0_1(); // approximation of original version (STC has changed)
        Instances[] trainTest = DatasetLoading.sampleItalyPowerDemand(0); //Loads the default italy power demand split
        hc.setDebug(true);//Verbose version
        hc.buildClassifier(trainTest[0]);
        int correct=0;
        for(Instance ins: trainTest[1]){
            double c=hc.classifyInstance(ins);
            if(c==ins.classValue())
                correct++;
        }
        System.out.println(" Number correct = "+correct+ " accuracy = "+correct/(double)trainTest[1].numInstances());
    }

    /**
     * Runs HIVE-COTE through the standardised ClassifierExperiments pipeline by
     * passing the same argument strings the command line would supply.
     */
    public static void usageWithExperimentsClass() throws Exception {
        System.out.println(" the class src/main/java/experiments/ClassifierExperiments.java standardises the output of classifiers" +
                " to facilitate easy post processing" );
        System.out.println(" ClassifierExperiments.java allows for configuration through command line arguments. Extensive " +
                "documentation on the possible configurations is at the top of the class" );
        System.out.println(" ClassifierExperiments.java creates a classifier based on switches in src/main/java/experiments/ClassifierLists.java" );
        System.out.println(" This method shows how to use it through string passing via the ExperimentsArguments formatting class" );
        System.out.println(" Command line uses the same string format" );
        System.out.println(" Experiment is configurable for contracting and checkpointing" );
        System.out.println(" However, you cannot yet configure the classifier directly. The purpose of this class is to " +
                "run large scale comaprative studies" );
        System.out.println(" You can, if you wish, add a bespoke option to ClassifierLists.java. Use setBespokeClassifiers" +
                "and remember to add your option to the string array bespoke" );
        //Local run without args, mainly for debugging
        String[] settings=new String[9];
        //Location of data set
        settings[0]="-dp=src/main/java/experiments/data/tsc/";//Where to get data
        settings[1]="-rp=Temp/";//Where to write results
        settings[2]="-gtf=false"; //Whether to generate train files or not
        settings[3]="-cn=HIVE-COTE"; //Classifier name: See ClassifierLists for valid options
        settings[4]="-dn=Chinatown"; //Problem file, added below
        settings[5]="-f=1";//Fold number, added below (fold number 1 is stored as testFold0.csv, its a cluster thing)
        settings[6]="--force=true";//Overwrites existing results if true, otherwise set to false
        settings[7]="-ctr=0";//No time contract
        settings[8]="-cp=0";//No checkpointing
        ClassifierExperiments.debug=true;
        System.out.println("Manually set args:");
        for (String str : settings)
            System.out.println("\t"+str);
        System.out.println("");
        ExperimentalArguments expSettings = new ExperimentalArguments(settings);
        ClassifierExperiments.setupAndRunExperiment(expSettings);
        // Fix: the message previously claimed ".../HIVE-COTE/ChinaTown/Predictions/testFold9.csv".
        // Fold 1 is stored as testFold0.csv (see the -f comment above) and results are laid out
        // as {results}/{classifier}/Predictions/{dataset}/ everywhere else in these examples.
        System.out.println(" The output will be in Temp/HIVE-COTE/Predictions/Chinatown/testFold0.csv");
        System.out.println(" The format of this file is explained in ");
    }

    /**
     * Builds the four HC 1.0 components with train files through the experiments
     * pipeline, then assembles HIVE-COTE from those stored results, both via the
     * experiments class and manually.
     */
    public static void buildingFromComponents() throws Exception {
        System.out.println("We always build HIVE-COTE from constituent components " );
        System.out.printf(" Components results must be in the standard format, generated by ClassifierExperiments.java");
        String problem="Chinatown";
        //Local run without args, mainly for debugging
        String[] settings=new String[6];
        //Location of data set
        settings[0]="-dp=src/main/java/experiments/data/tsc/";//Where to get data
        settings[1]="-rp=C:/Temp";//Where to write results
        settings[2]="-gtf=true"; //HIVE-COTE requires train files
        settings[3]="-cn="; //Classifier name: See ClassifierLists for valid options
        settings[4]="-dn="+problem; //Problem file,
        settings[5]="-f=1";//Fold number, added below (fold number 1 is stored as testFold0.csv, its a cluster thing)
        ClassifierExperiments.debug=true;
        System.out.println("Manually set args:");
        for (String str : settings)
            System.out.println("\t"+str);
        System.out.println("");
        String[] components={"TSF","RISE","cBOSS","STC"};
        // Build each component with train estimates so HIVE-COTE can read them back
        for(String s:components){
            System.out.println("Building "+s);
            settings[3]="-cn="+s; //Classifier name: See ClassifierLists for valid options
            ExperimentalArguments expSettings = new ExperimentalArguments(settings);
            ClassifierExperiments.setupAndRunExperiment(expSettings);
            System.out.println(s+" Finished");
        }
        System.out.println(" Components finished. You can run the load from file using experiments with argument " +
                "HIVE_COTE1.0 ");
        settings[2]="-gtf=false"; //Dont need HC train file
        settings[3]="-cn=HIVE-COTE1.0"; //Classifier name: See ClassifierLists for valid options
        ExperimentalArguments expSettings = new ExperimentalArguments(settings);
        ClassifierExperiments.setupAndRunExperiment(expSettings);
        System.out.println("HIVE-COTE Finished. Results will be in C:/Temp/HIVE-COTE1.0/Predictions/Chinatown/");
        System.out.println(" or just run it yourself");
        // Manual assembly: point HIVE-COTE at the component result files instead of rebuilding them
        HIVE_COTE hc=new HIVE_COTE();
        hc.setBuildIndividualsFromResultsFiles(true);
        hc.setResultsFileLocationParameters("C:/Temp/", problem, 0);
        hc.setClassifiersNamesForFileRead(components);
        Instances train = DatasetLoading.loadData("src/main/java/experiments/data/tsc/Chinatown/Chinatown_TRAIN");
        Instances test = DatasetLoading.loadData("src/main/java/experiments/data/tsc/Chinatown/Chinatown_TEST");
        hc.setDebug(true);//Verbose version
        hc.buildClassifier(train);
        int correct=0;
        for(Instance ins: test){
            double c=hc.classifyInstance(ins);
            if(c==ins.classValue())
                correct++;
        }
        System.out.println(" Number correct = "+correct+ " accuracy = "+correct/(double)test.numInstances());
    }

    /**
     * Shows how to replace the default component set, run the components in
     * separate threads, and combine threading with a train-time contract.
     */
    public static void setClassifiersAndThread() throws Exception {
        System.out.println(" HIVE COTE is configurable in many ways");
        HIVE_COTE hc = new HIVE_COTE();
        System.out.println(" Current default for HC is to use version 1.0: RISE, TSF, cBOSS and STC");
        System.out.printf("Suppose we want different classifiers\n");
        EnhancedAbstractClassifier[] c=new EnhancedAbstractClassifier[2];
        c[0]=new RISE();
        c[1]=new TSF();
        String[] names={"RISE","TSF"};
        hc.setClassifiers(c,names,null);
        String[] n=hc.getClassifierNames();
        for(String s:n)
            System.out.println(" Classifier "+s);
        System.out.println(" We can make it threaded so each component runs in its own thread\n");
        System.out.println(" Threading demonstrated by interleaved printouts\n");
        hc.enableMultiThreading(2);
        Instances[] trainTest = DatasetLoading.sampleItalyPowerDemand(0); //Loads the default italy power demand split
        hc.setDebug(true);
        hc.buildClassifier(trainTest[0]);
        int correct=0;
        for(Instance ins: trainTest[1]){
            double cls=hc.classifyInstance(ins);
            if(cls==ins.classValue())
                correct++;
        }
        System.out.println(" Number correct = "+correct+ " accuracy = "+correct/(double)trainTest[1].numInstances());
        System.out.println(" HIVE COTE is contractable, if its components are contractable.");
        System.out.println(" To set a contract (max run time) use any of the Contractable methods\n");
        hc.enableMultiThreading(1);
        hc.setTrainTimeLimit(10, TimeUnit.SECONDS);
        long t =System.nanoTime();
        hc.buildClassifier(trainTest[0]);
        t =System.nanoTime()-t;
        System.out.println("\t\t Time elapsed = "+t/1000000000+" seconds");
    }

    /**
     * Demonstrates the various ways of setting a train-time contract; only the
     * last limit set before buildClassifier takes effect (10 seconds here).
     */
    public static void contract() throws Exception {
        System.out.println(" HIVE COTE is contractable and checkpointable");
        System.out.println(" these can be set through ClassifierExperiments or directly");
        HIVE_COTE hc = new HIVE_COTE();
        EnhancedAbstractClassifier[] c=new EnhancedAbstractClassifier[3];
        c[0]=new TSF();
        c[1]=new RISE();
        c[2]=new cBOSS();
//        c[0]=new ShapeletTransformClassifier();
        String[] names={"TSF","RISE","cBOSS"};//"STC"};//
        hc.setClassifiers(c,names,null);
        String[] n=hc.getClassifierNames();
        for(String s:n)
            System.out.println(" Classifier "+s);
        Instances train = DatasetLoading.loadData("src/main/java/experiments/data/tsc/Beef/Beef_TRAIN");
        // NOTE(review): 'test' is loaded but never used in this method — this example
        // only times the contracted build. Kept for symmetry with the other examples.
        Instances test = DatasetLoading.loadData("src/main/java/experiments/data/tsc/Beef/Beef_TEST");
        hc.setDebug(true);
        System.out.println(" HIVE COTE is contractable only if its components are contractable.");
        System.out.println(" To set a contract (max run time) use any of the Contractable methods\n");
        //Ways of setting the contract time: each call overrides the previous one
        //Minute, hour or day limit
        hc.setMinuteLimit(1);
        hc.setHourLimit(2);
        hc.setDayLimit(1);
        //Specify units
        hc.setTrainTimeLimit(30, TimeUnit.SECONDS);
        hc.setTrainTimeLimit(1, TimeUnit.MINUTES);
        //Or just give it in nanoseconds (10 seconds — this is the limit actually used)
        hc.setTrainTimeLimit(10000000000L);
        long t =System.nanoTime();
        hc.buildClassifier(train);
        t =System.nanoTime()-t;
        System.out.println("\t\t Time elapsed = "+t/1000000000+" seconds");
    }

    /** Entry point; uncomment the example you want to run. */
    public static void main(String[] args) throws Exception {
        HIVE_COTE hc = new HIVE_COTE();
        System.out.println(" HIVE COTE classifier, location "+hc.getClass().getName());
//        basicUsage();
//        usageWithExperimentsClass();
//        buildingFromComponents();
//        setClassifiersAndThread();
        contract();
    }
}
11,896
48.570833
148
java
tsml-java
tsml-java-master/src/main/java/examples/HiveCote2Examples.java
package examples;

import experiments.ClassifierExperiments;
import experiments.ExperimentalArguments;
import experiments.data.DatasetLoading;
import tsml.classifiers.EnhancedAbstractClassifier;
import tsml.classifiers.dictionary_based.TDE;
import tsml.classifiers.hybrids.HIVE_COTE;
import tsml.classifiers.kernel_based.Arsenal;
import utilities.ClassifierTools;
import weka.core.Instances;

import java.util.concurrent.TimeUnit;

/**
 * Usage examples for the HIVE-COTE 2.0 meta-ensemble: basic build/predict,
 * running through the experiments pipeline, assembling from stored component
 * results, configuration (versions, threading, component choice) and contracting.
 */
public class HiveCote2Examples {

    /** Loads Chinatown, builds a default (v2.0) HIVE-COTE and reports test accuracy. */
    public static void simpleBuild() throws Exception {
        System.out.println("Basic Usage: \n" +
                " - load a dataset\n" +
                " - build a default classifier\n" +
                " - make predictions");
        System.out.println("Current default for HIVE COTE is to use version 2.0:\n" +
                " STC, DrCIF, Arsenal and TDE");
        String problem="Chinatown";
        Instances train= DatasetLoading.loadData("src/main/java/experiments/data/tsc/" + problem + "/" + problem + "_TRAIN"); //load train data of Chinatown dataset
        Instances test= DatasetLoading.loadData("src/main/java/experiments/data/tsc/" + problem + "/" + problem + "_TEST"); //load test data of chinatown dataset
        HIVE_COTE hc2 = new HIVE_COTE();//Defaults to HC V2.0
        hc2.setDebug(true); //Verbose output
        hc2.buildClassifier(train);
        double acc = ClassifierTools.accuracy(test,hc2);
        System.out.println(" HC2 test accuracy on "+problem+" = "+acc);
    }

    /**
     * Runs HC2 through the standardised ClassifierExperiments pipeline using the
     * same argument strings the command line would supply.
     */
    public static void experimentClassBuild() throws Exception {
        System.out.println("The Experiments.java class located in src/main/java/experiments/Experiments.java is used to " +
                "standardise the output of classifiers to make post processing easier");
        System.out.println("\nExperiments.java can be configured through the use of command line arguments" +
                "\n\tDocumentation for these arguments can be seen at the top of the ExperimentalArguments class in Experiments.java");
        System.out.println("Experiments.java creates a classifier based on a switch statement in src/main/java/experiments/ClassifierLists.java");
        System.out.println("\nThe classifier cannot be configured directly, as the purpose of this class is to run large scale comparative experiments");
        System.out.println("However bespoke options can be added to ClassifierLists.java.");
        System.out.println("\tThe option need to be added to the setBespokeClassifiers method and the option name needs " +
                "to be added to the 'bespoke' string array\n");

        //Setting up arguments
        String[] arguments = new String[9];
        arguments[0] = "-dp=src/main/java/experiments/data/tsc/"; //Data location
        arguments[1] = "-rp=C:/Temp/"; //Location of where to write results
        arguments[2] = "-gtf=false"; //Generate train files or not
        arguments[3] = "-cn=HC2"; //Classifier name. A list of valid classifier names can be found in ClassifierLists.java
        arguments[4] = "-dn=Chinatown"; //Dataset name
        arguments[5] = "-f=1"; //Fold number (fold 1 on input is stored zero-indexed as testFold0.csv)
        arguments[6] = "--force=true"; //Overwrites existing results if set to true, otherwise does not
        arguments[7] = "-ctr=0"; //No time contract
        arguments[8] = "-cp=0"; //No checkpointing

        ClassifierExperiments.debug = true;
        System.out.println("Manually set args: ");
        for (String string : arguments){
            System.out.println("\t" + string);
        }
        System.out.println();

        ExperimentalArguments exp = new ExperimentalArguments(arguments);
        ClassifierExperiments.setupAndRunExperiment(exp);
        System.out.println("The output of this will be stored in C:/Temp/HC2/Predictions/Chinatown/testFold0.csv");
    }

    /**
     * Builds the four HC2 components with train files via the experiments pipeline,
     * then assembles HIVE-COTE from the stored results — first through the
     * experiments class, then manually via setBuildIndividualsFromResultsFiles.
     */
    public static void fromComponentBuild() throws Exception {
        //Build some components with train files using experiments
        String problem = "Chinatown";

        //Setting up arguments
        String[] arguments = new String[6];
        arguments[0] = "-dp=src/main/java/experiments/data/tsc/"; //Data location
        arguments[1] = "-rp=C:/Temp/"; //Location of where to write results
        arguments[2] = "-gtf=true"; //Generate train files
        arguments[3] = "-cn="; //Classifier name. A list of valid classifier names can be found in ClassifierLists.java
        arguments[4] = "-dn="+problem; //Dataset name
        arguments[5] = "-f=1"; //Fold number

        ClassifierExperiments.debug = true;
        System.out.println("Manually set arguments:");
        for (String string : arguments){
            System.out.println("\t" + string);
        }
        System.out.println();

        String[] components = {"STC","DrCIF","Arsenal","TDE"};
        // Build each component with train estimates so HIVE-COTE can read them back
        for (String component : components){
            System.out.println("Building component: " + component);
            arguments[3] = "-cn="+component;
            ExperimentalArguments experimentalArguments = new ExperimentalArguments(arguments);
            ClassifierExperiments.setupAndRunExperiment(experimentalArguments);
            System.out.println("Finished component: " + component);
        }
        System.out.println("All components finished");

        //Rebuild from file
        System.out.println("You can now run the load from file using Experiments with the argument:" +
                "\t HIVE-COTE 2.0");
        arguments[2] = "-gtf=false"; // Do not need train files
        arguments[3] = "-cn=HIVE-COTE 2.0"; //Classifier name. A list of valid classifier names can be found in ClassifierLists.java
        ExperimentalArguments experimentalArguments = new ExperimentalArguments(arguments);
        ClassifierExperiments.setupAndRunExperiment(experimentalArguments);
        System.out.println("HIVE-COTE 2.0 finished. Results will be in C:/Temp/HIVE-COTE 2.0/Predictions/Chinatown/");

        System.out.println("Or it can be run manually");
        // Manual assembly: point HIVE-COTE at the component result files instead of rebuilding them
        HIVE_COTE hc2 = new HIVE_COTE();
        hc2.setBuildIndividualsFromResultsFiles(true);
        hc2.setResultsFileLocationParameters("C:/Temp/",problem,0);
        hc2.setClassifiersNamesForFileRead(components);

        Instances train = DatasetLoading.loadData("src/main/java/experiments/data/tsc/"+problem+"/"+problem+"_TRAIN");
        Instances test = DatasetLoading.loadData("src/main/java/experiments/data/tsc/"+problem+"/"+problem+"_TEST");

        hc2.setDebug(true);
        hc2.buildClassifier(train);

        double acc = ClassifierTools.accuracy(test,hc2);
        System.out.println("Accuracy = " + acc);
    }

    /**
     * Demonstrates the configuration surface: switching between the three
     * published versions, threading, and manually choosing the component set.
     * Only the last setup/threading call before buildClassifier takes effect.
     */
    public static void configuration() throws Exception {
        System.out.println("HIVE-COTE is very configurable");
        HIVE_COTE hc2 = new HIVE_COTE();
        System.out.println("The current default for HC is to use version 2.0: STC, DrCIF, Arsenal, TDE");

        // Set up to debug print
        hc2.setDebug(true);

        // Switch between versions
        System.out.println("HIVE-COTE can be configured between 3 main versions:");
        System.out.println("\t 0.1 - EE, STC, RISE, BOSS, TSF");
        System.out.println("\t 1.0 - STC, RISE, cBOSS, TSF");
        System.out.println("\t 2.0 - STC, DrCIF, Arsenal, TDE");
        hc2.setupHIVE_COTE_0_1(); //Version 0.1
        hc2.setupHIVE_COTE_1_0(); //Version 1.0
        hc2.setupHIVE_COTE_2_0(); //Version 2.0

        //Set up as threaded
        System.out.println("HIVE-COTE 2.0 can be threaded");
        hc2.enableMultiThreading(2); //Set thread limit of 2
        hc2.enableMultiThreading(4); //Set thread limit of 4
        hc2.enableMultiThreading();//Set thread limit to the number of available processors subtract 1
        //For example a 4 core processor would have 3 cores allocated

        //Build from any classifiers
        System.out.println("HIVE-COTE can have the classifiers to be used set manually");
        System.out.println("Manually set classifiers");
        EnhancedAbstractClassifier[] classifiers = new EnhancedAbstractClassifier[2];
        classifiers[0] = new Arsenal();
        classifiers[1] = new TDE();
        String[] classifierNames = {"Arsenal","TDE"};
        hc2.setClassifiers(classifiers,classifierNames,null);
        String[] names = hc2.getClassifierNames();
        for(String name : names){
            System.out.println("\tClassifier: " + name);
        }

        String problem = "Chinatown";
        Instances train = DatasetLoading.loadData("src/main/java/experiments/data/tsc/"+problem+"/"+problem+"_TRAIN");
        Instances test = DatasetLoading.loadData("src/main/java/experiments/data/tsc/"+problem+"/"+problem+"_TEST");

        hc2.buildClassifier(train);
        double acc = ClassifierTools.accuracy(test,hc2);
        System.out.println("Accuracy = " + acc);
    }

    /**
     * Contracting restricts the build time. It should be noted
     * 1. Contracts are approximate. It is not possible with all classifiers to exactly control the build time.
     * Points to note
     * TDE: TDE always builds a bagged model, and if train estimates are required, uses out of bag estimates. This
     * second stage can actually take a long time, since transform is needed for each out of bag case. We can do no
     * more than estimate how long this takes, and it may well go over
     * STC: Contract time is split three ways: transform search, build final rotation forest model and build bagged
     * model for estimates. The division is decided at the beginning of the build, and so is approximate
     * CIF: If producing train estimates, the time is evenly split between full build and OOB build.
     * Arsenal:
     * 2. The contract relates only to the training of the classifier. The test time can also be quite a large
     * overhead, especially for TDE, and so the overall train/test build time may take considerably longer than the
     * contract
     * 3. Train estimate: HIVE_COTE can produce its own train estimates, which it does through a form of bagging
     * (CLARIFY WITH JAMES)
     * 4. Threaded. The HC2.0 version has limited threading: if threaded, each classifier is built in its own thread.
     * If threaded and contracted, the assumption is the contract time is available for each thread, so that the overall
     * build time will be approximately the contract time.
     */
    public static void contracting() throws Exception {
        //Example with a 1 hour sequential contract, each classifier gets approximately 15 mins
        Instances train = DatasetLoading.loadData("src/main/java/experiments/data/tsc/ArrowHead/ArrowHead_TRAIN");

        HIVE_COTE hc2 = new HIVE_COTE();
        System.out.println("HIVE COTE 2.0 is contractable");
        System.out.println("Contract time can be set via the use Contractable methods");
        //Ways of setting the contract time: each call overrides the previous one
        //Set by minute, hour and day
        hc2.setMinuteLimit(1);
        hc2.setHourLimit(2);
        hc2.setDayLimit(3);
        //Set single unit limits
        hc2.setOneMinuteLimit();
        hc2.setOneHourLimit();
        hc2.setOneDayLimit();
        //Set by specifying TimeUnit
        hc2.setTrainTimeLimit(42, TimeUnit.MINUTES);
        hc2.setTrainTimeLimit(10,TimeUnit.SECONDS);

        System.out.println("The first example is a 1 hour sequential contract");
        System.out.println("Each classifier gets approximately 15 minutes train time");
        hc2.setHourLimit(1);
        hc2.setDebug(true);

        long time = System.nanoTime();
        hc2.buildClassifier(train);
        time = System.nanoTime() - time;
        System.out.println("\t\t Time elapsed = "+time/1000000000+" seconds");

        //Example with a 1 hour threaded contract, each classifier gets approximately 1 hour.
        HIVE_COTE hc2Threaded = new HIVE_COTE();
        System.out.println("HIVE COTE 2.0 can be threaded");
        System.out.println("The amount of threads can be specified");
        // set by number
        // hc2.enableMultiThreading(2);
        //with no argument it will allocate the number of available processors minus 1
        hc2Threaded.enableMultiThreading();

        System.out.println("The second example is a 1 hour threaded contract");
        System.out.println("Each classifier gets approximately 1 hour train time");
        hc2Threaded.setHourLimit(1);
        hc2Threaded.setDebug(true);

        time = System.nanoTime();
        hc2Threaded.buildClassifier(train);
        time = System.nanoTime() - time;
        System.out.println("\t\t Time elapsed = "+time/1000000000+" seconds");
    }

    /** Entry point; uncomment the example you want to run. */
    public static void main(String[] args) throws Exception {
        HIVE_COTE hc2 = new HIVE_COTE();
        System.out.println("HIVE-COTE Classifier class location: " + hc2.getClass().getName());
        simpleBuild();
        //experimentClassBuild();
        //fromComponentBuild();
        //configuration();
        //contracting();
    }
}
13,047
49.378378
153
java
tsml-java
tsml-java-master/src/main/java/examples/ThoroughEvaluation.java
/*
 * Copyright (C) 2019 xmw13bzu
 *
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package examples;

import evaluation.MultipleEstimatorEvaluation;
import evaluation.storage.EstimatorResultsCollection;
import experiments.ExperimentalArguments;
import experiments.ClassifierExperiments;

import java.util.Arrays;

/**
 * Examples on how to handle collections of results and use the MultipleEstimatorEvaluation pipeline
 *
 * @author James Large (james.large@uea.ac.uk)
 */
public class ThoroughEvaluation {

    /**
     * Generates results for three classifiers over two datasets, loads them back
     * as an EstimatorResultsCollection, demonstrates slice/retrieve, then runs
     * the MultipleEstimatorEvaluation comparison pipeline over them.
     */
    public static void main(String[] args) throws Exception {

        // First of all, let's generate some results. You'll already have these if you ran
        // Ex04_ThoroughExperiments, but we'll do it here if not.

        // NOTE: Again, if you want to run this file, you'll need to define an
        // acceptable location to write some small files as examples:
        String resultsPath = "C:/Temp/ueatscExamples/";

        String[] classifiers = { "SVML", "ED", "C45" };
        String[] datasets = { "ItalyPowerDemand", "Beef" };
        int numFolds = 3;

        ExperimentalArguments expThreaded = new ExperimentalArguments();
        expThreaded.dataReadLocation = "src/main/java/experiments/data/tsc/";
        expThreaded.resultsWriteLocation = resultsPath;

        ClassifierExperiments.setupAndRunMultipleExperimentsThreaded(expThreaded, classifiers, null, datasets, 0, numFolds);

        // We have a lot of various tools for handling results that have built up over time
        // and are continuing to be developed. These results analysis tools in particular
        // are firstly built with our own research output in mind, public usability second.
        // The apis and functionality are updated over time.

        // Let's load back in all the results files we made:
        EstimatorResultsCollection crc = new EstimatorResultsCollection();
        crc.addEstimators(classifiers, resultsPath);
        crc.setDatasets(datasets);
        crc.setFolds(numFolds);
        crc.setSplit_Test();
        crc.load();

        System.out.println(crc);

        // We now basically have a brutally simple primitive array of results, organised as
        // [split][classifier][dataset][fold]

        // Functionality to interact with these is in its infancy, but there are two
        // main operations: SLICE and RETRIEVE

        // slice...() will give you a sub-collection of all results of that particular
        // split/classifier/dataset/fold
        EstimatorResultsCollection subCrc = crc.sliceDataset("ItalyPowerDemand");
        subCrc = subCrc.sliceEstimator("ED");

        System.out.println(subCrc);

        // retrieve...() will get some piece of information, e.g. an eval metric,
        // from each ClassifierResults object in the collection and return it to
        // you in a parallel array format
        double[][][][] subCrcAccs = subCrc.retrieveAccuracies();

        // We know there are only the three folds of ItalyPowerDemand in here, let's get those
        double[] accs = subCrcAccs[0][0][0];
        System.out.println("ED test accs on ItalyPowerDemand: " + Arrays.toString(accs));

        // The MultipleEstimatorEvaluation (MEE) pipeline is a bit of a beast, and is itself
        // only a front end-api for EstimatorResultsAnalysis, which is an absolute monster.
        // These especially will get updated over time when desire/need/motivation for
        // software engineering is found

        // Broadly, give it results (we plan to update MEE to take results via a EstimatorResultsCollection,
        // but at present you load results in a very similar way), give it evaluation metrics
        // to compare on and settings for things like diagram creation etc., call runComparison()
        // and let it split out a LOT of csv and xls files, and matlab figs + pdf files if diagram
        // creation is turned on

        // See MEE.main and examples for more in-depth stuff and different options

        // Typically we'd write somewhere else, but for this example we'll write into the resultsPath again.
        MultipleEstimatorEvaluation mee = new MultipleEstimatorEvaluation(resultsPath, "Ex05_ThoroughEvaluation", numFolds);

        mee.setTestResultsOnly(true); // We didnt also produce e.g. train data estimates
        mee.setBuildMatlabDiagrams(false); // turning this off for example, see *1 below
        mee.setCleanResults(true); // deletes the individual predictions once per-object evals are found (acc in this case) to save memory
        mee.setDebugPrinting(true);

        mee.setUseAccuracyOnly(); // using accuracy only to reduce number of files produced in this example

        mee.setDatasets(datasets);

        //general rule of thumb: set/add/read the classifiers as the last thing before running
        mee.readInEstimators(classifiers, resultsPath);

        mee.runComparison();

        // A whole bunch of files should now have been spat out. Have a poke around them.
        // There's little documentation on exactly what each output represents, but most
        // should be clear from their file names and locations

        // Main file of interest really is the ...ResultsSheet.xls on the top level, which is
        // a summary of the rest of it.

        // *1. I have MATLAB 2016b. There's nothing particularly bespoke about the things we're doing in
        // these scripts so all newer versions of MATLAB should run them, and maybe some older.
        // I don't have a list of versions it runs on, unfortunately.
        // If you'd like to reimplement these diagrams in python, you're more than welcome to.

        // Note that despite turning figure creation off, directories and supporting files
        // for the creation of them were still made, to more easily be able to make the figures
        // afterwards.
    }
}
6,975
45.198675
149
java
tsml-java
tsml-java-master/src/main/java/examples/ThoroughExperiments.java
/* * Copyright (C) 2019 xmw13bzu * * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package examples; import experiments.ClassifierExperiments; import experiments.ExperimentalArguments; /** * Examples showing how to use the ClassifierExperiments class * * @author James Large (james.large@uea.ac.uk) */ public class ThoroughExperiments { public static void main(String[] args) throws Exception { // NOTE: If you want to run this file, you'll need to define an // acceptable location to write some small files as examples: String resultsPath = "C:/Temp/ueatscExamples/"; // We've seen how to load data, construct a classifer, and evaluate it in our own code // Our main use case however is running distributed experiments of many classifiers // over many datasets and resamples of each dataset in order to compare them. // The experiments class handles this main use case. It will take a classifier, dataset, // resample id, and read/write locations at minimum, perform the evaluation, and write // the results in the ClassifierResults format. 
///////// Running a job from command line: String[] exampleMinimalArgs = { "--dataPath=src/main/java/experiments/data/tsc/", // where to read data from "--resultsPath="+resultsPath, // where to write results to "--classifierName=RandF", // loaded from ClassifierLists.setClassifier, see Ex02_Classifiers "--datasetName=ItalyPowerDemand", // loaded using sampleDataset, see Ex01_DataHandling "--fold=1", // used as the seed. Because of our cluster, this is one-indexed on input, but immediately decremented to be zero-indexed // above are the required args, all others are optional // for this run, we'll also forceEvaluation since, the experiment // by default will abort if the result file already exists "--force=true" }; ClassifierExperiments.main(exampleMinimalArgs); // or actually from command line e.g.: // java -jar ueatsc.jar -dp=src/main/java/experiments/data/tsc/ ... ///////// Running a job from code: // When running locally from code, it may be easier to just set up the // ExperimentalArguments object yourself ExperimentalArguments exp = new ExperimentalArguments(); exp.dataReadLocation = "src/main/java/experiments/data/tsc/"; exp.resultsWriteLocation = resultsPath; exp.estimatorName = "RandF"; exp.datasetName = "ItalyPowerDemand"; exp.foldId = 0; // note that since we're now setting the fold directly, we can resume zero-indexing // here, we wont force the evaluation. see the difference ClassifierExperiments.setupAndRunExperiment(exp); // Running many jobs from code: // Here, we'll set up to run the jobs threaded. 
You can also easily imagine // just setting up a loop over each classifier, dataset, fold and calling setupAndRunExperiment above String[] classifiers = { "SVML", "ED", "C45" }; // with entries in ClassifierLists.setClassifier, see Ex02_Classifiers String[] datasets = { "ItalyPowerDemand", "Beef" }; // both available at the dataReadLocation int numFolds = 3; ExperimentalArguments expThreaded = new ExperimentalArguments(); expThreaded.dataReadLocation = "src/main/java/experiments/data/tsc/"; // set the common data read location expThreaded.resultsWriteLocation = resultsPath; // set the common results write location // set any other common settings you want here, e.g. force // classifier, dataset, fold shall be assigned internally across threads // will use one thread per core by default ClassifierExperiments.setupAndRunMultipleExperimentsThreaded(expThreaded, classifiers, null, datasets, 0, numFolds); } }
4,970
42.99115
145
java
tsml-java
tsml-java-master/src/main/java/experiments/BasicBuildTests.java
/* Class to do basic build tests for all classifiers */ package experiments; import experiments.data.DatasetLoading; import tsml.transformers.Transformer; import utilities.ClassifierTools; import weka.classifiers.Classifier; import weka.core.Instances; import java.io.IOException; /** * Does basic sanity check builds for all listed classifiers and transformers. Does not guarantee correctness, * just checks they all build and produce output * * @author ajb */ public class BasicBuildTests { public static void buildAllClassifiers(String[] problems, String[] classifiers, String path) throws IOException { for(String str:problems){ System.out.println("Building all for problem "+str); Instances train = DatasetLoading.loadData(path+str+"\\"+str+"_TRAIN.arff"); Instances test = DatasetLoading.loadData(path+str+"\\"+str+"_TEST.arff"); for(String cls:classifiers){ System.out.print("\t Building "+cls+" .... "); Classifier c= ClassifierLists.setClassifierClassic(cls,0); try{ c.buildClassifier(train); System.out.print("Built successfully. Accuracy = "); double a=ClassifierTools.accuracy(test, c); System.out.println(a); }catch(Exception e){ System.out.println("Classifier failed to build with exception "+e); // e.printStackTrace(); } } } } public static void buildAllTransforms(String[] problems, String[] transforms, String path) throws IOException { for(String str:problems){ System.out.println("Transforming all all for problem "+str); Instances train = DatasetLoading.loadData(path+str+"\\"+str+"_TRAIN.arff"); Instances test = DatasetLoading.loadData(path+str+"\\"+str+"_TEST.arff"); for(String trans:transforms){ System.out.print("\t Building "+trans+" .... "); Transformer f = TransformLists.setClassicTransform(trans,0); try{ Instances trainTrans=f.transform(train); System.out.print("\tTrain transformed successfully. Prior to Trans length = "+(train.numAttributes()-1)); Instances testTrans=f.transform(test); System.out.println("\t\t Test transformed successfully. 
Length = "+(testTrans.numAttributes()-1)); }catch(Exception e){ System.out.println("Transform failed to build with exception "+e); e.printStackTrace(); System.exit(0); } } } } public static void main(String[] args) throws IOException { System.out.println("Testing all SimpleBatch filters do not crash"); String dataPath="src\\main\\java\\experiments\\data\\tsc\\"; String[] problems={"ItalyPowerDemand","Chinatown","Beef"}; String[] transforms=TransformLists.allFilters; buildAllTransforms(problems,transforms,dataPath); System.out.println("Testing core functionality of all TSC classifiers"); String[] classifiers=ClassifierLists.allUnivariate; buildAllClassifiers(problems,classifiers,dataPath); } }
3,358
40.9875
125
java
tsml-java
tsml-java-master/src/main/java/experiments/BasicReproductionTests.java
/* * Copyright (C) 2019 xmw13bzu * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package experiments; import evaluation.storage.ClassifierResults; import java.io.File; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.text.SimpleDateFormat; import java.util.*; import java.util.concurrent.TimeUnit; import java.util.logging.Level; import java.util.logging.Logger; import org.junit.Test; import utilities.ClassifierTools; import utilities.FileHandlingTools; import weka.classifiers.Classifier; import weka.core.Randomizable; import machine_learning.classifiers.ensembles.CAWPE; /** * * Tests to compare test accuracies for important classifier on a quick italy power * demand run to saved expected results, and to recreate results/analysis for a cawpe paper section. 
* * Just confirms that old results are still reproducible * * @author James Large (james.large@uea.ac.uk) */ public class BasicReproductionTests { public static final int defaultSeed = 0; public static boolean failTestsOnTimingsDifference = false; public static double timingEqualityThreshold = 1.2; public static String reproductionDirectory = "src/main/java/experiments/reproductions/classifiers/"; static { new File(reproductionDirectory).mkdirs(); } private static final String tsClassifiers = "tsml.classifiers."; private static final String extraClassifiers = "machine_learning.classifiers."; public static final String[] classifierPaths = { tsClassifiers + "dictionary_based.BagOfPatternsClassifier", tsClassifiers + "dictionary_based.SAXVSM", tsClassifiers + "dictionary_based.WEASEL", tsClassifiers + "dictionary_based.cBOSS", tsClassifiers + "dictionary_based.TDE", tsClassifiers + "distance_based.DTWCV", tsClassifiers + "distance_based.proximity.ProximityForest", tsClassifiers + "distance_based.ProximityForestWrapper", tsClassifiers + "distance_based.SlowDTW_1NN", // tsClassifiers + "hybrids.HIVE_COTE", //assumed to cover its consituents tsClassifiers + "interval_based.LPS", tsClassifiers + "interval_based.TSF", tsClassifiers + "interval_based.RISE", tsClassifiers + "interval_based.STSF", tsClassifiers + "interval_based.CIF", tsClassifiers + "interval_based.DrCIF", tsClassifiers + "shapelet_based.FastShapelets", tsClassifiers + "shapelet_based.LearnShapelets", // tsClassifiers + "shapelet_based.ROCKETClassifier", //requires env variable, transform covered extraClassifiers + "PLSNominalClassifier", extraClassifiers + "kNN", extraClassifiers + "ensembles.CAWPE", extraClassifiers + "ensembles.stackers.SMLR", }; //////////////////////// // ClassifierResults files will store the prob distributions to 6 decimal places // by default, while recreted results in memory will have arbitrary precision // Compare doubles based of the probability dists with these funcs // // 
Affects: prediction dists themselves, NLL // Ignores: ACC, AUROC, BALACC, these should be the same either way, despite being doubles. // possible mega edge case with AUROC where the higher precision resolves a tie differently // in the ordering of predictions. If this is a case, need to just blanket round to 6 places // before finding the values of stats public static final double eps = 10e-6; public static boolean doubleEqual(double v1, double v2) { return Math.abs(v1 - v2) < eps; } public static boolean doubleArrayEquals(double[] a1, double[] a2) { for (int i = 0; i < a1.length; i++) if (!doubleEqual(a1[i], a2[i])) return false; return true; } public static class ExpectedClassifierResults { public String simpleClassifierName; //simple unconditioned class name public String fullClassifierName; //includes package paths for construction public Classifier classifier = null; public ClassifierResults results; public String dateTime; public long time; public ExpectedClassifierResults(File resFile) throws Exception { simpleClassifierName = resFile.getName().split("\\.")[0]; //without any filetype extensions results = new ClassifierResults(resFile.getAbsolutePath()); String[] p = results.getParas().split(","); fullClassifierName = p[0].trim(); dateTime = p[1].trim(); time = Long.parseLong(p[2].trim()); } public ExpectedClassifierResults(String fullClassName) throws Exception { fullClassifierName = fullClassName; String[] t = fullClassName.split("\\."); simpleClassifierName = t[t.length-1]; } public void save(String directory) throws Exception { directory.replace("\\", "/"); if (!directory.endsWith("/")) directory+="/"; Date date = new Date(); SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); dateTime = formatter.format(date); time = System.currentTimeMillis(); results.setDescription("Generated by BasicReproductionTests at " + dateTime); //saving date in a couple formats for future-proofing results.setParas(fullClassifierName + ", " + dateTime + ", 
" + time); results.writeFullResultsToFile(directory + simpleClassifierName + ".csv"); } /** * todo should obviously go into classifierresults itself, and be spread around * the sub-modules of it (predictions, etc) one that class is split up * * Does not compare every tiny thing (for now), i.e. does not test for equality * of characters in the constructed file, for example. Timings are very unlikely to * be exact, meta info on the first line might change over time but that's not * the target of these tests, etc. * * Tests for equality of summary performance metrics, equality of the * last five predictions (prob distributions), and (to be tested on stability) * pseudo-equality on the prediction times of the last 5 predictions and the * build times. * * Pseudo equality is defined as being within some proportional threshold of the * expected value (timingEqualityThreshold, default 2). Because we are using the * last five predictions, the JVM should have sorted out it's caching issues * and allocated space with can make the timings of the first few instances of any * particular operation far larger than normal */ public boolean equal(ClassifierResults newResults) throws Exception { newResults = ClassifierResults.util_roundAllPredictionDistsToDefaultPlaces(newResults); results.findAllStatsOnce(); newResults.findAllStatsOnce(); boolean res = true; ///////////////// SUMMARY PERFORMANCE METRICS if (results.getAcc() != newResults.getAcc()) { System.out.println("ACCURACY DIFFERS, exp="+results.getAcc()+" new="+newResults.getAcc()); res = false; } if (results.balancedAcc!= newResults.balancedAcc) { System.out.println("BALANCED ACCURACY DIFFERS, exp="+results.balancedAcc+" new="+newResults.balancedAcc); res = false; } if (results.meanAUROC != newResults.meanAUROC) { System.out.println("AUROC DIFFERS, exp="+results.meanAUROC+" new="+newResults.meanAUROC); res = false; } if (!doubleEqual(results.nll, newResults.nll)) { //see comment at doubleEqual System.out.println("NLL DIFFERS, 
exp="+results.nll+" new="+newResults.nll); res = false; } ///////////////// BUILD TIMES //assuming sub-millisecond timings are unreliable anyway long t1 = TimeUnit.MILLISECONDS.convert(results.getBuildTimeInNanos(), TimeUnit.NANOSECONDS); long t2 = TimeUnit.MILLISECONDS.convert(newResults.getBuildTimeInNanos(), TimeUnit.NANOSECONDS); if (t1*timingEqualityThreshold < t2 || t1/timingEqualityThreshold > t2) { if (failTestsOnTimingsDifference) { System.out.println("BUILD TIME OUTSIDE THRESHOLD, exp="+t1+" new="+t2); res = false; } } ///////////////// FINAL FIVE PREDICTIONS for (int i = 0; i < 5; i++) { double[] expDist = results.getProbabilityDistribution(results.numInstances()-1-i); double[] newDist = newResults.getProbabilityDistribution(newResults.numInstances()-1-i); if (!doubleArrayEquals(expDist, newDist)) { System.out.println("PREDICTION DIST 'NUMINSTS-"+i+"' DIFFERS, exp="+Arrays.toString(expDist)+" new="+Arrays.toString(newDist)); res = false; } long tt1 = results.getPredictionTimeInNanos(results.numInstances()-1-i); long tt2 = newResults.getPredictionTimeInNanos(newResults.numInstances()-1-i); t1 = TimeUnit.MILLISECONDS.convert(tt1, TimeUnit.NANOSECONDS); t2 = TimeUnit.MILLISECONDS.convert(tt2, TimeUnit.NANOSECONDS); if (t1*timingEqualityThreshold < t2 || t1/timingEqualityThreshold > t2) { if (failTestsOnTimingsDifference) { System.out.println("PREDICTION TIME NUMINSTS-"+i+" OUTSIDE THRESHOLD, exp="+t1+" new="+t2); res = false; } } } return res; } } public static Classifier constructClassifier(String fullClassifierName) { Classifier inst = null; try { Class c = Class.forName(fullClassifierName); inst = (Classifier) c.newInstance(); if (inst instanceof Randomizable) ((Randomizable)inst).setSeed(defaultSeed); else { Method[] ms = c.getMethods(); for (Method m : ms) { if (m.getName().equals("setSeed") || m.getName().equals("setRandSeed")) { m.invoke(inst, defaultSeed); break; } } } } catch (ClassNotFoundException ex) { 
Logger.getLogger(BasicReproductionTests.class.getName()).log(Level.SEVERE, null, ex); } catch (InstantiationException ex) { Logger.getLogger(BasicReproductionTests.class.getName()).log(Level.SEVERE, null, ex); } catch (IllegalAccessException ex) { Logger.getLogger(BasicReproductionTests.class.getName()).log(Level.SEVERE, null, ex); } catch (IllegalArgumentException ex) { Logger.getLogger(BasicReproductionTests.class.getName()).log(Level.SEVERE, null, ex); } catch (InvocationTargetException ex) { Logger.getLogger(BasicReproductionTests.class.getName()).log(Level.SEVERE, null, ex); } return inst; } public static void generateMissingExpectedResults() throws Exception { List<String> failedClassifiers = new ArrayList<>(); List<String> existingFiles = Arrays.asList((new File(reproductionDirectory)).list()); for (String classifierPath : classifierPaths) { String[] t = classifierPath.split("\\."); String simpleClassifierName = t[t.length-1]; boolean exists = false; for (String existingFile : existingFiles) { if (simpleClassifierName.equals(existingFile.split("\\.")[0])) { exists = true; break; } } if (exists) continue; else { System.out.println("Attempting to generate missing result for " + simpleClassifierName); } if (!generateExpectedResult(classifierPath)) failedClassifiers.add(simpleClassifierName); } System.out.println("\n\n\n"); System.out.println("Failing classifiers = " + failedClassifiers); } public static void generateAllExpectedResults() throws Exception { List<String> failedClassifiers = new ArrayList<>(); for (String classifierPath : classifierPaths) { String[] t = classifierPath.split("\\."); String simpleClassifierName = t[t.length-1]; if (!generateExpectedResult(classifierPath)) failedClassifiers.add(simpleClassifierName); } System.out.println("\n\n\n"); System.out.println("Failing classifiers = " + failedClassifiers); } public static boolean generateExpectedResult(String classifierPath) throws Exception { ExpectedClassifierResults expres = new 
ExpectedClassifierResults(classifierPath); boolean worked = true; try { expres.classifier = constructClassifier(classifierPath); } catch (Exception e) { System.err.println(expres.simpleClassifierName + " construction FAILED"); System.err.println(e); e.printStackTrace(); worked = false; } try { expres.results = ClassifierTools.testUtils_evalOnIPD(expres.classifier); } catch (Exception e) { System.err.println(expres.simpleClassifierName + " evaluation on ItalyPowerDemand FAILED"); System.err.println(e); e.printStackTrace(); worked = false; } if (worked) { expres.save(reproductionDirectory); System.err.println(expres.simpleClassifierName + " evaluated and saved SUCCESFULLY, IPD acc = " + expres.results.getAcc()); } return worked; } public static boolean confirmAllExpectedResultReproductions() throws Exception { System.out.println("--confirmAllExpectedResultReproductions()"); File[] expectedResults = FileHandlingTools.listFiles(reproductionDirectory); if (expectedResults == null) throw new Exception("No expected results saved to compare to, dir="+reproductionDirectory); List<String> failedClassifiers = new ArrayList<>(); for (File expectedResultFile : expectedResults) { ExpectedClassifierResults expres = new ExpectedClassifierResults(expectedResultFile); Classifier c = constructClassifier(expres.fullClassifierName); ClassifierResults newres = ClassifierTools.testUtils_evalOnIPD(c); if (expres.equal(newres)) System.out.println("\t" + expres.simpleClassifierName + " all good, parity with results created " + expres.dateTime); else { System.out.println("\t" + expres.simpleClassifierName + " was NOT recreated successfully! no parity with results created " + expres.dateTime); failedClassifiers.add(expres.simpleClassifierName); } } if (failedClassifiers.size() > 0) { System.out.println("\n\n\n"); System.out.println("Failing classifiers = " + failedClassifiers); return false; } return true; } /** * Test of buildCAWPEPaper_AllResultsForFigure3 method, of class CAWPE. 
* * Larger scale test (~19 secs locally), one that @jamesl used often before formulating into unit test * * Implicitly provides tests for the * -cross validation evaluator * -multiple classifier evaluation pipeline * -basic experiments setup with soem built-in weka classifiers * -slightly more bespoke ensemble experiments setup * -datasets resampling */ public static boolean testBuildCAWPEPaper_AllResultsForFigure3() throws Exception { System.out.println("--buildCAWPEPaper_AllResultsForFigure3()"); ClassifierExperiments.beQuiet = true; CAWPE.buildCAWPEPaper_AllResultsForFigure3(""); File f = new File("Analysis/UCICAWPEvsHeteroEnsembles_BasicClassifiers/UCICAWPEvsHeteroEnsembles_BasicClassifiers_BIGglobalSummary.csv"); // assertTrue(f.exists()); //read in summary for later comparison Scanner scan = new Scanner(f); StringBuilder sb = new StringBuilder(); while (scan.hasNext()) { String t = scan.nextLine(); if (t.contains("ExtraTimeForEst")) //this is now the first timing metric, these can't be reliably reproduced ofc so ignore break; sb.append(t).append("\n"); } scan.close(); //confirm folder structure all there // assertTrue(new File("Analysis/UCICAWPEvsHeteroEnsembles_BasicClassifiers/Timings/TRAIN/TRAINTrainTimes_SUMMARY.csv").exists()); // assertTrue(new File("Analysis/UCICAWPEvsHeteroEnsembles_BasicClassifiers/Timings/TEST/TESTAvgPredTimes_SUMMARY.csv").exists()); // for (String set : new String[] { EstimatorResultsAnalysis.trainLabel, EstimatorResultsAnalysis.testLabel, EstimatorResultsAnalysis.trainTestDiffLabel }) { // for (PerformanceMetric metric : PerformanceMetric.getDefaultStatistics()) { // String name = metric.name; // assertTrue(new File("Analysis/UCICAWPEvsHeteroEnsembles_BasicClassifiers/"+name+"/"+set+"/"+set+name+"_SUMMARY.csv").exists()); // } // } //clean up the generated files FileHandlingTools.recursiveDelete("Analysis/"); FileHandlingTools.recursiveDelete("Results/"); // assertTrue(!new File("Analysis").exists()); // assertTrue(!new 
File("Results").exists()); //confirm summary of results are the same (implying individual base classifier and ensemble results for folds are correct) //ignores timings, as no realistic way to make those equivalent /* String[] dataHeaders = { "UCI", }; String[] dataPaths = { "src/main/java/experiments/data/uci/" }; String[][] datasets = { { "hayes-roth", "iris", "teaching" } }; String writePathResults = writePathBase + "Results/"; String writePathAnalysis = writePathBase + "Analysis/"; int numFolds = 3; */ /* this was the expected output prior to setClassifier updates 2019_10_16, which revealed that mlp was not beingc correctly seeded (defaulted to 0). String expectedBigGlobalSummary = "ACC:TESTACC,CAWPE,NBC,WMV,RC,MV,ES,SMLR,SMM5,PB,SMLRE\n" + "AvgTESTACCOverDsets:,0.7285445094217025,0.7318294707963324,0.7172998339470075,0.7145563497220419,0.6885834957764781,0.7117738791423003,0.7191336365605373,0.7116821890116237,0.6928972637354703,0.7012410656270306\n" + "AvgTESTACCRankOverDsets:,3.1666666666666665,3.1666666666666665,5.0,5.333333333333333,5.833333333333333,6.0,6.333333333333333,6.333333333333333,6.833333333333333,7.0\n" + "StddevOfTESTACCOverDsets:,0.22008900216444294,0.20946693560070861,0.22754669947650313,0.2282133513717843,0.2383855215392628,0.2202751366996902,0.22341886857881665,0.24839719907549082,0.25329683821988797,0.2409493775423368\n" + "AvgOfStddevsOfTESTACCOverDsetFolds:,0.03279891126799218,0.024754101494977986,0.04849787496303352,0.046816397436865304,0.04897749514576629,0.047987230632012545,0.024195798008593224,0.03431576869313286,0.0370264191912524,0.03595685865637037\n" + "StddevsOfTESTACCRanksOverDsets:,1.0408329997330663,2.0207259421636903,1.3228756555322954,1.755942292142123,3.6170890690351176,2.6457513110645907,3.0550504633038935,3.7859388972001824,2.9297326385411573,5.196152422706632\n" + "\n" + "BALACC:TESTBALACC,CAWPE,NBC,WMV,RC,MV,ES,SMLR,SMM5,PB,SMLRE\n" + 
"AvgTESTBALACCOverDsets:,0.7322852456185789,0.7398326210826212,0.7129216308382974,0.7106769619269618,0.6683040878874212,0.7025049641716308,0.7285234826901493,0.7235676206509541,0.6870069282569283,0.712745834412501\n" + "AvgTESTBALACCRankOverDsets:,3.1666666666666665,3.1666666666666665,5.0,5.333333333333333,5.833333333333333,6.333333333333333,6.333333333333333,6.333333333333333,6.5,7.0\n" + "StddevOfTESTBALACCOverDsets:,0.21979813009604562,0.2075778061006703,0.22885704565704787,0.22951237016096734,0.25279977059148956,0.2238033387573712,0.22506552355121331,0.25286563032293463,0.25503069275904094,0.2448594862094281\n" + "AvgOfStddevsOfTESTBALACCOverDsetFolds:,0.03455598374923909,0.024060473815657058,0.05177301734816558,0.050439169878735056,0.05127020812811125,0.05645152922577826,0.01776687586864427,0.03134730161003507,0.03765792057591982,0.030328734418572856\n" + "StddevsOfTESTBALACCRanksOverDsets:,1.0408329997330663,2.0207259421636903,1.3228756555322954,1.755942292142123,3.6170890690351176,3.055050463303893,3.0550504633038935,3.7859388972001824,2.598076211353316,5.196152422706632\n" + "\n" + "AUROC:TESTAUROC,CAWPE,SMM5,PB,SMLR,WMV,NBC,RC,MV,ES,SMLRE\n" + "AvgTESTAUROCOverDsets:,0.8500076854235693,0.8156226498687755,0.8214985405823803,0.8176827886374668,0.8228581929729067,0.819681196812097,0.8175782745531306,0.8108473566884643,0.8099753212757199,0.797600009436251\n" + "AvgTESTAUROCRankOverDsets:,1.3333333333333333,4.666666666666667,4.666666666666667,5.0,5.333333333333333,5.333333333333333,6.0,7.0,7.333333333333333,8.333333333333334\n" + "StddevOfTESTAUROCOverDsets:,0.152560502120108,0.18855774768880665,0.19229971562576004,0.18018940087531557,0.14586931174236345,0.16509673380553797,0.1491787090622888,0.15124348237259566,0.1528537298969881,0.1770574668705955\n" + 
"AvgOfStddevsOfTESTAUROCOverDsetFolds:,0.030274719407621924,0.04775468866122167,0.02190443701428843,0.03848402717616285,0.02989273753439181,0.02752977352082156,0.030072602448817185,0.029162226381979423,0.03277007476456647,0.04972644635842636\n" + "StddevsOfTESTAUROCRanksOverDsets:,0.5773502691896257,3.055050463303893,3.7859388972001824,2.0,3.7859388972001824,1.1547005383792517,2.0,3.605551275463989,2.0816659994661326,2.8867513459481287\n" + "\n" + "NLL:TESTNLL,CAWPE,SMM5,WMV,SMLR,NBC,MV,RC,SMLRE,PB,ES\n" + "AvgTESTNLLOverDsets:,0.8740473258902913,1.1672216448053325,1.2588112791030788,1.1748228744879337,1.2594131707464087,1.293933192179383,1.2679074849500689,1.2864376793720262,1.592847724655895,1.3038288622797296\n" + "AvgTESTNLLRankOverDsets:,1.0,4.666666666666667,5.333333333333333,5.666666666666667,5.666666666666667,6.0,6.0,6.666666666666667,6.666666666666667,7.333333333333333\n" + "StddevOfTESTNLLOverDsets:,0.691694787015834,0.9796905490544456,1.0223841041530592,0.9870722431231613,0.7746460079084273,1.0258722030842515,1.0360622981608925,1.0024121206377643,1.5111772632891516,1.0222936474880255\n" + "AvgOfStddevsOfTESTNLLOverDsetFolds:,0.13100093711191763,0.22899161342369623,0.1798812801999756,0.1874603882410609,0.20352196751423426,0.18541729066158644,0.16955322281243648,0.16941142924146801,0.2786626997782997,0.2000278240505596\n" + "StddevsOfTESTNLLRanksOverDsets:,0.0,2.0816659994661326,0.5773502691896258,2.516611478423583,4.041451884327381,3.4641016151377544,2.6457513110645907,4.041451884327381,4.163331998932266,1.5275252316519465"; */ String expectedBigGlobalSummary = "ACC:TESTACC,WMV,NBC,CAWPE,RC,MV,SMM5,ES,SMLR,SMLRE,PB\n" + "AvgTESTACCOverDsets:,0.7274629990614395,0.7153880586239261,0.7314684860298896,0.7217955382282869,0.6974651649700383,0.719172622915313,0.7124056024835751,0.7031225182297307,0.7075792361562342,0.6928972637354703\n" + 
"AvgTESTACCRankOverDsets:,4.0,4.333333333333333,4.666666666666667,4.833333333333333,5.166666666666667,5.333333333333333,5.5,7.0,7.0,7.166666666666667\n" + "StddevOfTESTACCOverDsets:,0.21965069702486056,0.22075061233906448,0.21599168566182694,0.22433813402114516,0.23250546652710627,0.23224263867524703,0.21152524227063668,0.24148559934127398,0.22786315741913124,0.25329683821988797\n" + "AvgOfStddevsOfTESTACCOverDsetFolds:,0.05097187092275754,0.03297117847198802,0.03642567280598806,0.043526350912095536,0.03818281787490734,0.03357343280121439,0.03932505708576283,0.03895688145388599,0.02497874944688039,0.0370264191912524\n" + "StddevsOfTESTACCRanksOverDsets:,1.3228756555322954,3.6170890690351176,0.7637626158259734,2.0207259421636903,4.193248541803041,3.7859388972001824,3.968626966596886,4.358898943540674,3.605551275463989,1.755942292142123\n" + "\n" + "BALACC:TESTBALACC,NBC,WMV,CAWPE,RC,MV,SMM5,ES,SMLRE,SMLR,PB\n" + "AvgTESTBALACCOverDsets:,0.7241418458085125,0.7228188940688942,0.7351912285245619,0.7176682422515755,0.6773329448329449,0.7306936890270225,0.7013012604679271,0.7188453984287317,0.7126520547353881,0.6870069282569283\n" + "AvgTESTBALACCRankOverDsets:,3.8333333333333335,4.0,4.666666666666667,4.833333333333333,5.166666666666667,5.333333333333333,5.666666666666667,6.666666666666667,7.333333333333333,7.5\n" + "StddevOfTESTBALACCOverDsets:,0.2167439508778403,0.2212685239000405,0.21563344429828277,0.22580992139595155,0.24870379259009287,0.23484868038157847,0.21817072502418683,0.22983957642097774,0.24379996657723263,0.25503069275904094\n" + "AvgOfStddevsOfTESTBALACCOverDsetFolds:,0.031271297432199995,0.05477493885465065,0.03814232820465679,0.04785776036381459,0.04280967408056264,0.0287796820308799,0.048019837791095754,0.029627808499106525,0.03138832405014855,0.03765792057591982\n" + 
"StddevsOfTESTBALACCRanksOverDsets:,2.753785273643051,1.3228756555322954,0.7637626158259734,2.0207259421636903,4.193248541803041,3.7859388972001824,4.163331998932266,4.163331998932266,3.7859388972001824,1.8027756377319946\n" + "\n" + "AUROC:TESTAUROC,CAWPE,SMM5,PB,WMV,NBC,RC,SMLR,ES,MV,SMLRE\n" + "AvgTESTAUROCOverDsets:,0.849770319016462,0.8229810950769959,0.8214985405823803,0.8213685981068424,0.8149374537219297,0.8173813462624793,0.8114881567997374,0.8111879235089292,0.810024282569528,0.7902312289775987\n" + "AvgTESTAUROCRankOverDsets:,2.3333333333333335,4.0,4.333333333333333,5.333333333333333,5.333333333333333,6.0,6.333333333333333,6.666666666666667,6.666666666666667,8.0\n" + "StddevOfTESTAUROCOverDsets:,0.15503685228831474,0.19001901503910637,0.19229971562576004,0.14654126643776394,0.1658309636107022,0.14875742473472578,0.18600285896473676,0.1510921761373448,0.1514632434016978,0.1726614810233521\n" + "AvgOfStddevsOfTESTAUROCOverDsetFolds:,0.02537300115566142,0.02220030849074056,0.02190443701428843,0.030191445372247588,0.02163090653520087,0.03171927947721526,0.04126691345011047,0.03380834704366238,0.027866043551935855,0.03213563345632965\n" + "StddevsOfTESTAUROCRanksOverDsets:,2.3094010767585034,2.6457513110645907,3.214550253664318,3.7859388972001824,1.1547005383792517,2.0,3.214550253664318,3.214550253664318,4.163331998932266,2.6457513110645907\n" + "\n" + "NLL:TESTNLL,CAWPE,RC,WMV,SMM5,MV,NBC,SMLR,ES,SMLRE,PB\n" + "AvgTESTNLLOverDsets:,0.8844348810641535,1.2620831740276812,1.2542313397786657,1.1878670618373748,1.2883865067790932,1.228698888158254,1.1797077162509737,1.2904134315630473,1.3122877664502341,1.592847724655895\n" + "AvgTESTNLLRankOverDsets:,1.3333333333333333,4.666666666666667,5.0,5.333333333333333,5.333333333333333,5.666666666666667,6.0,6.666666666666667,7.333333333333333,7.666666666666667\n" + 
"StddevOfTESTNLLOverDsets:,0.6905190819957883,1.0277175319674012,1.014957453111849,1.1166436736959424,1.0192580874883377,0.7436645726101814,0.9900691677923797,1.0091099400693595,1.0235332594700968,1.5111772632891516\n" + "AvgOfStddevsOfTESTNLLOverDsetFolds:,0.13267895502027546,0.15689429192128332,0.16443411413096812,0.1588387064257464,0.17331314838440773,0.20424033600313699,0.19044258606104228,0.1860761289798324,0.09880785325499852,0.2786626997782997\n" + "StddevsOfTESTNLLRanksOverDsets:,0.5773502691896257,2.309401076758503,1.7320508075688772,3.7859388972001824,4.041451884327381,4.041451884327381,2.6457513110645907,2.0816659994661326,2.8867513459481287,2.516611478423583"; // assertEquals(sb.toString().trim(), expectedBigGlobalSummary.trim()); boolean res = sb.toString().trim().equals(expectedBigGlobalSummary.trim()); if (!res) { System.out.println("CAWPE not recreated sucessfully, expected: "); System.out.println(expectedBigGlobalSummary.trim()); System.out.println("\n\n\n\n"); System.out.println("Made just now: "); System.out.println(sb.toString().trim()); } return res; } @Test public void test() throws Exception { main(new String[0]); } public static void main(String[] args) throws Exception { // generateAllExpectedResults(); // generateMissingExpectedResults(); boolean classifiersComplete = confirmAllExpectedResultReproductions(); boolean analysisReproduced = testBuildCAWPEPaper_AllResultsForFigure3(); if (!classifiersComplete) { System.out.println("Classifiers simple eval recreation failed!"); } if (!analysisReproduced) { System.out.println("CAWPE analysis recreation failed!"); } if (!classifiersComplete || !analysisReproduced) { System.out.println("\n\n*********************Integration tests failed"); System.exit(1); //fail } System.out.println("\n\n*********************All tests passed"); } }
31,636
56.521818
263
java
tsml-java
tsml-java-master/src/main/java/experiments/BasicTransformReproductionTests.java
/* * Copyright (C) 2019 xmw13bzu * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package experiments; import experiments.data.DatasetLoading; import fileIO.InFile; import fileIO.OutFile; import org.junit.Test; import tsml.data_containers.TimeSeriesInstances; import tsml.data_containers.utilities.Converter; import tsml.transformers.ROCKET; import tsml.transformers.TrainableTransformer; import tsml.transformers.Transformer; import utilities.FileHandlingTools; import weka.core.Instances; import weka.core.Randomizable; import java.io.File; import java.lang.reflect.Method; import java.text.SimpleDateFormat; import java.util.*; import java.util.logging.Level; import java.util.logging.Logger; /** * * Tests to compare test accuracies for transforms on a quick italy power * demand run to saved expected results * * hacked version of BasicReproductionTests * * @author Matthew Middlehurst */ public class BasicTransformReproductionTests { public static final int defaultSeed = 0; public static String reproductionDirectory = "src/main/java/experiments/reproductions/transforms/"; static { new File(reproductionDirectory).mkdirs(); } private static final String tsTransformers = "tsml.transformers."; public static final String[] transformerPaths = { tsTransformers + "Catch22", tsTransformers + "Differences", tsTransformers + "PowerSpectrum", tsTransformers + "ROCKET", tsTransformers + "Fast_FFT", }; public 
static final double eps = 10e-6; public static boolean doubleEqual(double v1, double v2) { return Math.abs(v1 - v2) < eps; } public static boolean doubleArrayEquals(double[] a1, double[] a2) { for (int i = 0; i < a1.length; i++) if (!doubleEqual(a1[i], a2[i])) return false; return true; } public static class ExpectedTransformerResults { public String transformerName; //simple unconditioned class name public String fullClassName; //includes package paths for construction public Transformer transformer = null; public double[][] results; public String dateTime; public long time; public ExpectedTransformerResults(File resFile) throws Exception { transformerName = resFile.getName().split("\\.")[0]; //without any filetype extensions InFile in = new InFile(resFile.getAbsolutePath()); int numLines = in.countLines(); String[] meta = in.readLine().split(","); fullClassName = meta[0]; dateTime = meta[1]; time = Long.parseLong(meta[2]); String[] ln1 = in.readLine().split(","); results = new double[numLines-1][ln1.length]; for (int i = 0; i < ln1.length; i++){ results[0][i] = Double.parseDouble(ln1[i]); } for (int n = 1; n < numLines-1; n++){ String[] ln = in.readLine().split(","); for (int i = 0; i < ln.length; i++){ results[n][i] = Double.parseDouble(ln[i]); } } } public ExpectedTransformerResults(String className) throws Exception { fullClassName = className; String[] t = fullClassName.split("\\."); transformerName = t[t.length-1]; } public void save(String directory) throws Exception { directory.replace("\\", "/"); if (!directory.endsWith("/")) directory+="/"; Date date = new Date(); SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); dateTime = formatter.format(date); time = System.currentTimeMillis(); OutFile of = new OutFile(directory + transformerName + ".csv"); of.writeLine(fullClassName + "," + dateTime + "," + time); for (double[] line: results){ StringBuilder s = new StringBuilder(Double.toString(line[0])); for (int i = 1; i < line.length; 
i++){ s.append(",").append(line[i]); } of.writeLine(s.toString()); } } public boolean equal(double[][] newResults) throws Exception { for (int n = 0; n < newResults.length; n++){ if (!doubleArrayEquals(newResults[n], results[n])){ return false; } } return true; } } public static Transformer constructTransformer(String fullTransformerName) { Transformer inst = null; try { Class c = Class.forName(fullTransformerName); inst = (Transformer) c.newInstance(); //special cases if (inst instanceof ROCKET){ ((ROCKET)inst).setNumKernels(100); } if (inst instanceof Randomizable) ((Randomizable)inst).setSeed(defaultSeed); else { Method[] ms = c.getMethods(); for (Method m : ms) { if (m.getName().equals("setSeed") || m.getName().equals("setRandSeed")) { m.invoke(inst, defaultSeed); break; } } } } catch (Exception ex) { Logger.getLogger(BasicTransformReproductionTests.class.getName()).log(Level.SEVERE, null, ex); } return inst; } public static void generateMissingExpectedResults() throws Exception { List<String> failedTransformers = new ArrayList<>(); List<String> existingFiles = Arrays.asList((new File(reproductionDirectory)).list()); for (String transformerPath : transformerPaths) { String[] t = transformerPath.split("\\."); String simpleTransformerName = t[t.length-1]; boolean exists = false; for (String existingFile : existingFiles) { if (simpleTransformerName.equals(existingFile.split("\\.")[0])) { exists = true; break; } } if (exists) continue; else { System.out.println("Attempting to generate missing result for " + simpleTransformerName); } if (!generateExpectedResult(transformerPath)) failedTransformers.add(simpleTransformerName); } System.out.println("\n\n\n"); System.out.println("Failing transformers = " + failedTransformers); } public static void generateAllExpectedResults() throws Exception { List<String> failedTransformers = new ArrayList<>(); for (String transformersPath : transformerPaths) { String[] t = transformersPath.split("\\."); String simpleTransformerName = 
t[t.length-1]; if (!generateExpectedResult(transformersPath)) failedTransformers.add(simpleTransformerName); } System.out.println("\n\n\n"); System.out.println("Failing transformers = " + failedTransformers); } public static boolean generateExpectedResult(String transformerPath) throws Exception { ExpectedTransformerResults expres = new ExpectedTransformerResults(transformerPath); boolean worked = true; try { expres.transformer = constructTransformer(transformerPath); } catch (Exception e) { System.err.println(expres.transformerName + " construction FAILED"); System.err.println(e); e.printStackTrace(); worked = false; } try { Instances data = DatasetLoading.sampleItalyPowerDemand(defaultSeed)[0]; Instances t; if (expres.transformer instanceof TrainableTransformer){ t = ((TrainableTransformer)expres.transformer).fitTransform(data); } else { t = expres.transformer.transform(data); } expres.results = new double[t.numInstances()][t.numAttributes()-1]; for (int n = 0; n < t.numInstances(); n++) { for (int i = 0; i < t.numAttributes() - 1; i++) { expres.results[n][i] = t.get(n).value(i); } } } catch (Exception e) { System.err.println(expres.transformerName + " evaluation on ItalyPowerDemand FAILED"); System.err.println(e); e.printStackTrace(); worked = false; } if (worked) { expres.save(reproductionDirectory); System.err.println(expres.transformerName + " evaluated and saved SUCCESFULLY"); } return worked; } public static boolean confirmAllExpectedResultReproductions() throws Exception { System.out.println("--confirmAllExpectedResultReproductions()"); File[] expectedResults = FileHandlingTools.listFiles(reproductionDirectory); if (expectedResults == null) throw new Exception("No expected results saved to compare to, dir="+reproductionDirectory); List<String> failedTransformers = new ArrayList<>(); for (File expectedResultFile : expectedResults) { ExpectedTransformerResults expres = new ExpectedTransformerResults(expectedResultFile); Transformer transformer = 
constructTransformer(expres.fullClassName); Instances data = DatasetLoading.sampleItalyPowerDemand(defaultSeed)[0]; Instances t; if (transformer instanceof TrainableTransformer){ t = ((TrainableTransformer)transformer).fitTransform(data); } else { t = transformer.transform(data); } double[][] results = new double[t.numInstances()][t.numAttributes()-1]; for (int n = 0; n < t.numInstances(); n++) { for (int i = 0; i < t.numAttributes() - 1; i++) { results[n][i] = t.get(n).value(i); } } Transformer transformer2 = constructTransformer(expres.fullClassName); TimeSeriesInstances data2 = Converter.fromArff(DatasetLoading.sampleItalyPowerDemand(defaultSeed)[0]); TimeSeriesInstances t2; if (transformer2 instanceof TrainableTransformer){ t2 = ((TrainableTransformer)transformer2).fitTransform(data2); } else { t2 = transformer2.transform(data2); } double[][] results2 = new double[t2.numInstances()][t2.getMaxLength()]; for (int n = 0; n < t2.numInstances(); n++) { for (int i = 0; i < t2.getMaxLength(); i++) { results2[n][i] = t2.get(n).get(0).getValue(i); } } if (expres.equal(results) && expres.equal(results2)) System.out.println("\t" + expres.transformerName + " all good, parity with results created " + expres.dateTime); else { System.out.println("\t" + expres.transformerName + " was NOT recreated successfully! 
no parity with results created " + expres.dateTime); failedTransformers.add(expres.transformerName); } } if (failedTransformers.size() > 0) { System.out.println("\n\n\n"); System.out.println("Failing classifiers = " + failedTransformers); return false; } return true; } @Test public void test() throws Exception { main(new String[0]); } public static void main(String[] args) throws Exception { // generateAllExpectedResults(); // generateMissingExpectedResults(); boolean transformersComplete = confirmAllExpectedResultReproductions(); if (!transformersComplete) { System.out.println("Transformers simple eval recreation failed!"); System.exit(1); } System.out.println("\n\n*********************All tests passed"); } }
12,821
35.016854
153
java
tsml-java
tsml-java-master/src/main/java/experiments/ClassifierExperiments.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package experiments; import com.google.common.testing.GcFinalization; import evaluation.evaluators.CrossValidationEvaluator; import evaluation.evaluators.SingleSampleEvaluator; import evaluation.evaluators.SingleTestSetEvaluator; import evaluation.evaluators.StratifiedResamplesEvaluator; import evaluation.storage.ClassifierResults; import experiments.data.DatasetLoading; import machine_learning.classifiers.SaveEachParameter; import machine_learning.classifiers.ensembles.SaveableEnsemble; import machine_learning.classifiers.tuned.TunedRandomForest; import tsml.classifiers.*; import tsml.classifiers.early_classification.AbstractEarlyClassifier; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.core.Instance; import weka.core.Instances; import weka.core.Randomizable; import java.io.*; import java.text.SimpleDateFormat; import java.util.*; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import java.util.logging.Level; import java.util.logging.Logger; import static utilities.GenericTools.indexOfMax; import static utilities.InstanceTools.truncateInstance; import static 
utilities.InstanceTools.truncateInstances; /** * The main experimental class of the timeseriesclassification codebase. The 'main' method to run is setupAndRunExperiment(ExperimentalArguments expSettings) An execution of this will evaluate a single classifier on a single resample of a single dataset. Given an ExperimentalArguments object, which may be parsed from command line arguments or constructed in code, (and in the future, perhaps other methods such as JSON files etc), will load the classifier and dataset specified, prep the location to write results to, train the classifier - potentially generating an error estimate via cross validation on the train set as well - and then predict the cases of the test set. The primary outputs are the train and/or 'testFoldX.csv' files, in the so-called ClassifierResults format, (see the class of the same name under utilities). * * @author James Large (james.large@uea.ac.uk), Tony Bagnall (anthony.bagnall@uea.ac.uk) */ public class ClassifierExperiments { private final static Logger LOGGER = Logger.getLogger(ClassifierExperiments.class.getName()); public static boolean debug = false; private static boolean testFoldExists; private static boolean trainFoldExists; /** * If true, experiments will not print or log to stdout/err anything other that exceptions (SEVERE) */ public static boolean beQuiet = false; //A few 'should be final but leaving them not final just in case' public static settings public static int numCVFolds = 10; private static String WORKSPACE_DIR = "Workspace"; private static String PREDICTIONS_DIR = "Predictions"; /** * Parses args into an ExperimentalArguments object, then calls setupAndRunExperiment(ExperimentalArguments expSettings). * Calling with the --help argument, or calling with un-parsable parameters, will print a summary of the possible parameters. Argument key-value pairs are separated by '='. 
The 5 basic, always required, arguments are: Para name (short/long) | Example -dp --dataPath | --dataPath=C:/Datasets/ -rp --resultsPath | --resultsPath=C:/Results/ -cn --classifierName | --classifierName=RandF -dn --datasetName | --datasetName=ItalyPowerDemand -f --fold | --fold=1 Use --help to see all the optional parameters, and more information about each of them. If running locally, it may be easier to build the ExperimentalArguments object yourself and call setupAndRunExperiment(...) directly, instead of building the String[] args and calling main like a lot of legacy code does. */ public static void main(String[] args) throws Exception { //even if all else fails, print the args as a sanity check for cluster. if (args.length > 0) { ExperimentalArguments expSettings = new ExperimentalArguments(args); ClassifierExperiments.setupAndRunExperiment(expSettings); } else {//Manually set args int start=1; int folds = 1; /* * Change these settings for your experiment: */ //Experiment Parameters, see String[] classifier = {"1NN-DTW"};//"Arsenal", "TDE","DrCIF","RotF",Classifier name: See ClassifierLists for valid options ArrayList<String> parameters = new ArrayList<>(); parameters.add("-dp=src\\main\\java\\experiments\\data\\tsc\\"); //Where to get datasets parameters.add("-rp=temp\\"); //Where to write results parameters.add("-gtf=true"); //Whether to generate train files or not parameters.add("-cn=" + classifier[0]); //Classifier name parameters.add("-dn="); //Problem name, don't change here as it is overwritten by probFiles parameters.add("-f=1"); //Fold number (fold number 1 is stored as testFold0.csv, its a cluster thing) parameters.add("-d=true"); //Debugging parameters.add("-ctr=1h"); //Whether to generate train files or not parameters.add("--force=true"); //Overwrites existing results if true, otherwise set to false // parameters.add("-ctr=3m"); //contract time, default in hours String[] probFiles ={"UnitTest"}; String[] settings = new String[parameters.size()]; 
int count = 0; for (String str : parameters) settings[count++] = str; // String[] probFiles= univariate; //Problem name(s) // String[] probFiles= univariate; //{"ArrowHead"}; //Problem name(s) // String[] probFiles= {"ChinaTown"}; //Problem name(s) // String[] probFiles = DatasetLists.equalLengthProblems; // String[] probFiles= DatasetLists.fixedLengthMultivariate; /* * END OF SETTINGS */ System.out.println("Manually set args:"); for (String str : settings) System.out.println("\t" + str); System.out.println(""); boolean threaded = false; if (threaded) { ExperimentalArguments expSettings = new ExperimentalArguments(settings); System.out.println("Threaded experiment with " + expSettings); // setupAndRunMultipleExperimentsThreaded(expSettings, classifier,probFiles,0,folds); ClassifierExperiments.setupAndRunMultipleExperimentsThreaded(expSettings, classifier, null, probFiles, 0, folds); } else {//Local run without args, mainly for debugging for (String prob : probFiles) { settings[4] = "-dn=" + prob; for (int i = start; i <= folds; i++) { settings[5] = "-f=" + i; ExperimentalArguments expSettings = new ExperimentalArguments(settings); // System.out.println("Sequential experiment with "+expSettings); ClassifierExperiments.setupAndRunExperiment(expSettings); } } } } } /** * Runs an experiment with the given settings. For the more direct method in case e.g * you have a bespoke classifier not handled by ClassifierList or dataset that * is sampled in a bespoke way, use runExperiment * * 1) Sets up the logger. * 2) Sets up the results write path * 3) Checks whether this experiments results already exist. If so, exit * 4) Constructs the classifier * 5) Samples the dataset. * 6) If we're good to go, runs the experiment. 
*/ public static ClassifierResults[] setupAndRunExperiment(ExperimentalArguments expSettings) throws Exception { if (beQuiet) LOGGER.setLevel(Level.SEVERE); // only print severe things else { if (debug) LOGGER.setLevel(Level.FINEST); // print everything else LOGGER.setLevel(Level.INFO); // print warnings, useful info etc, but not simple progress messages, e.g. 'training started' DatasetLoading.setDebug(debug); //TODO when we go full enterprise and figure out how to properly do logging, clean this up } LOGGER.log(Level.FINE, expSettings.toString()); // if a pre-instantiated classifier instance hasn't been supplied, generate one here if (expSettings.classifier == null) { // if a classifier-generating-function has been given (typically in the case of bespoke classifiers wanted in threaded exps), // instantiate the classifier from that if (expSettings.classifierGenerator != null) expSettings.classifier = expSettings.classifierGenerator.get(); else { // else, use the classic setClassifier // Cases in the classifierlist can now change the classifier name to reflect particular parameters wanting to be // represented as different classifiers, e.g. 
ST_1day, ST_2day // The set classifier call is therefore made before defining paths that are dependent on the classifier name expSettings.classifier = ClassifierLists.setClassifier(expSettings); } } buildExperimentDirectoriesAndFilenames(expSettings, expSettings.classifier); //Check whether results already exists, if so and force evaluation is false: just quit if (quitEarlyDueToResultsExistence(expSettings)) return null; Instances[] data = DatasetLoading.sampleDataset(expSettings.dataReadLocation, expSettings.datasetName, expSettings.foldId); // replace missing values with 0 if enabled if (expSettings.replaceMissingValues) { for (Instance inst : data[0]) inst.replaceMissingValues(new double[data[0].numAttributes()]); for (Instance inst : data[1]) inst.replaceMissingValues(new double[data[1].numAttributes()]); } setupClassifierExperimentalOptions(expSettings, expSettings.classifier, data[0]); ClassifierResults[] results = runExperiment(expSettings, data[0], data[1], expSettings.classifier); LOGGER.log(Level.INFO, "Experiment finished " + expSettings.toShortString() + ", Test Acc:" + results[1].getAcc()); return results; } /** * Perform an actual experiment, using the loaded classifier and resampled dataset given, writing to the specified results location. * * 1) If needed, set up file paths and flags related to a single parameter evaluation and/or the classifier's internal parameter saving things * 2) If we want to be performing cv to find an estimate of the error on the train set, either do that here or set up the classifier to do it internally * during buildClassifier() * 3) Do the actual training, i.e buildClassifier() * 4) Save information needed from the training, e.g. train estimates, serialising the classifier, etc. * 5) Evaluate the trained classifier on the test set * 6) Save test results * 7) Done * * NOTES: 1. If the classifier is a SaveableEnsemble, then we save the * internal cross validation accuracy and the internal test predictions 2. 
* The output of the file testFold+fold+.csv is Line 1: * ProblemName,ClassifierName, train/test Line 2: parameter information for * final classifierName, if it is available Line 3: test accuracy then each line * is Actual Class, Predicted Class, Class probabilities * * @return the classifierresults for this experiment, {train, test} */ public static ClassifierResults[] runExperiment(ExperimentalArguments expSettings, Instances trainSet, Instances testSet, Classifier classifier) { ClassifierResults[] experimentResults = null; // the combined container, to hold { trainResults, testResults } on return LOGGER.log(Level.FINE, "Preamble complete, real experiment starting."); try { ClassifierResults trainResults = training(expSettings, classifier, trainSet); postTrainingOperations(expSettings, classifier); ClassifierResults testResults = testing(expSettings, classifier, testSet, trainResults); experimentResults = new ClassifierResults[] {trainResults, testResults}; } catch (Exception e) { //todo expand.. LOGGER.log(Level.SEVERE, "Experiment failed. Settings: " + expSettings + "\n\nERROR: " + e.toString(), e); e.printStackTrace(); return null; //error state } return experimentResults; } /** * Performs all operations related to training the classifier, and returns a ClassifierResults object holding the results * of training. * * At minimum these results hold the hardware benchmark timing (if requested in expSettings), the memory used, * and the build time. * * If a train estimate is to be generated, the results also hold predictions and results from the train set, and these * results are written to file. 
*/ public static ClassifierResults training(ExperimentalArguments expSettings, Classifier classifier, Instances trainSet) throws Exception { ClassifierResults trainResults = new ClassifierResults(); long benchmark = findBenchmarkTime(expSettings); MemoryMonitor memoryMonitor = new MemoryMonitor(); memoryMonitor.installMonitor(); if (expSettings.generateErrorEstimateOnTrainSet && (!trainFoldExists || expSettings.forceEvaluation || expSettings.forceEvaluationTrainFold)) { //Tell the classifier to generate train results if it can do it internally, //otherwise perform the evaluation externally here (e.g. cross validation on the //train data if (EnhancedAbstractClassifier.classifierAbleToEstimateOwnPerformance(classifier)) ((EnhancedAbstractClassifier) classifier).setEstimateOwnPerformance(true); else trainResults = findExternalTrainEstimate(expSettings, classifier, trainSet, expSettings.foldId); } LOGGER.log(Level.FINE, "Train estimate ready."); //Build on the full train data here long buildTime = System.nanoTime(); classifier.buildClassifier(trainSet); buildTime = System.nanoTime() - buildTime; LOGGER.log(Level.FINE, "Training complete"); // Training done, collect memory monitor results // Need to wait for an update, otherwise very quick classifiers may not experience gc calls during training, // or the monitor may not update in time before collecting the max GcFinalization.awaitFullGc(); long maxMemory = memoryMonitor.getMaxMemoryUsed(); trainResults = finaliseTrainResults(expSettings, classifier, trainResults, buildTime, benchmark, TimeUnit.NANOSECONDS, maxMemory); //At this stage, regardless of whether the classifier is able to estimate it's //own accuracy or not, train results should contain either // a) timings, if expSettings.generateErrorEstimateOnTrainSet == false // b) full predictions, if expSettings.generateErrorEstimateOnTrainSet == true if (expSettings.generateErrorEstimateOnTrainSet && (!trainFoldExists || expSettings.forceEvaluation || 
expSettings.forceEvaluationTrainFold)) { writeResults(expSettings, trainResults, expSettings.trainFoldFileName, "train"); LOGGER.log(Level.FINE, "Train estimate written"); } return trainResults; } /** * Any operations aside from testing that we want to perform on the trained classifier. Performed after training, but before testing, * with exceptions caught and only severe warning logged instead of program failure; completion of testing is preferred instead * requiring retraining in a future execution */ public static void postTrainingOperations(ExperimentalArguments expSettings, Classifier classifier) { if (expSettings.serialiseTrainedClassifier) { if (classifier instanceof Serializable) { try { serialiseClassifier(expSettings, classifier); } catch (Exception ex) { LOGGER.log(Level.SEVERE, "Serialisation attempted but failed for classifier ("+classifier.getClass().getName()+")", ex); } } else LOGGER.log(Level.WARNING, "Serialisation requested, but the classifier ("+classifier.getClass().getName()+") does not extend Serializable."); } if (expSettings.visualise) { if (classifier instanceof Visualisable) { ((Visualisable) classifier).setVisualisationSavePath(expSettings.supportingFilePath); try { ((Visualisable) classifier).createVisualisation(); } catch (Exception ex) { LOGGER.log(Level.SEVERE, "Visualisation attempted but failed for classifier ("+classifier.getClass().getName()+")", ex); } } else { expSettings.visualise = false; LOGGER.log(Level.WARNING, "Visualisation requested, but the classifier (" + classifier.getClass().getName() + ") does not extend Visualisable."); } } if (expSettings.interpret) { if (classifier instanceof Interpretable) { ((Interpretable) classifier).setInterpretabilitySavePath(expSettings.supportingFilePath); } else { expSettings.interpret = false; LOGGER.log(Level.WARNING, "Interpretability requested, but the classifier (" + classifier.getClass().getName() + ") does not extend Interpretable."); } } } /** * Performs all operations related 
to testing the classifier, and returns a ClassifierResults object holding the results * of testing. * * Computational resource costs of the training process are taken from the train results. */ public static ClassifierResults testing(ExperimentalArguments expSettings, Classifier classifier, Instances testSet, ClassifierResults trainResults) throws Exception { ClassifierResults testResults = new ClassifierResults(); //And now evaluate on the test set, if this wasn't a single parameter fold if (expSettings.singleParameterID == null) { //This is checked before the buildClassifier also, but //a) another process may have been doing the same experiment //b) we have a special case for the file builder that copies the results over in buildClassifier (apparently?) //no reason not to check again if (expSettings.forceEvaluation || expSettings.forceEvaluationTestFold || !CollateResults.validateSingleFoldFile(expSettings.testFoldFileName)) { if (classifier instanceof AbstractEarlyClassifier) testResults = evaluateEarlyClassifier(expSettings, (AbstractEarlyClassifier) classifier, testSet); else testResults = evaluateClassifier(expSettings, classifier, testSet); testResults.setParas(trainResults.getParas()); testResults.turnOffZeroTimingsErrors(); testResults.setBenchmarkTime(testResults.getTimeUnit().convert(trainResults.getBenchmarkTime(), trainResults.getTimeUnit())); testResults.setBuildTime(testResults.getTimeUnit().convert(trainResults.getBuildTime(), trainResults.getTimeUnit())); testResults.turnOnZeroTimingsErrors(); testResults.setMemory(trainResults.getMemory()); LOGGER.log(Level.FINE, "Testing complete"); writeResults(expSettings, testResults, expSettings.testFoldFileName, "test"); LOGGER.log(Level.FINE, "Test results written"); } else { LOGGER.log(Level.INFO, "Test file already found, written by another process."); testResults = new ClassifierResults(expSettings.testFoldFileName); } } else { LOGGER.log(Level.INFO, "This experiment evaluated a single training iteration 
or parameter set, skipping test phase."); } return testResults; } /** * Based on experimental parameters passed, defines the target results file and workspace locations for use in the * rest of the experiment */ public static void buildExperimentDirectoriesAndFilenames(ExperimentalArguments expSettings, Classifier classifier) { //Build/make the directory to write the train and/or testFold files to // [writeLoc]/[classifier]/Predictions/[dataset]/ String fullWriteLocation = expSettings.resultsWriteLocation + expSettings.estimatorName + "/"+PREDICTIONS_DIR+"/" + expSettings.datasetName + "/"; File f = new File(fullWriteLocation); if (!f.exists()) f.mkdirs(); expSettings.testFoldFileName = fullWriteLocation + "testFold" + expSettings.foldId + ".csv"; expSettings.trainFoldFileName = fullWriteLocation + "trainFold" + expSettings.foldId + ".csv"; if (expSettings.singleParameterID != null && classifier instanceof ParameterSplittable) expSettings.testFoldFileName = expSettings.trainFoldFileName = fullWriteLocation + "fold" + expSettings.foldId + "_" + expSettings.singleParameterID + ".csv"; testFoldExists = CollateResults.validateSingleFoldFile(expSettings.testFoldFileName); trainFoldExists = CollateResults.validateSingleFoldFile(expSettings.trainFoldFileName); // If needed, build/make the directory to write any supporting files to, e.g. checkpointing files // [writeLoc]/[classifier]/Workspace/[dataset]/[fold]/ // todo foreseeable problems with threaded experiments: // user sets a supporting path for the 'master' exp, each generated exp to be run threaded inherits that path, // every classifier/dset/fold writes to same single location. 
For now, that's up to the user to recognise that's // going to be the case; supply a path and everything will be written there if (expSettings.supportingFilePath == null || expSettings.supportingFilePath.equals("")) expSettings.supportingFilePath = expSettings.resultsWriteLocation + expSettings.estimatorName + "/"+WORKSPACE_DIR+"/" + expSettings.datasetName + "/"; f = new File(expSettings.supportingFilePath); if (!f.exists()) f.mkdirs(); } /** * Returns true if the work to be done in this experiment already exists at the locations defined by the experimental settings, * indicating that this execution can be skipped. */ public static boolean quitEarlyDueToResultsExistence(ExperimentalArguments expSettings) { boolean quit = false; if (!expSettings.forceEvaluation && !expSettings.forceEvaluationTestFold && !expSettings.forceEvaluationTrainFold && ((!expSettings.generateErrorEstimateOnTrainSet && testFoldExists) || (expSettings.generateErrorEstimateOnTrainSet && trainFoldExists && testFoldExists))) { LOGGER.log(Level.INFO, expSettings.toShortString() + " already exists at " + expSettings.testFoldFileName + ", exiting."); quit = true; } return quit; } /** * This method cleans up and consolidates the information we have about the * build process into a ClassifierResults object, based on the capabilities of the * classifier and whether we want to be writing a train predictions file * * Regardless of whether the classifier is able to estimate it's own accuracy * or not, the returned train results should contain either * a) timings, if expSettings.generateErrorEstimateOnTrainSet == false * b) full predictions, if expSettings.generateErrorEstimateOnTrainSet == true * * @param exp * @param classifier * @param trainResults the results object so far which may be empty, contain the recorded * timings of the particular classifier, or contain the results of a previous executed * external estimation process, * @param buildTime as recorded by experiments.java, but which may not be 
used if the classifier * records it's own build time more accurately * @return the finalised train results object * @throws Exception */ public static ClassifierResults finaliseTrainResults(ExperimentalArguments exp, Classifier classifier, ClassifierResults trainResults, long buildTime, long benchmarkTime, TimeUnit expTimeUnit, long maxMemory) throws Exception { /* if estimateacc { //want full predictions timingToUpdateWith = buildTime (the one passed to this func) if is EnhancedAbstractClassifier { if able to estimate own acc just return getTrainResults() else timingToUpdateWith = getTrainResults().getBuildTime() } trainResults.setBuildTime(timingToUpdateWith) return trainResults } else not estimating acc { //just want timings if is EnhancedAbstractClassifier just return getTrainResults(), contains the timings and other maybe useful metainfo else trainResults passed are empty trainResults.setBuildTime(buildTime) return trainResults */ //todo just enforce nanos everywhere, this is ridiculous. 
this needs overhaul long estimateToUpdateWith = 0; // no estimate by default long timingToUpdateWith = buildTime; //the timing that experiments measured by default if (exp.generateErrorEstimateOnTrainSet) { //want timings and full predictions TimeUnit timeUnitToUpdateWith = expTimeUnit; String paras = "No parameter info"; if (classifier instanceof EnhancedAbstractClassifier) { EnhancedAbstractClassifier eac = ((EnhancedAbstractClassifier)classifier); if (eac.getEstimateOwnPerformance()) { ClassifierResults res = eac.getTrainResults(); //classifier internally estimateed/recorded itself, just return that directly res.setBenchmarkTime(res.getTimeUnit().convert(benchmarkTime, expTimeUnit)); res.setMemory(maxMemory); return res; } else { timingToUpdateWith = eac.getTrainResults().getBuildTime(); //update with classifier's own timings instead timeUnitToUpdateWith = eac.getTrainResults().getTimeUnit(); paras = eac.getParameters(); } } timingToUpdateWith = trainResults.getTimeUnit().convert(timingToUpdateWith, timeUnitToUpdateWith); estimateToUpdateWith = trainResults.getTimeUnit().convert(trainResults.getErrorEstimateTime(), timeUnitToUpdateWith); //update the externally produced results with the appropriate timing trainResults.setBuildTime(timingToUpdateWith); trainResults.setBuildPlusEstimateTime(timingToUpdateWith + estimateToUpdateWith); trainResults.setParas(paras); } else { // just want the timings if (classifier instanceof EnhancedAbstractClassifier) { trainResults = ((EnhancedAbstractClassifier) classifier).getTrainResults(); } else { trainResults.setBuildTime(trainResults.getTimeUnit().convert(buildTime, expTimeUnit)); } } trainResults.setBenchmarkTime(trainResults.getTimeUnit().convert(benchmarkTime, expTimeUnit)); trainResults.setMemory(maxMemory); return trainResults; } /** * Based on the experimental settings passed, make any classifier interface calls that modify how the classifier is TRAINED here, * e.g. 
give checkpointable classifiers the location to save, give contractable classifiers their contract, etc. * * @return If the classifier is set up to evaluate a single parameter set on the train data, a new trainfilename shall be returned, * otherwise null. * */ private static String setupClassifierExperimentalOptions(ExperimentalArguments expSettings, Classifier classifier, Instances train) throws Exception { String parameterFileName = null; if (classifier instanceof Randomizable && expSettings.useSeed) if (expSettings.seed > Integer.MIN_VALUE) ((Randomizable)classifier).setSeed(expSettings.seed); else ((Randomizable)classifier).setSeed(expSettings.foldId); if (classifier instanceof MultiThreadable && expSettings.numberOfThreads != 1) if (expSettings.numberOfThreads < 1) ((MultiThreadable)classifier).enableMultiThreading(); else ((MultiThreadable)classifier).enableMultiThreading(expSettings.numberOfThreads); if (classifier instanceof AbstractClassifier && expSettings.classifierOptions != null) ((AbstractClassifier)classifier).setOptions(expSettings.classifierOptions); // Parameter/thread/job splitting and checkpointing are treated as mutually exclusive, thus if/else if (expSettings.singleParameterID != null && classifier instanceof ParameterSplittable)//Single parameter fold { if (expSettings.checkpointing) LOGGER.log(Level.WARNING, "Parameter splitting AND checkpointing requested, but cannot do both. 
Parameter splitting turned on, checkpointing not."); if (classifier instanceof TunedRandomForest) ((TunedRandomForest) classifier).setNumFeaturesInProblem(train.numAttributes() - 1); expSettings.checkpointing = false; ((ParameterSplittable) classifier).setParametersFromIndex(expSettings.singleParameterID); parameterFileName = "fold" + expSettings.foldId + "_" + expSettings.singleParameterID + ".csv"; expSettings.generateErrorEstimateOnTrainSet = true; } else { // Only do all this if not an internal _single parameter_ experiment // Save internal info for ensembles if (classifier instanceof SaveableEnsemble) { // mostly legacy, original hivecote code afaik ((SaveableEnsemble) classifier).saveResults(expSettings.supportingFilePath + "internalCV_" + expSettings.foldId + ".csv", expSettings.supportingFilePath + "internalTestPreds_" + expSettings.foldId + ".csv"); } if (expSettings.checkpointing && classifier instanceof SaveEachParameter) { // for legacy things. mostly tuned classifiers ((SaveEachParameter) classifier).setPathToSaveParameters(expSettings.supportingFilePath + "fold" + expSettings.foldId + "_"); } // Main thing to set: if (expSettings.checkpointing && classifier instanceof Checkpointable) { ((Checkpointable) classifier).setCheckpointPath(expSettings.supportingFilePath); if (expSettings.checkpointInterval > 0) { // want to checkpoint at regular timings // todo setCheckpointTimeHours expects int hours only, review ((Checkpointable) classifier).setCheckpointTimeHours((int) TimeUnit.HOURS.convert(expSettings.checkpointInterval, TimeUnit.NANOSECONDS)); } //else, as default // want to checkpoint at classifier's discretion } } if(classifier instanceof TrainTimeContractable && expSettings.contractTrainTimeNanos>0) ((TrainTimeContractable) classifier).setTrainTimeLimit(TimeUnit.NANOSECONDS,expSettings.contractTrainTimeNanos); if(classifier instanceof TestTimeContractable && expSettings.contractTestTimeNanos >0) ((TestTimeContractable) 
classifier).setTestTimeLimit(TimeUnit.NANOSECONDS,expSettings.contractTestTimeNanos); return parameterFileName; } private static ClassifierResults findExternalTrainEstimate(ExperimentalArguments exp, Classifier classifier, Instances train, int fold) throws Exception { ClassifierResults trainResults = null; long trainBenchmark = findBenchmarkTime(exp); //todo clean up this hack. default is cv_10, as with all old trainFold results pre 2019/07/19 String[] parts = exp.trainEstimateMethod.split("_"); String method = parts[0]; String para1 = null; if (parts.length > 1) para1 = parts[1]; String para2 = null; if (parts.length > 2) para2 = parts[2]; switch (method) { case "cv": case "CV": case "CrossValidationEvaluator": int numCVFolds = ClassifierExperiments.numCVFolds; if (para1 != null) numCVFolds = Integer.parseInt(para1); numCVFolds = Math.min(train.numInstances(), numCVFolds); CrossValidationEvaluator cv = new CrossValidationEvaluator(); cv.setSeed(fold); cv.setNumFolds(numCVFolds); trainResults = cv.crossValidateWithStats(classifier, train); break; case "hov": case "HOV": case "SingleTestSetEvaluator": double trainPropHov = DatasetLoading.getProportionKeptForTraining(); if (para1 != null) trainPropHov = Double.parseDouble(para1); SingleSampleEvaluator hov = new SingleSampleEvaluator(); hov.setSeed(fold); hov.setPropInstancesInTrain(trainPropHov); trainResults = hov.evaluate(classifier, train); break; case "sr": case "SR": case "StratifiedResamplesEvaluator": int numSRFolds = 30; if (para1 != null) numSRFolds = Integer.parseInt(para1); double trainPropSRR = DatasetLoading.getProportionKeptForTraining(); if (para2 != null) trainPropSRR = Double.parseDouble(para2); StratifiedResamplesEvaluator srr = new StratifiedResamplesEvaluator(); srr.setSeed(fold); srr.setNumFolds(numSRFolds); srr.setUseEachResampleIdAsSeed(true); srr.setPropInstancesInTrain(trainPropSRR); trainResults = srr.evaluate(classifier, train); break; default: throw new Exception("Unrecognised method to 
estimate error on the train given: " + exp.trainEstimateMethod); } trainResults.setErrorEstimateMethod(exp.trainEstimateMethod); trainResults.setBenchmarkTime(trainBenchmark); return trainResults; } public static void serialiseClassifier(ExperimentalArguments expSettings, Classifier classifier) throws FileNotFoundException, IOException { String filename = expSettings.supportingFilePath + expSettings.estimatorName + "_" + expSettings.datasetName + "_" + expSettings.foldId + ".ser"; LOGGER.log(Level.FINE, "Attempting classifier serialisation, to " + filename); FileOutputStream fos = new FileOutputStream(filename); try (ObjectOutputStream out = new ObjectOutputStream(fos)) { out.writeObject(classifier); fos.close(); out.close(); } LOGGER.log(Level.FINE, "Classifier serialised successfully"); } /** * Meta info shall be set by writeResults(...), just generating the prediction info and * any info directly calculable from that here */ public static ClassifierResults evaluateClassifier(ExperimentalArguments exp, Classifier classifier, Instances testSet) throws Exception { SingleTestSetEvaluator eval = new SingleTestSetEvaluator(exp.foldId, false, true, exp.interpret); //DONT clone data, DO set the class to be missing for each inst return eval.evaluate(classifier, testSet); } /** * Mimics SingleTestSetEvaluator but for early classification classifiers. * Earliness for each test instance is written to the description. * Normalisation for experimental purposes should be handled by the individual classifiers/decision makers. 
*/ public static ClassifierResults evaluateEarlyClassifier(ExperimentalArguments exp, AbstractEarlyClassifier classifier, Instances testSet) throws Exception { ClassifierResults res = new ClassifierResults(testSet.numClasses()); res.setTimeUnit(TimeUnit.NANOSECONDS); res.setEstimatorName(classifier.getClass().getSimpleName()); res.setDatasetName(testSet.relationName()); res.setFoldID(exp.foldId); res.setSplit("test"); int length = testSet.numAttributes()-1; int[] thresholds = classifier.getThresholds(); Instances[] truncatedInstances = new Instances[thresholds.length]; truncatedInstances[thresholds.length-1] = new Instances(testSet, 0); for (int i = 0; i < thresholds.length-1; i++) { truncatedInstances[i] = truncateInstances(truncatedInstances[thresholds.length-1], length, thresholds[i]); } res.turnOffZeroTimingsErrors(); for (Instance testinst : testSet) { double trueClassVal = testinst.classValue(); testinst.setClassMissing(); long startTime = System.nanoTime(); double[] dist = null; double earliness = 0; for (int i = 0; i < thresholds.length; i++){ Instance newInst = truncateInstance(testinst, length, thresholds[i]); newInst.setDataset(truncatedInstances[i]); dist = classifier.distributionForInstance(newInst); if (dist != null) { earliness = thresholds[i]/(double)length; break; } } long predTime = System.nanoTime() - startTime; res.addPrediction(trueClassVal, dist, indexOfMax(dist), predTime, Double.toString(earliness)); } res.turnOnZeroTimingsErrors(); res.finaliseResults(); return res; } /** * If exp.performTimingBenchmark = true, this will return the total time to * sort 1,000 arrays of size 10,000 * * Expected time on Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz is ~0.8 seconds * * This can still anecdotally vary between 0.75 to 1.05 on my windows machine, however. 
*/ public static long findBenchmarkTime(ExperimentalArguments exp) { if (!exp.performTimingBenchmark) return -1; //the default in classifierresults, i.e no benchmark // else calc benchmark int arrSize = 10000; int repeats = 1000; long[] times = new long[repeats]; long total = 0L; for (int i = 0; i < repeats; i++) { times[i] = atomicBenchmark(arrSize); total+=times[i]; } if (debug) { long mean = 0L, max = Long.MIN_VALUE, min = Long.MAX_VALUE; for (long time : times) { mean += time; if (time < min) min = time; if (time > max) max = time; } mean/=repeats; int halfR = repeats/2; long median = repeats % 2 == 0 ? (times[halfR] + times[halfR+1]) / 2 : times[halfR]; double d = 1000000000; StringBuilder sb = new StringBuilder("BENCHMARK TIMINGS, summary of times to " + "sort "+repeats+" random int arrays of size "+arrSize+" - in seconds\n"); sb.append("total = ").append(total/d).append("\n"); sb.append("min = ").append(min/d).append("\n"); sb.append("max = ").append(max/d).append("\n"); sb.append("mean = ").append(mean/d).append("\n"); sb.append("median = ").append(median/d).append("\n"); LOGGER.log(Level.FINE, sb.toString()); } return total; } private static long atomicBenchmark(int arrSize) { long startTime = System.nanoTime(); int[] arr = new int[arrSize]; Random rng = new Random(0); for (int j = 0; j < arrSize; j++) arr[j] = rng.nextInt(); Arrays.sort(arr); return System.nanoTime() - startTime; } public static String buildExperimentDescription() { //TODO get system information, e.g. cpu clock-speed. 
generic across os too Date date = new Date(); SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); StringBuilder sb = new StringBuilder("Generated by ClassifierExperiments.java on " + formatter.format(date) + "."); sb.append(" SYSTEMPROPERTIES:{"); sb.append("user.name:").append(System.getProperty("user.name", "unknown")); sb.append(",os.arch:").append(System.getProperty("os.arch", "unknown")); sb.append(",os.name:").append(System.getProperty("os.name", "unknown")); sb.append("},ENDSYSTEMPROPERTIES"); return sb.toString().replace("\n", "NEW_LINE"); } public static void writeResults(ExperimentalArguments exp, ClassifierResults results, String fullTestWritingPath, String split) throws Exception { results.setEstimatorName(exp.estimatorName); results.setDatasetName(exp.datasetName); results.setFoldID(exp.foldId); results.setSplit(split); results.setDescription(buildExperimentDescription()); //todo, need to make design decisions with the classifierresults enum to clean this switch up switch (exp.classifierResultsFileFormat) { case 0: //PREDICTIONS results.writeFullResultsToFile(fullTestWritingPath); break; case 1: //METRICS results.writeSummaryResultsToFile(fullTestWritingPath); break; case 2: //COMPACT results.writeCompactResultsToFile(fullTestWritingPath); break; default: { System.err.println("Classifier Results file writing format not recognised, "+exp.classifierResultsFileFormat+", just writing the full predictions."); results.writeFullResultsToFile(fullTestWritingPath); break; } } File f = new File(fullTestWritingPath); if (f.exists()) { f.setWritable(true, false); } } /** * Will run through all combinations of classifiers*datasets*folds provided, using the meta experimental info stored in the * standardArgs. Will by default set numThreads = numCores * * If using bespoke classifiers (not found in setClassifier), e.g. 
different parameterisations, bespoke ensembles etc,
 * provide a generator function for each classifier, in a list that is parallel with classifierNames. Assuming the
 * classifier is Randomizable, the seed shall be set equal to the expSettings foldId
 *
 * If simply using setClassifier to instantiate classifiers, classifierGenerators itself or fields within it can be null
 *
 * For e.g. classifierNames = { "TSF" }, these methods of classifier instance generation are all equivalent
 *  - classifierGenerators = null                                            // uses setClassifier("TSF")
 *  - classifierGenerators = Arrays.asList(null);                            // uses setClassifier("TSF")
 *  - classifierGenerators = Arrays.asList(() -> {return new TSF();});       // be careful with rng seeding though
 *  - classifierGenerators = Arrays.asList(() -> {return setClassifierClassic("TSF",0)});
 */
public static void setupAndRunMultipleExperimentsThreaded(ExperimentalArguments standardArgs, String[] classifierNames, List<Supplier<Classifier>> classifierGenerators, String[] datasetNames, int minFolds, int maxFolds) throws Exception {
    setupAndRunMultipleExperimentsThreaded(standardArgs, classifierNames, classifierGenerators, datasetNames, minFolds, maxFolds, 0);

    /*
    bespoke classifier example usage:

        ClassifierExperiments.ExperimentalArguments standardArgs = new ClassifierExperiments.ExperimentalArguments();
        standardArgs.dataReadLocation = "src/main/java/experiments/data/uci/";
        standardArgs.resultsWriteLocation = "C:/Temp/tests/";

        String[] classifierNames = { "ED", "RandF", "BespokeEnsemble" };

        Supplier<Classifier> ensembleSupplier = () -> {
            CAWPE cawpe = new CAWPE();
            cawpe.setClassifiersForBuildingInMemory(new Classifier[] { new ED1NN(), new RandomForest() });
            return cawpe;
        };

        List<Supplier<Classifier>> classifierGenerators = Arrays.asList(
            () -> {return new ED1NN();},
            () -> {return new RandomForest();},
            ensembleSupplier
        );

        String[] datasets = { "hayes-roth", "iris", "teaching" };
        int numFolds = 3;

        ClassifierExperiments.setupAndRunMultipleExperimentsThreaded(standardArgs, classifierNames, classifierGenerators, datasets, 0, numFolds);
    */
}

/**
 * Will run through all combinations of classifiers*datasets*folds provided, using the meta experimental info stored in the
 * standardArgs. If numThreads > 0, will spawn that many threads. If numThreads == 0, will use as many threads as there are cores,
 * else if numThreads == -1, will spawn as many threads as there are cores minus 1, to aid usability of the machine.
 *
 * If using bespoke classifiers (not found in setClassifier), e.g. different parameterisations, bespoke ensembles etc,
 * provide a generator function for each classifier, in a list that is parallel with classifierNames. Assuming the
 * classifier is Randomizable, the seed shall be set equal to the expSettings foldId
 *
 * If simply using setClassifier to instantiate classifiers, classifierGenerators itself or fields within it can be null
 *
 * For e.g. classifierNames = { "TSF" }, these methods of classifier instance generation are all equivalent
 *  - classifierGenerators = null                                            // uses setClassifier("TSF")
 *  - classifierGenerators = Arrays.asList(null);                            // uses setClassifier("TSF")
 *  - classifierGenerators = Arrays.asList(() -> {return new TSF();});       // be careful with rng seeding though
 *  - classifierGenerators = Arrays.asList(() -> {return setClassifierClassic("TSF",0)});
 */
public static void setupAndRunMultipleExperimentsThreaded(ExperimentalArguments standardArgs, String[] classifierNames, List<Supplier<Classifier>> classifierGenerators, String[] datasetNames, int minFolds, int maxFolds, int numThreads) throws Exception {
    int numCores = Runtime.getRuntime().availableProcessors();
    if (numThreads == 0)
        numThreads = numCores;
    else if (numThreads < 0)
        numThreads = Math.max(1, numCores - 1);

    System.out.println("# cores =" + numCores);
    System.out.println("# threads =" + numThreads);

    ExecutorService executor = Executors.newFixedThreadPool(numThreads);

    List<ExperimentalArguments> exps = standardArgs.generateExperiments(classifierNames, classifierGenerators, datasetNames, minFolds, maxFolds);
    for (ExperimentalArguments exp : exps)
        executor.execute(exp);

    executor.shutdown();
    // fix: previously this was a busy-wait `while (!executor.isTerminated()) {}`,
    // spinning a core for the whole run; block on the executor instead
    executor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);

    System.out.println("Finished all threads");
}
}
49,547
48.498501
257
java
tsml-java
tsml-java-master/src/main/java/experiments/ClassifierLists.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
package experiments;

import evaluation.tuning.ParameterSpace;
import machine_learning.classifiers.RidgeClassifierCV;
import machine_learning.classifiers.ensembles.ContractRotationForest;
import machine_learning.classifiers.ensembles.EnhancedRotationForest;
import machine_learning.classifiers.tuned.TunedClassifier;
import machine_learning.classifiers.tuned.TunedSVM;
import tsml.classifiers.EnhancedAbstractClassifier;
import tsml.classifiers.distance_based.distances.dtw.DTWDistance;
import tsml.classifiers.distance_based.distances.ed.EDistance;
import tsml.classifiers.distance_based.distances.erp.ERPDistance;
import tsml.classifiers.distance_based.distances.lcss.LCSSDistance;
import tsml.classifiers.distance_based.distances.msm.MSMDistance;
import tsml.classifiers.distance_based.distances.wdtw.WDTWDistance;
import tsml.classifiers.distance_based.elastic_ensemble.ElasticEnsemble;
import tsml.classifiers.distance_based.knn.KNN;
import tsml.classifiers.distance_based.proximity.ProximityForest;
import tsml.classifiers.early_classification.*;
import tsml.classifiers.kernel_based.Arsenal;
import tsml.classifiers.hybrids.Catch22Classifier;
import tsml.classifiers.hybrids.HIVE_COTE;
import tsml.classifiers.dictionary_based.*;
import tsml.classifiers.dictionary_based.boss_variants.BOSSC45;
import tsml.classifiers.dictionary_based.SpatialBOSS;
import tsml.classifiers.dictionary_based.boss_variants.BoTSWEnsemble;
import tsml.classifiers.distance_based.*;
import tsml.classifiers.kernel_based.ROCKETClassifier;
import tsml.classifiers.interval_based.*;
import tsml.classifiers.legacy.COTE.FlatCote;
import tsml.classifiers.legacy.elastic_ensemble.DTW1NN;
import tsml.classifiers.multivariate.*;
import tsml.classifiers.shapelet_based.*;
import tsml.classifiers.shapelet_based.FastShapelets;
import tsml.classifiers.shapelet_based.LearnShapelets;
import tsml.classifiers.shapelet_based.ShapeletTree;
import tsml.transformers.*;
import weka.core.EuclideanDistance;
import weka.core.Randomizable;
import machine_learning.classifiers.ensembles.CAWPE;
import machine_learning.classifiers.PLSNominalClassifier;
import machine_learning.classifiers.kNN;
import machine_learning.classifiers.tuned.TunedXGBoost;
import weka.classifiers.Classifier;
import weka.classifiers.bayes.BayesNet;
import weka.classifiers.bayes.NaiveBayes;
import weka.classifiers.functions.Logistic;
import weka.classifiers.functions.MultilayerPerceptron;
import weka.classifiers.functions.SMO;
import weka.classifiers.functions.supportVector.PolyKernel;
import weka.classifiers.functions.supportVector.RBFKernel;
import weka.classifiers.meta.RotationForest;
import weka.classifiers.trees.J48;
import weka.classifiers.trees.RandomForest;

import java.util.Arrays;
import java.util.HashSet;

/**
 *
 * @author James Large (james.large@uea.ac.uk) and Tony Bagnall
 */
public class ClassifierLists {

    //All implemented classifiers in tsml
    //<editor-fold defaultstate="collapsed" desc="All univariate time series classifiers">
    public static String[] allUnivariate={
        //Distance Based
        "DTW","DTWCV", "EE","LEE","ApproxElasticEnsemble","ProximityForest","FastElasticEnsemble",
        "DD_DTW","DTD_C","CID_DTW","NN_CID",
        //Dictionary Based
        "BOP", "SAXVSM", "SAX_1NN", "BOSS", "cBOSS", "S-BOSS","BoTSWEnsemble","WEASEL",
        //Interval Based
        "LPS","TSF",
        //Frequency Based
        "RISE",
        //Shapelet Based
"FastShapelets","LearnShapelets","ShapeletTransformClassifier","ShapeletTreeClassifier","STC", //Hybrids "HiveCoteAlpha","FlatCote","TS-CHIEF","HIVE-COTEv1" }; //</editor-fold> public static HashSet<String> allClassifiers=new HashSet<String>( Arrays.asList(allUnivariate)); /** * DISTANCE BASED: classifiers based on measuring the distance between two classifiers */ public static String[] distance= { //Nearest Neighbour with distances "1NN-ED","1NN-DTW","1NN-DTWCV", "DD_DTW","DTD_C","CID_DTW","NN_CID", "NN_ShapeDTW", "1NN-DTW_New","1NN-DTW-Jay","1NN-MSM","1NN-ERP","1NN-LCSS","1NN-WDTW", //EE derivatives "ElasticEnsemble","EE","LEE","ApproxElasticEnsemble","FastElasticEnsemble", //Tree based "ProximityForest","PF" }; public static HashSet<String> distanceBased=new HashSet<String>( Arrays.asList(distance)); private static Classifier setDistanceBased(ExperimentalArguments exp){ String classifier=exp.estimatorName; Classifier c = null; int fold=exp.foldId; switch(classifier) { case "1NN-ED": c = new KNN(); ((KNN) c).setDistanceMeasure(new EDistance()); break; case "1NN-DTW": c = new DTW_kNN(); ((DTW_kNN)c).optimiseWindow(false); ((DTW_kNN)c).setMaxR(1.0); break; case "1NN-DTW-Jay": c = new DTW1NN(); break; case "1NN-DTW_New": c = new KNN(); ((KNN) c).setDistanceMeasure(new DTWDistance()); break; case "1NN-MSM": c = new KNN(); ((KNN) c).setDistanceMeasure(new MSMDistance()); break; case "1NN-ERP": c = new KNN(); ((KNN) c).setDistanceMeasure(new ERPDistance()); break; case "1NN-LCSS": c = new KNN(); ((KNN) c).setDistanceMeasure(new LCSSDistance()); break; case "1NN-WDTW": c = new KNN(); ((KNN) c).setDistanceMeasure(new WDTWDistance()); break; case "1NN-DTWCV": c = new DTWCV(); break; case "EE": c = ElasticEnsemble.CONFIGS.get(classifier).build(); break; case "LEE": c = ElasticEnsemble.CONFIGS.get(classifier).build(); break; case "ApproxElasticEnsemble": c = new ApproxElasticEnsemble(); break; case "FastElasticEnsemble": c=new FastElasticEnsemble(); break; case "DD_DTW": 
c=new DD_DTW(); break; case "DTD_C": c=new DTD_C(); break; case "CID_DTW": c=new NN_CID(); ((NN_CID)c).useDTW(); break; case "NN_CID": c = new NN_CID(); break; case "ProximityForest": c = new ProximityForest(); ((ProximityForest)c).setTrainEstimateMethod("OOB"); break; case "NN_ShapeDTW_Raw": c=new ShapeDTW_1NN(30,null,false,null); break; case "NN_ShapeDTW_PAA": PAA p = new PAA(); p.setNumIntervals(5); c=new ShapeDTW_1NN(30,p,false,null); break; case "NN_ShapeDTW_DWT": DWT dwt = new DWT(); c=new ShapeDTW_1NN(30,dwt,false,null); break; case "NN_ShapeDTW_Der": Derivative der = new Derivative(); c=new ShapeDTW_1NN(30,der,false,null); break; case "NN_ShapeDTW_Slope": Slope s = new Slope(5); c=new ShapeDTW_1NN(30,s,false,null); break; case "NN_ShapeDTW_Hog": HOG1D h = new HOG1D(); c=new ShapeDTW_1NN(30,h,false,null); break; case "NN_ShapeDTW_Comp": DWT dwt2 = new DWT(); HOG1D h2 = new HOG1D(); c=new ShapeDTW_1NN(30,dwt2,true,h2); break; case "SVM_ShapeDTW_Poly": c=new ShapeDTW_SVM(); break; case "SVM_ShapeDTW_RBF": c=new ShapeDTW_SVM(30, ShapeDTW_SVM.KernelType.RBF); break; default: System.out.println("Unknown distance based classifier "+classifier+" should not be able to get here "); System.out.println("There is a mismatch between array distance and the switch statement "); throw new UnsupportedOperationException("Unknown distance based classifier "+classifier+" should not be able to get here. 
" + "There is a mismatch between array distance and the switch statement."); } return c; } /** * DICTIONARY BASED: classifiers based on counting the occurrence of words in series */ public static String[] dictionary= {"BOP","SAXVSM","SAX_1NN","BOSS","cBOSS","S-BOSS","BoTSWEnsemble","WEASEL", "TDE"}; public static HashSet<String> dictionaryBased=new HashSet<String>( Arrays.asList(dictionary)); private static Classifier setDictionaryBased(ExperimentalArguments exp){ String classifier=exp.estimatorName; Classifier c; int fold=exp.foldId; switch(classifier) { case "BOP": c=new BagOfPatternsClassifier(); break; case "SAXVSM": c=new SAXVSM(); break; case "SAX_1NN": c=new SAXVSM(); break; case "BOSS": c=new BOSS(); break; case "cBOSS": c = new cBOSS(); break; case "BOSSC45": c = new BOSSC45(); break; case "S-BOSS": c = new SpatialBOSS(); break; case "BoTSWEnsemble": c = new BoTSWEnsemble(); break; case "WEASEL": c = new WEASEL(); break; case "TDE": c = new TDE(); break; default: System.out.println("Unknown dictionary based classifier "+classifier+" should not be able to get here "); System.out.println("There is a mismatch between array dictionary and the switch statement "); throw new UnsupportedOperationException("Unknown dictionary based classifier "+classifier+" should not be able to get here." 
+ "There is a mismatch between array dictionary and the switch statement "); } return c; } /** * INTERVAL BASED: classifiers that form multiple intervals over series and summarise */ public static String[] interval= {"LPS","TSF","RISE","CIF","STSF","DrCIF"}; public static HashSet<String> intervalBased=new HashSet<String>( Arrays.asList(interval)); private static Classifier setIntervalBased(ExperimentalArguments exp){ String classifier=exp.estimatorName; Classifier c; int fold=exp.foldId; switch(classifier) { case "LPS": c=new LPS(); break; case "TSF": c=new TSF(); break; case "RISE": c=new RISE(); break; case "CIF": c=new CIF(); break; case "STSF": c=new STSF(); break; case "DrCIF": c=new DrCIF(); break; default: System.out.println("Unknown interval based classifier "+classifier+" should not be able to get here "); System.out.println("There is a mismatch between array interval and the switch statement "); throw new UnsupportedOperationException("Unknown interval based classifier "+classifier+" should not be able to get here." + "There is a mismatch between array interval and the switch statement "); } return c; } /** * SHAPELET BASED: Classifiers that use shapelets in some way. 
*/ public static String[] shapelet= {"FastShapelets","LearnShapelets","ShapeletTransformClassifier", "ShapeletTreeClassifier","STC","ROCKET","Arsenal","STC-Pruned"}; public static HashSet<String> shapeletBased=new HashSet<String>( Arrays.asList(shapelet)); private static Classifier setShapeletBased(ExperimentalArguments exp){ String classifier=exp.estimatorName; Classifier c; int fold=exp.foldId; switch(classifier) { case "LearnShapelets": c=new LearnShapelets(); break; case "FastShapelets": c=new FastShapelets(); break; case "ShapeletTransformClassifier": case "STC": c=new ShapeletTransformClassifier(); break; case "STC-Pruned": ShapeletTransformClassifier stc=new ShapeletTransformClassifier(); stc.setPruneMatchingShapelets(true); c=stc; break; case "ShapeletTreeClassifier": c=new ShapeletTree(); break; case "ROCKET": c = new ROCKETClassifier(); break; case "Arsenal": c = new Arsenal(); break; default: System.out.println("Unknown shapelet based classifier "+classifier+" should not be able to get here "); System.out.println("There is a mismatch between array interval and the switch statement "); throw new UnsupportedOperationException("Unknown interval based classifier, should not be able to get here " + "There is a mismatch between array interval and the switch statement "); } return c; } /** * HYBRIDS: Classifiers that combine two or more of the above approaches */ public static String[] hybrids= {"HiveCoteAlpha", "FlatCote", "HIVE-COTEv1","HIVE-COTEv2", "catch22", "HC-oob", "HC-cv","HC-cv-pf-stc", "HC-cv-stc", "HCV2-cv", //HC 2 variants "HIVE-COTE","HC2","HiveCote", "HC-1", "HC-2", "HC-3", "HC-4", "HC-5", "HC-6", "HC-7", "HC-8", "HC-9", "HC-10", "HC-11", "HC-12", "HC-13", "HC-14", "HC-15", "HC-16", "HC-17", "HC-18", "HC-19", "HC-20", "HC-21", "HC-22", "HC-23", "HC-24", "HC-25", "HC-26", "HC2-FromFile" }; public static HashSet<String> hybridBased=new HashSet<String>( Arrays.asList(hybrids)); private static Classifier setHybridBased(ExperimentalArguments exp){ 
String classifier=exp.estimatorName; Classifier c; int fold=exp.foldId; switch(classifier) { case "FlatCote": c=new FlatCote(); break; case "HIVE-COTEv1": HIVE_COTE hc=new HIVE_COTE(); hc.setFillMissingDistsWithOneHotVectors(true); hc.setSeed(fold); hc.setupHIVE_COTE_1_0(); c=hc; break; case "HIVE-COTEv2": case "HiveCote": case "HIVE-COTE": hc=new HIVE_COTE(); hc.setSeed(fold); c=hc; break; case "HC2": hc=new HIVE_COTE(); hc.setSeed(fold); hc.enableMultiThreading(4); c=hc; break; case "HC2-FromFile": hc=new HIVE_COTE(); hc.setBuildIndividualsFromResultsFiles(true); hc.setSeed(fold); hc.setDebug(false); hc.setResultsFileLocationParameters(exp.resultsWriteLocation,exp.datasetName,fold); String[] classifiers={"Arsenal","DrCIF","TDE","STC-D"}; hc.setClassifiersNamesForFileRead(classifiers); c=hc; break; case "HC-cv-stc": hc=new HIVE_COTE(); hc.setBuildIndividualsFromResultsFiles(true); hc.setSeed(fold); hc.setDebug(false); hc.setResultsFileLocationParameters(exp.resultsWriteLocation,exp.datasetName,fold); classifiers=new String[]{"Arsenal-cv","DrCIF-cv","TDE-cv","STC-cv"}; hc.setClassifiersNamesForFileRead(classifiers); c=hc; break; case "HC-cv": hc=new HIVE_COTE(); hc.setBuildIndividualsFromResultsFiles(true); hc.setSeed(fold); hc.setDebug(false); hc.setResultsFileLocationParameters(exp.resultsWriteLocation,exp.datasetName,fold); classifiers=new String[]{"Arsenal-cv","DrCIF-cv","TDE-cv","STC-cv","PF-cv"}; hc.setClassifiersNamesForFileRead(classifiers); c=hc; break; case "HC-oob": HIVE_COTE hc2=new HIVE_COTE(); hc2.setBuildIndividualsFromResultsFiles(true); hc2.setSeed(fold); hc2.setResultsFileLocationParameters(exp.resultsWriteLocation,exp.datasetName,fold); String[] x2={"Arsenal-oob","DrCIF-oob","TDE-oob","STC-oob","PF-oob"}; hc2.setClassifiersNamesForFileRead(x2); c=hc2; break; case "catch22": c = new Catch22Classifier(); break; case "HC-1": hc=new HIVE_COTE(); hc.setBuildIndividualsFromResultsFiles(true); hc.setSeed(fold); hc.setDebug(false); 
hc.setResultsFileLocationParameters(exp.resultsWriteLocation,exp.datasetName,fold); classifiers=new String[]{"DrCIF","Arsenal"}; hc.setClassifiersNamesForFileRead(classifiers); c=hc; break; case "HC-2": hc=new HIVE_COTE(); hc.setBuildIndividualsFromResultsFiles(true); hc.setSeed(fold); hc.setDebug(false); hc.setResultsFileLocationParameters(exp.resultsWriteLocation,exp.datasetName,fold); classifiers=new String[]{"DrCIF","STC"}; hc.setClassifiersNamesForFileRead(classifiers); c=hc; break; case "HC-3": hc=new HIVE_COTE(); hc.setBuildIndividualsFromResultsFiles(true); hc.setSeed(fold); hc.setDebug(false); hc.setResultsFileLocationParameters(exp.resultsWriteLocation,exp.datasetName,fold); classifiers=new String[]{"DrCIF","TDE"}; hc.setClassifiersNamesForFileRead(classifiers); c=hc; break; case "HC-4": hc=new HIVE_COTE(); hc.setBuildIndividualsFromResultsFiles(true); hc.setSeed(fold); hc.setDebug(false); hc.setResultsFileLocationParameters(exp.resultsWriteLocation,exp.datasetName,fold); classifiers=new String[]{"Arsenal","STC"}; hc.setClassifiersNamesForFileRead(classifiers); c=hc; break; case "HC-5": hc=new HIVE_COTE(); hc.setBuildIndividualsFromResultsFiles(true); hc.setSeed(fold); hc.setDebug(false); hc.setResultsFileLocationParameters(exp.resultsWriteLocation,exp.datasetName,fold); classifiers=new String[]{"Arsenal","TDE"}; hc.setClassifiersNamesForFileRead(classifiers); c=hc; break; case "HC-6": hc=new HIVE_COTE(); hc.setBuildIndividualsFromResultsFiles(true); hc.setSeed(fold); hc.setDebug(false); hc.setResultsFileLocationParameters(exp.resultsWriteLocation,exp.datasetName,fold); classifiers=new String[]{"STC","TDE"}; hc.setClassifiersNamesForFileRead(classifiers); c=hc; break; case "HC-7": hc=new HIVE_COTE(); hc.setBuildIndividualsFromResultsFiles(true); hc.setSeed(fold); hc.setDebug(false); hc.setResultsFileLocationParameters(exp.resultsWriteLocation,exp.datasetName,fold); classifiers=new String[]{"DrCIF","Arsenal","STC"}; 
hc.setClassifiersNamesForFileRead(classifiers); c=hc; break; case "HC-8": hc=new HIVE_COTE(); hc.setBuildIndividualsFromResultsFiles(true); hc.setSeed(fold); hc.setDebug(false); hc.setResultsFileLocationParameters(exp.resultsWriteLocation,exp.datasetName,fold); classifiers=new String[]{"DrCIF","Arsenal","TDE"}; hc.setClassifiersNamesForFileRead(classifiers); c=hc; break; case "HC-9": hc=new HIVE_COTE(); hc.setBuildIndividualsFromResultsFiles(true); hc.setSeed(fold); hc.setDebug(false); hc.setResultsFileLocationParameters(exp.resultsWriteLocation,exp.datasetName,fold); classifiers=new String[]{"DrCIF","STC","TDE"}; hc.setClassifiersNamesForFileRead(classifiers); c=hc; break; case "HC-10": hc=new HIVE_COTE(); hc.setBuildIndividualsFromResultsFiles(true); hc.setSeed(fold); hc.setDebug(false); hc.setResultsFileLocationParameters(exp.resultsWriteLocation,exp.datasetName,fold); classifiers=new String[]{"Arsenal","STC","TDE"}; hc.setClassifiersNamesForFileRead(classifiers); c=hc; break; case "HC-11": hc=new HIVE_COTE(); hc.setBuildIndividualsFromResultsFiles(true); hc.setSeed(fold); hc.setDebug(false); hc.setResultsFileLocationParameters(exp.resultsWriteLocation,exp.datasetName,fold); classifiers=new String[]{"DrCIF","Arsenal","STC","TDE"}; hc.setClassifiersNamesForFileRead(classifiers); c=hc; break; default: System.out.println("Unknown hybrid based classifier "+classifier+" should not be able to get here "); System.out.println("There is a mismatch between array hybrids and the switch statement "); throw new UnsupportedOperationException("Unknown hybrid based classifier, should not be able to get here " + "There is a mismatch between array hybrids and the switch statement "); } return c; } /** * MULTIVARIATE time series classifiers, all in one list for now */ public static String[] allMultivariate={"Shapelet_I","Shapelet_D","Shapelet_Indep","ED_I","ED_D","DTW_I","DTW_D", "DTW_A","HIVE-COTE_I", "HC_I", "CBOSS_I", "RISE_I", "STC_I", "TSF_I","PF_I","TS-CHIEF_I","HC-PF_I", 
"HIVE-COTEn_I","WEASEL-MUSE", "STC-D"};//Not enough to classify yet public static HashSet<String> multivariateBased=new HashSet<String>( Arrays.asList(allMultivariate)); private static Classifier setMultivariate(ExperimentalArguments exp){ String classifier=exp.estimatorName,resultsPath="",dataset=""; int fold=exp.foldId; Classifier c; boolean canLoadFromFile=true; if(exp.resultsWriteLocation==null || exp.datasetName==null) canLoadFromFile=false; else{ resultsPath=exp.resultsWriteLocation; dataset=exp.datasetName; } switch(classifier) { case "Shapelet_I": case "Shapelet_D": case "Shapelet_Indep"://Multivariate version 1 c=new MultivariateShapeletTransformClassifier(); //Default to 1 day max run: could do this better ((MultivariateShapeletTransformClassifier)c).setOneDayLimit(); ((MultivariateShapeletTransformClassifier)c).setSeed(fold); ((MultivariateShapeletTransformClassifier)c).setTransformType(classifier); break; case "ED_I": c=new NN_ED_I(); break; case "ED_D": c=new NN_ED_D(); break; case "DTW_I": c=new NN_DTW_I(); break; case "DTW_D": c=new NN_DTW_D(); break; case "DTW_A": c=new NN_DTW_A(); break; case "HC_I": c=new MultivariateHiveCote(exp.resultsWriteLocation, exp.datasetName, exp.foldId); break; case "CBOSS_I": c=new MultivariateSingleEnsemble("cBOSS", exp.resultsWriteLocation, exp.datasetName, exp.foldId); break; case "RISE_I": c=new MultivariateSingleEnsemble("RISE", exp.resultsWriteLocation, exp.datasetName, exp.foldId); break; case "STC_I": c=new MultivariateSingleEnsemble("STC", exp.resultsWriteLocation, exp.datasetName, exp.foldId); ((EnhancedAbstractClassifier)c).setDebug(true); break; case "TSF_I": c=new MultivariateSingleEnsemble("TSF", exp.resultsWriteLocation, exp.datasetName, exp.foldId); break; case "PF_I": c=new MultivariateSingleEnsemble("ProximityForest", exp.resultsWriteLocation, exp.datasetName, exp.foldId); break; case "TS-CHIEF_I": c=new MultivariateSingleEnsemble("TSCHIEF", exp.resultsWriteLocation, exp.datasetName, exp.foldId); 
break; case "HIVE-COTE_I": if(canLoadFromFile){ String[] cls={"TSF_I","cBOSS_I","RISE_I","STC_I"};//RotF for ST c=new HIVE_COTE(); ((HIVE_COTE)c).setFillMissingDistsWithOneHotVectors(true); ((HIVE_COTE)c).setSeed(fold); ((HIVE_COTE)c).setBuildIndividualsFromResultsFiles(true); ((HIVE_COTE)c).setResultsFileLocationParameters(resultsPath, dataset, fold); ((HIVE_COTE)c).setClassifiersNamesForFileRead(cls); } else throw new UnsupportedOperationException("ERROR: currently only loading from file for CAWPE and no results file path has been set. " + "Call setClassifier with an ExperimentalArguments object exp with exp.resultsWriteLocation (contains component classifier results) and exp.datasetName set"); break; case "HIVE-COTEn_I": if(canLoadFromFile){ String[] cls={"TSF_I","cBOSS_I","RISE_I","STC_I","TSFn_I","cBOSSn_I","RISEn_I","STCn_I"};//RotF for ST c=new HIVE_COTE(); ((HIVE_COTE)c).setFillMissingDistsWithOneHotVectors(true); ((HIVE_COTE)c).setSeed(fold); ((HIVE_COTE)c).setBuildIndividualsFromResultsFiles(true); ((HIVE_COTE)c).setResultsFileLocationParameters(resultsPath, dataset, fold); ((HIVE_COTE)c).setClassifiersNamesForFileRead(cls); } else throw new UnsupportedOperationException("ERROR: currently only loading from file for CAWPE and no results file path has been set. " + "Call setClassifier with an ExperimentalArguments object exp with exp.resultsWriteLocation (contains component classifier results) and exp.datasetName set"); break; case "HC-PF_I": if(canLoadFromFile){ String[] cls={"PF_I","TSF_I","cBOSS_I","RISE_I","STC_I"};//RotF for ST c=new HIVE_COTE(); ((HIVE_COTE)c).setFillMissingDistsWithOneHotVectors(true); ((HIVE_COTE)c).setSeed(fold); ((HIVE_COTE)c).setBuildIndividualsFromResultsFiles(true); ((HIVE_COTE)c).setResultsFileLocationParameters(resultsPath, dataset, fold); ((HIVE_COTE)c).setClassifiersNamesForFileRead(cls); } else throw new UnsupportedOperationException("ERROR: currently only loading from file for CAWPE and no results file path has been set. 
" + "Call setClassifier with an ExperimentalArguments object exp with exp.resultsWriteLocation (contains component classifier results) and exp.datasetName set"); break; case "WEASEL-MUSE": c=new WEASEL_MUSE(); break; case "STC-D": c=new ShapeletTransformClassifier(); break; default: System.out.println("Unknown multivariate classifier, should not be able to get here "); System.out.println("There is a mismatch between multivariateBased and the switch statement "); throw new UnsupportedOperationException("Unknown multivariate classifier, should not be able to get here " + "There is a mismatch between multivariateBased and the switch statement "); } return c; } /** * STANDARD classifiers such as random forest etc */ public static String[] standard= { "XGBoostMultiThreaded","XGBoost","TunedXGBoost","SmallTunedXGBoost","RandF","RotF", "ContractRotF","ERotF","ERotFBag","ERotFOOB","ERotFCV","ERotFTRAIN","PLSNominalClassifier","BayesNet","ED","C45", "SVML","SVMQ","TunedSVM","SVMRBF","RidgeCV","MLP","Logistic","CAWPE","NN","NB","BN"}; public static HashSet<String> standardClassifiers=new HashSet<String>( Arrays.asList(standard)); private static Classifier setStandardClassifiers(ExperimentalArguments exp){ String classifier=exp.estimatorName; int fold=exp.foldId; Classifier c; switch(classifier) { //TIME DOMAIN CLASSIFIERS case "XGBoostMultiThreaded": c = new TunedXGBoost(); break; case "XGBoost": c = new TunedXGBoost(); ((TunedXGBoost)c).setRunSingleThreaded(true); break; case "TunedXGBoost": c = new TunedXGBoost(); ((TunedXGBoost)c).setRunSingleThreaded(true); ((TunedXGBoost)c).setTuneParameters(true); break; case "SmallTunedXGBoost": c = new TunedXGBoost(); ((TunedXGBoost)c).setRunSingleThreaded(true); ((TunedXGBoost)c).setSmallParaSearchSpace_64paras(); break; case "RandF": RandomForest r=new RandomForest(); r.setNumTrees(500); c = r; break; case "RotF": RotationForest rf=new RotationForest(); rf.setNumIterations(200); c = rf; break; case "ContractRotF": 
ContractRotationForest crf=new ContractRotationForest(); crf.setMaxNumTrees(200); c = crf; break; case "ERotFBag": EnhancedRotationForest erf=new EnhancedRotationForest(); erf.setBagging(true); erf.setMaxNumTrees(200); c = erf; break; case "ERotFOOB": erf=new EnhancedRotationForest(); erf.setBagging(false); erf.setTrainEstimateMethod("OOB"); erf.setMaxNumTrees(200); c = erf; break; case "ERotFCV": erf=new EnhancedRotationForest(); erf.setBagging(false); erf.setTrainEstimateMethod("CV"); erf.setMaxNumTrees(200); c = erf; break; case "ERotF": case "ERotFTRAIN": erf=new EnhancedRotationForest(); erf.setTrainEstimateMethod("TRAIN"); erf.setMaxNumTrees(200); c = erf; break; case "PLSNominalClassifier": c = new PLSNominalClassifier(); break; case "BayesNet": c = new BayesNet(); break; case "ED": c= KNN.CONFIGS.get(classifier).build(); break; case "C45": c=new J48(); break; case "NB": c=new NaiveBayes(); break; case "SVML": c=new SMO(); PolyKernel p=new PolyKernel(); p.setExponent(1); ((SMO)c).setKernel(p); ((SMO)c).setRandomSeed(fold); ((SMO)c).setBuildLogisticModels(true); break; case "SVMQ": c=new SMO(); PolyKernel poly=new PolyKernel(); poly.setExponent(2); ((SMO)c).setKernel(poly); ((SMO)c).setRandomSeed(fold); ((SMO)c).setBuildLogisticModels(true); break; case "SVMRBF": c=new SMO(); RBFKernel rbf=new RBFKernel(); rbf.setGamma(0.5); ((SMO)c).setC(5); ((SMO)c).setKernel(rbf); ((SMO)c).setRandomSeed(fold); ((SMO)c).setBuildLogisticModels(true); break; case "TunedSVM": c=new TunedSVM(); ((TunedSVM)c).optimiseKernel(true); ((TunedSVM)c).setRandomSeed(fold); break; case "RidgeCV": c=new RidgeClassifierCV(); break; case "BN": c=new BayesNet(); break; case "MLP": c=new MultilayerPerceptron(); break; case "Logistic": c= new Logistic(); break; case "CAWPE": c=new CAWPE(); break; case "NN": kNN k=new kNN(100); k.setCrossValidate(true); k.normalise(false); k.setDistanceFunction(new EuclideanDistance()); return k; default: System.out.println("Unknown standard classifier 
"+classifier+" should not be able to get here "); System.out.println("There is a mismatch between otherClassifiers and the switch statement "); throw new UnsupportedOperationException("Unknown standard classifier "+classifier+" should not be able to get here " + "There is a mismatch between otherClassifiers and the switch statement "); } return c; } /** * BESPOKE classifiers for particular set ups. Use if you want some special configuration/pipeline * not encapsulated within a single classifier */ public static String[] bespoke= {"HIVE-COTE 1.0","HIVE-COTE 2.0","HIVE-COTE","HC-TDE","HC-CIF","HC-WEASEL", "HC-BcSBOSS","HC-cSBOSS","TunedHIVE-COTE","HC-S-BOSS", "HC2-MultiArsenal","HC2-MiniArsenal","HC2-FreshPRINCE","HC2-FreshPRINCE-MultiArsenal"}; public static HashSet<String> bespokeClassifiers=new HashSet<String>( Arrays.asList(bespoke)); private static Classifier setBespokeClassifiers(ExperimentalArguments exp){ String classifier=exp.estimatorName,resultsPath="",dataset=""; int fold=exp.foldId; Classifier c; boolean canLoadFromFile=true; if(exp.resultsWriteLocation==null || exp.datasetName==null) canLoadFromFile=false; else{ resultsPath=exp.resultsWriteLocation; dataset=exp.datasetName; } switch(classifier) { case "HIVE-COTE 1.0": if(canLoadFromFile){ String[] cls={"TSF","RISE","STC","cBOSS"};//RotF for ST c=new HIVE_COTE(); ((HIVE_COTE)c).setFillMissingDistsWithOneHotVectors(true); ((HIVE_COTE)c).setSeed(fold); ((HIVE_COTE)c).setBuildIndividualsFromResultsFiles(true); ((HIVE_COTE)c).setResultsFileLocationParameters(resultsPath, dataset, fold); ((HIVE_COTE)c).setClassifiersNamesForFileRead(cls); } else throw new UnsupportedOperationException("ERROR: currently only loading from file for CAWPE and no results file path has been set. 
" + "Call setClassifier with an ExperimentalArguments object exp with exp.resultsWriteLocation (contains component classifier results) and exp.datasetName set"); break; case "HIVE-COTE 2.0": if(canLoadFromFile){ String[] cls={"DrCIF","TDE","Arsenal","STC"};//RotF for ST c=new HIVE_COTE(); ((HIVE_COTE)c).setFillMissingDistsWithOneHotVectors(true); ((HIVE_COTE)c).setSeed(fold); ((HIVE_COTE)c).setBuildIndividualsFromResultsFiles(true); ((HIVE_COTE)c).setResultsFileLocationParameters(resultsPath, dataset, fold); ((HIVE_COTE)c).setClassifiersNamesForFileRead(cls); } else throw new UnsupportedOperationException("ERROR: currently only loading from file for CAWPE and no results file path has been set. " + "Call setClassifier with an ExperimentalArguments object exp with exp.resultsWriteLocation (contains component classifier results) and exp.datasetName set"); break; case "HC2-FreshPRINCE": if(canLoadFromFile){ String[] cls={"DrCIF","TDE","Arsenal","STC","FreshPRINCE"}; c=new HIVE_COTE(); ((HIVE_COTE)c).setFillMissingDistsWithOneHotVectors(true); ((HIVE_COTE)c).setSeed(fold); ((HIVE_COTE)c).setBuildIndividualsFromResultsFiles(true); ((HIVE_COTE)c).setResultsFileLocationParameters(resultsPath, dataset, fold); ((HIVE_COTE)c).setClassifiersNamesForFileRead(cls); } else throw new UnsupportedOperationException("ERROR: currently only loading from file for CAWPE and no results file path has been set. 
" + "Call setClassifier with an ExperimentalArguments object exp with exp.resultsWriteLocation (contains component classifier results) and exp.datasetName set"); break; case "HC2-MiniArsenal": if(canLoadFromFile){ String[] cls={"DrCIF","TDE","MiniArsenal","STC"}; c=new HIVE_COTE(); ((HIVE_COTE)c).setFillMissingDistsWithOneHotVectors(true); ((HIVE_COTE)c).setSeed(fold); ((HIVE_COTE)c).setBuildIndividualsFromResultsFiles(true); ((HIVE_COTE)c).setResultsFileLocationParameters(resultsPath, dataset, fold); ((HIVE_COTE)c).setClassifiersNamesForFileRead(cls); } else throw new UnsupportedOperationException("ERROR: currently only loading from file for CAWPE and no results file path has been set. " + "Call setClassifier with an ExperimentalArguments object exp with exp.resultsWriteLocation (contains component classifier results) and exp.datasetName set"); break; case "HC2-MultiArsenal": if(canLoadFromFile){ String[] cls={"DrCIF","TDE","MultiArsenal","STC"}; c=new HIVE_COTE(); ((HIVE_COTE)c).setFillMissingDistsWithOneHotVectors(true); ((HIVE_COTE)c).setSeed(fold); ((HIVE_COTE)c).setBuildIndividualsFromResultsFiles(true); ((HIVE_COTE)c).setResultsFileLocationParameters(resultsPath, dataset, fold); ((HIVE_COTE)c).setClassifiersNamesForFileRead(cls); } else throw new UnsupportedOperationException("ERROR: currently only loading from file for CAWPE and no results file path has been set. 
" + "Call setClassifier with an ExperimentalArguments object exp with exp.resultsWriteLocation (contains component classifier results) and exp.datasetName set"); break; case "HC2-FreshPRINCE-MultiArsenal": if(canLoadFromFile){ String[] cls={"DrCIF","TDE","MultiArsenal","STC","FreshPRINCE"}; c=new HIVE_COTE(); ((HIVE_COTE)c).setFillMissingDistsWithOneHotVectors(true); ((HIVE_COTE)c).setSeed(fold); ((HIVE_COTE)c).setBuildIndividualsFromResultsFiles(true); ((HIVE_COTE)c).setResultsFileLocationParameters(resultsPath, dataset, fold); ((HIVE_COTE)c).setClassifiersNamesForFileRead(cls); } else throw new UnsupportedOperationException("ERROR: currently only loading from file for CAWPE and no results file path has been set. " + "Call setClassifier with an ExperimentalArguments object exp with exp.resultsWriteLocation (contains component classifier results) and exp.datasetName set"); break; case "HC-TDE": if(canLoadFromFile){ String[] cls={"TSF","TDE","RISE","STC"};//RotF for ST c=new HIVE_COTE(); ((HIVE_COTE)c).setFillMissingDistsWithOneHotVectors(true); ((HIVE_COTE)c).setSeed(fold); ((HIVE_COTE)c).setBuildIndividualsFromResultsFiles(true); ((HIVE_COTE)c).setResultsFileLocationParameters(resultsPath, dataset, fold); ((HIVE_COTE)c).setClassifiersNamesForFileRead(cls); } else throw new UnsupportedOperationException("ERROR: currently only loading from file for CAWPE and no results file path has been set. 
" + "Call setClassifier with an ExperimentalArguments object exp with exp.resultsWriteLocation (contains component classifier results) and exp.datasetName set"); break; case "HC-CIF": if(canLoadFromFile){ String[] cls={"CIF","cBOSS","RISE","STC"};//RotF for ST c=new HIVE_COTE(); ((HIVE_COTE)c).setFillMissingDistsWithOneHotVectors(true); ((HIVE_COTE)c).setSeed(fold); ((HIVE_COTE)c).setBuildIndividualsFromResultsFiles(true); ((HIVE_COTE)c).setResultsFileLocationParameters(resultsPath, dataset, fold); ((HIVE_COTE)c).setClassifiersNamesForFileRead(cls); } else throw new UnsupportedOperationException("ERROR: currently only loading from file for CAWPE and no results file path has been set. " + "Call setClassifier with an ExperimentalArguments object exp with exp.resultsWriteLocation (contains component classifier results) and exp.datasetName set"); break; case "HC-WEASEL": if(canLoadFromFile){ String[] cls={"TSF","WEASEL","RISE","STC"};//RotF for ST c=new HIVE_COTE(); ((HIVE_COTE)c).setFillMissingDistsWithOneHotVectors(true); ((HIVE_COTE)c).setSeed(fold); ((HIVE_COTE)c).setBuildIndividualsFromResultsFiles(true); ((HIVE_COTE)c).setResultsFileLocationParameters(resultsPath, dataset, fold); ((HIVE_COTE)c).setClassifiersNamesForFileRead(cls); } else throw new UnsupportedOperationException("ERROR: currently only loading from file for CAWPE and no results file path has been set. 
" + "Call setClassifier with an ExperimentalArguments object exp with exp.resultsWriteLocation (contains component classifier results) and exp.datasetName set"); break; case "HC-S-BOSS": if(canLoadFromFile){ String[] cls={"TSF","S-BOSS","RISE","STC"};//RotF for ST c=new HIVE_COTE(); ((HIVE_COTE)c).setFillMissingDistsWithOneHotVectors(true); ((HIVE_COTE)c).setSeed(fold); ((HIVE_COTE)c).setBuildIndividualsFromResultsFiles(true); ((HIVE_COTE)c).setResultsFileLocationParameters(resultsPath, dataset, fold); ((HIVE_COTE)c).setClassifiersNamesForFileRead(cls); } else throw new UnsupportedOperationException("ERROR: currently only loading from file for CAWPE and no results file path has been set. " + "Call setClassifier with an ExperimentalArguments object exp with exp.resultsWriteLocation (contains component classifier results) and exp.datasetName set"); break; case "HC-BcSBOSS": if(canLoadFromFile){ String[] cls={"TSF","BcS-BOSS","RISE","STC"};//RotF for ST c=new HIVE_COTE(); ((HIVE_COTE)c).setFillMissingDistsWithOneHotVectors(true); ((HIVE_COTE)c).setSeed(fold); ((HIVE_COTE)c).setBuildIndividualsFromResultsFiles(true); ((HIVE_COTE)c).setResultsFileLocationParameters(resultsPath, dataset, fold); ((HIVE_COTE)c).setClassifiersNamesForFileRead(cls); } else throw new UnsupportedOperationException("ERROR: currently only loading from file for CAWPE and no results file path has been set. 
" + "Call setClassifier with an ExperimentalArguments object exp with exp.resultsWriteLocation (contains component classifier results) and exp.datasetName set"); break; case "HC-cSBOSS": if(canLoadFromFile){ String[] cls={"TSF","cS-BOSS","RISE","STC"};//RotF for ST c=new HIVE_COTE(); ((HIVE_COTE)c).setFillMissingDistsWithOneHotVectors(true); ((HIVE_COTE)c).setSeed(fold); ((HIVE_COTE)c).setBuildIndividualsFromResultsFiles(true); ((HIVE_COTE)c).setResultsFileLocationParameters(resultsPath, dataset, fold); ((HIVE_COTE)c).setClassifiersNamesForFileRead(cls); } else throw new UnsupportedOperationException("ERROR: currently only loading from file for CAWPE and no results file path has been set. " + "Call setClassifier with an ExperimentalArguments object exp with exp.resultsWriteLocation (contains component classifier results) and exp.datasetName set"); break; case "HIVE-COTE": c=new HIVE_COTE(); ((HIVE_COTE)c).setFillMissingDistsWithOneHotVectors(true); ((HIVE_COTE)c).setSeed(fold); break; case "TunedHIVE-COTE": if(canLoadFromFile){ String[] cls=new String[]{"TSF","BOSS","RISE","STC"};//RotF for ST HIVE_COTE hc=new HIVE_COTE(); hc.setFillMissingDistsWithOneHotVectors(true); hc.setSeed(fold); hc.setBuildIndividualsFromResultsFiles(true); hc.setResultsFileLocationParameters(resultsPath, dataset, fold); hc.setClassifiersNamesForFileRead(cls); TunedClassifier tuner=new TunedClassifier(); tuner.setClassifier(hc); ParameterSpace pc=new ParameterSpace(); double[] alphaVals={1,2,3,4,5,6,7,8,9,10}; pc.addParameter("A",alphaVals); tuner.setParameterSpace(pc); c=tuner; } else throw new UnsupportedOperationException("ERROR: currently only loading from file for CAWPE and no results file path has been set. 
" + "Call setClassifier with an ExperimentalArguments object exp with exp.resultsWriteLocation (contains component classifier results) and exp.datasetName set"); break; default: System.out.println("Unknown bespoke classifier, should not be able to get here "); System.out.println("There is a mismatch between bespokeClassifiers and the switch statement "); throw new UnsupportedOperationException("Unknown bespoke classifier, should not be able to get here " + "There is a mismatch between bespokeClassifiers and the switch statement "); } return c; } /** * BESPOKE classifiers for particular set ups. Use if you want some special configuration/pipeline * not encapsulated within a single classifier */ public static String[] earlyClassification= {"TEASER","ECEC","eSTC","P85-DrCIF","P85c3-DrCIF","TEASER-DrCIF", "SR1CF1-DrCIF","ECEC-DrCIF" }; public static HashSet<String> earlyClassifiers=new HashSet<String>( Arrays.asList(earlyClassification)); private static Classifier setEarlyClassifiers(ExperimentalArguments exp){ String classifier=exp.estimatorName,resultsPath="",dataset=""; int fold=exp.foldId; Classifier c; boolean canLoadFromFile=true; if(exp.resultsWriteLocation==null || exp.datasetName==null) canLoadFromFile=false; else{ resultsPath=exp.resultsWriteLocation; dataset=exp.datasetName; } switch(classifier) { case "TEASER": c = new EarlyDecisionMakerClassifier(new WEASEL(), new TEASER()); break; case "ECEC": c = new EarlyDecisionMakerClassifier(new WEASEL(), new ECEC()); break; case "eSTC": c = new ShapeletTransformEarlyClassifier(); break; case "P85-DrCIF": c = new EarlyDecisionMakerClassifier(new DrCIF(), new ProbabilityThreshold()); break; case "P85c3-DrCIF": c = new EarlyDecisionMakerClassifier(new DrCIF(), new ProbabilityThreshold()); ((ProbabilityThreshold) ((EarlyDecisionMakerClassifier) c).getDecisionMaker()).setConsecutivePredictions(3); break; case "TEASER-DrCIF": c = new EarlyDecisionMakerClassifier(new DrCIF(), new TEASER()); break; case "SR1CF1-DrCIF": c = 
new EarlyDecisionMakerClassifier(new DrCIF(), new SR1CF1()); break; case "ECEC-DrCIF": c = new EarlyDecisionMakerClassifier(new DrCIF(), new ECEC()); break; default: System.out.println("Unknown bespoke classifier, should not be able to get here "); System.out.println("There is a mismatch between bespokeClassifiers and the switch statement "); throw new UnsupportedOperationException("Unknown bespoke classifier, should not be able to get here " + "There is a mismatch between bespokeClassifiers and the switch statement "); } return c; } /** * * setClassifier, which takes the experimental * arguments themselves and therefore the classifiers can take from them whatever they * need, e.g the dataset name, the fold id, separate checkpoint paths, etc. * * To take this idea further, to be honest each of the TSC-specific classifiers * could/should have a constructor and/or factory that builds the classifier * from the experimental args. * * previous usage was setClassifier(String classifier name, int fold). * this can be reproduced with setClassifierClassic below. 
* */ public static Classifier setClassifier(ExperimentalArguments exp){ String classifier=exp.estimatorName; Classifier c = null; if(distanceBased.contains(classifier)) c=setDistanceBased(exp); else if(dictionaryBased.contains(classifier)) c=setDictionaryBased(exp); else if(intervalBased.contains(classifier)) c=setIntervalBased(exp); else if(shapeletBased.contains(classifier)) c=setShapeletBased(exp); else if(hybridBased.contains(classifier)) c=setHybridBased(exp); else if(multivariateBased.contains(classifier)) c=setMultivariate(exp); else if(standardClassifiers.contains(classifier)) c=setStandardClassifiers(exp); else if(bespokeClassifiers.contains(classifier)) c=setBespokeClassifiers(exp); else if(earlyClassifiers.contains(classifier)) c=setEarlyClassifiers(exp); else{ System.out.println("Unknown classifier "+classifier+" it is not in any of the sublists "); throw new UnsupportedOperationException("Unknown classifier "+classifier+" it is not in any of the sublists on ClassifierLists "); } if (c instanceof EnhancedAbstractClassifier) { ((EnhancedAbstractClassifier) c).setSeed(exp.foldId); ((EnhancedAbstractClassifier) c).setDebug(exp.debug); } else if (c instanceof Randomizable) { //normal weka classifiers that aren't EnhancedAbstractClassifiers //EAC's setSeed sets up a random object internally too. ((Randomizable)c).setSeed(exp.foldId); } return c; } /** * This method redproduces the old usage exactly as it was in old experiments.java. * If you try build any classifier that uses any experimental info other than * exp.classifierName or exp.foldID, an exception will be thrown. * In particular, any classifier that needs access to the results from others * e.g. CAWPEFROMFILE, will throw an UnsupportedOperationException if you try use it like this. 
* @param classifier classifier name, as listed in one of the membership arrays above
 * @param fold resample id, used to seed the classifier
 * @return the configured classifier
 */
public static Classifier setClassifierClassic(String classifier, int fold){
    ExperimentalArguments settings = new ExperimentalArguments();
    settings.estimatorName = classifier;
    settings.foldId = fold;
    return setClassifier(settings);
}

/**
 * Smoke test: instantiates every classifier in the univariate, multivariate, standard
 * and bespoke lists at fold 0 and prints the concrete class returned.
 */
public static void main(String[] args) throws Exception {
    System.out.println("Testing set classifier by running through the list in ClassifierLists.allUnivariate and " +
            "ClassifierLists.allMultivariate");
    // Same four groups, in the same order, as the original copy-pasted loops.
    String[][] groups = { allUnivariate, allMultivariate, standard, bespoke };
    for (String[] group : groups) {
        for (String name : group) {
            System.out.println("Initialising " + name);
            Classifier built = setClassifierClassic(name, 0);
            System.out.println("Returned classifier " + built.getClass().getSimpleName());
        }
    }
}
}
57,791
46.331695
205
java
tsml-java
tsml-java-master/src/main/java/experiments/ClustererLists.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
package experiments;

import machine_learning.clusterers.CAST;
import machine_learning.clusterers.DensityPeaks;
import machine_learning.clusterers.KMeans;
import machine_learning.clusterers.KMedoids;
import tsml.classifiers.distance_based.distances.DistanceFunctionAdapter;
import tsml.classifiers.distance_based.distances.dtw.DTWDistance;
import tsml.classifiers.distance_based.distances.erp.ERPDistance;
import tsml.classifiers.distance_based.distances.lcss.LCSSDistance;
import tsml.classifiers.distance_based.distances.msm.MSMDistance;
import tsml.classifiers.distance_based.distances.transformed.BaseTransformDistanceMeasure;
import tsml.classifiers.distance_based.distances.twed.TWEDistance;
import tsml.classifiers.distance_based.distances.wdtw.WDTWDistance;
import tsml.clusterers.KShape;
import tsml.clusterers.TTC;
import tsml.clusterers.UnsupervisedShapelets;
import tsml.transformers.Derivative;
import weka.clusterers.Clusterer;
import weka.clusterers.EM;
import weka.clusterers.NumberOfClustersRequestable;
import weka.clusterers.SimpleKMeans;

import java.util.Arrays;
import java.util.HashSet;

/**
 * Factory for the clusterers implemented in tsml, keyed by estimator name.
 *
 * @author James Large (james.large@uea.ac.uk) and Tony Bagnall
 */
public class ClustererLists {

    //All implemented clusterers in tsml
    //<editor-fold defaultstate="collapsed" desc="All univariate time series classifiers">
    public static String[] allClst={
        "KMeans", "KShape"
    };
    //</editor-fold>
    public static HashSet<String> allClusterers=new HashSet<>( Arrays.asList(allClst));

    /**
     *
     * setClusterer, which takes the experimental
     * arguments themselves and therefore the classifiers can take from them whatever they
     * need, e.g the dataset name, the fold id, separate checkpoint paths, etc.
     *
     * To take this idea further, to be honest each of the TSC-specific classifiers
     * could/should have a constructor and/or factory that builds the classifier
     * from the experimental args.
     *
     * previous usage was setClusterer(String clusterer name, int fold).
     * this can be reproduced with setClustererClassic below.
     *
     * @param exp experimental arguments; reads exp.estimatorName (matched case-insensitively)
     *            and exp.numClassValues (used as the requested number of clusters)
     * @return the configured clusterer
     * @throws UnsupportedOperationException if the estimator name is not recognised
     */
    public static Clusterer setClusterer(ExperimentalArguments exp) throws Exception {
        String cls = exp.estimatorName.toLowerCase();
        Clusterer c = null;
        switch(cls) {
            case "simplekmeans":
                c = new SimpleKMeans();
                break;
            case "em":
                c = new EM();
                break;
            case "kmeans":
                c = new KMeans();
                break;
            case "kmedoids":
                c = new KMedoids();
                break;
            case "densitypeaks":
                c = new DensityPeaks();
                break;
            case "cast":
                c = new CAST();
                break;
            case "ushapelets":
                c = new UnsupervisedShapelets();
                break;
            case "kshape":
                c = new KShape();
                break;
            case "ttc":
                c = new TTC();
                break;
            case "ushapelets2":
                c = new UnsupervisedShapelets();
                ((UnsupervisedShapelets)c).setExhaustiveSearch(true);
                break;
            case "ushapelets3":
                c = new UnsupervisedShapelets();
                ((UnsupervisedShapelets)c).setRandomSearchProportion(0.2);
                break;
            case "ushapelets4":
                c = new UnsupervisedShapelets();
                ((UnsupervisedShapelets)c).setUseKMeans(false);
                break;
            case "ushapelets5":
                c = new UnsupervisedShapelets();
                ((UnsupervisedShapelets)c).setExhaustiveSearch(true);
                ((UnsupervisedShapelets)c).setUseKMeans(false);
                break;
            case "ushapelets6":
                c = new UnsupervisedShapelets();
                ((UnsupervisedShapelets)c).setRandomSearchProportion(0.2);
                ((UnsupervisedShapelets)c).setUseKMeans(false);
                break;
            // k-means variants over elastic distances; raw (un-normalised) series throughout.
            case "kmeans-msm":
                c = new KMeans();
                ((KMeans) c).setNormaliseData(false);
                ((KMeans) c).setDistanceFunction(new DistanceFunctionAdapter(new MSMDistance()));
                break;
            case "kmeans-dtwfull":
                c = new KMeans();
                ((KMeans) c).setNormaliseData(false);
                ((KMeans) c).setDistanceFunction(new DistanceFunctionAdapter(new DTWDistance()));
                break;
            case "kmeans-dtw20":
                c = new KMeans();
                ((KMeans) c).setNormaliseData(false);
                DTWDistance dtw201 = new DTWDistance();
                dtw201.setWindow(0.2);
                ((KMeans) c).setDistanceFunction(new DistanceFunctionAdapter(dtw201));
                break;
            case "kmeans-lcss":
                c = new KMeans();
                ((KMeans) c).setNormaliseData(false);
                ((KMeans) c).setDistanceFunction(new DistanceFunctionAdapter(new LCSSDistance()));
                break;
            case "kmeans-erp":
                c = new KMeans();
                ((KMeans) c).setNormaliseData(false);
                ((KMeans) c).setDistanceFunction(new DistanceFunctionAdapter(new ERPDistance()));
                break;
            case "kmeans-twe":
                c = new KMeans();
                ((KMeans) c).setNormaliseData(false);
                ((KMeans) c).setDistanceFunction(new DistanceFunctionAdapter(new TWEDistance()));
                break;
            case "kmeans-wdtw":
                c = new KMeans();
                ((KMeans) c).setNormaliseData(false);
                ((KMeans) c).setDistanceFunction(new DistanceFunctionAdapter(new WDTWDistance()));
                break;
            case "kmeans-ddtwfull":
                c = new KMeans();
                ((KMeans) c).setNormaliseData(false);
                ((KMeans) c).setDistanceFunction(new DistanceFunctionAdapter(
                        new BaseTransformDistanceMeasure("DDTWDistanceFull", new Derivative(), new DTWDistance())));
                break;
            case "kmeans-ddtw20":
                c = new KMeans();
                ((KMeans) c).setNormaliseData(false);
                DTWDistance dtw202 = new DTWDistance();
                dtw202.setWindow(0.2);
                ((KMeans) c).setDistanceFunction(new DistanceFunctionAdapter(
                        new BaseTransformDistanceMeasure("DDTWDistance20", new Derivative(), dtw202)));
                break;
            case "kmeans-wddtw":
                c = new KMeans();
                ((KMeans) c).setNormaliseData(false);
                ((KMeans) c).setDistanceFunction(new DistanceFunctionAdapter(
                        new BaseTransformDistanceMeasure("WDDTWDistance", new Derivative(), new WDTWDistance())));
                break;
            case "kmeans-dtwfull-refined":
                c = new KMeans();
                ((KMeans) c).setNormaliseData(false);
                ((KMeans) c).setRefinedInitialCenters(true);
                ((KMeans) c).setDistanceFunction(new DistanceFunctionAdapter(new DTWDistance()));
                break;
            case "kmeans-dtw20-refined":
                c = new KMeans();
                ((KMeans) c).setNormaliseData(false);
                ((KMeans) c).setRefinedInitialCenters(true);
                DTWDistance dtw203 = new DTWDistance();
                dtw203.setWindow(0.2);
                ((KMeans) c).setDistanceFunction(new DistanceFunctionAdapter(dtw203));
                break;
            default:
                // BUG FIX: previously this only printed a warning and fell through to
                // "return null", deferring the failure to an NPE at the caller. Fail fast
                // instead, matching the behaviour of ClassifierLists.setClassifier.
                System.out.println("Unknown clusterer " + cls);
                throw new UnsupportedOperationException("Unknown clusterer " + cls
                        + ", there is a mismatch with the switch statement in ClustererLists.setClusterer");
        }
        if (c instanceof NumberOfClustersRequestable)
            ((NumberOfClustersRequestable)c).setNumClusters(exp.numClassValues);
        return c;
    }

    /**
     * This method reproduces the old usage exactly as it was in old experiments.java.
     * If you try build any clusterer that uses any experimental info other than
     * exp.estimatorName or exp.foldID, an exception will be thrown.
     * @param clusterer clusterer name
     * @param fold resample id
     * @return clusterer
     */
    public static Clusterer setClustererClassic(String clusterer, int fold) throws Exception {
        ExperimentalArguments exp=new ExperimentalArguments();
        exp.estimatorName =clusterer;
        exp.foldId=fold;
        return setClusterer(exp);
    }

    public static void main(String[] args) throws Exception {
    }
}
9,223
38.758621
169
java
tsml-java
tsml-java-master/src/main/java/experiments/ClusteringExperiments.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package experiments;

import com.google.common.testing.GcFinalization;
import evaluation.storage.ClustererResults;
import experiments.data.DatasetLoading;
import tsml.clusterers.EnhancedAbstractClusterer;
import utilities.ClusteringUtilities;
import weka.clusterers.Clusterer;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Randomizable;

import java.io.*;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;

import static utilities.ClusteringUtilities.zNormaliseWithClass;
import static utilities.GenericTools.indexOfMax;

/**
 * The clustering experimental class of the tsml codebase. The 'main' method to run is
 * setupAndRunExperiment(ExperimentalArguments expSettings). An execution of this will evaluate
 * a single clusterer on a single resample of a single dataset.
 *
 * Given an ExperimentalArguments object, which may be parsed from command line arguments or
 * constructed in code, this will load the clusterer and dataset specified, prep the location
 * to write results to, train the clusterer - potentially generating an error estimate via
 * cross validation on the train set as well - and then predict the cases of the test set.
 *
 * The primary outputs are the train and/or 'testResampleX.csv' files.
 *
 * @author James Large (james.large@uea.ac.uk), Tony Bagnall (anthony.bagnall@uea.ac.uk)
 */
public class ClusteringExperiments {

    private final static Logger LOGGER = Logger.getLogger(ClusteringExperiments.class.getName());

    public static boolean debug = false;

    // Set per-experiment by buildExperimentDirectoriesAndFilenames; read by training/quitEarly.
    // NOTE(review): static mutable state - assumes one experiment per JVM at a time; confirm
    // before running experiments concurrently in a single process.
    private static boolean testFoldExists;
    private static boolean trainFoldExists;

    /** If true, experiments will not print or log to stdout/err anything other than exceptions (SEVERE). */
    public static boolean beQuiet = false;

    private static final String WORKSPACE_DIR = "Workspace";
    private static final String PREDICTIONS_DIR = "Predictions";

    /**
     * Parses args into an ExperimentalArguments object, then calls
     * setupAndRunExperiment(ExperimentalArguments expSettings).
     *
     * Calling with the --help argument, or calling with un-parsable parameters, will print a
     * summary of the possible parameters. Argument key-value pairs are separated by '='.
     * The 5 basic, always required, arguments are:
     *   -dp --dataPath       | --dataPath=C:/Datasets/
     *   -rp --resultsPath    | --resultsPath=C:/Results/
     *   -cn --classifierName | --classifierName=RandF
     *   -dn --datasetName    | --datasetName=ItalyPowerDemand
     *   -f  --fold           | --fold=1
     *
     * Use --help to see all the optional parameters and more information about each of them.
     * If running locally, it may be easier to build the ExperimentalArguments object yourself
     * and call setupAndRunExperiment(...) directly instead of building the String[] args.
     */
    public static void main(String[] args) throws Exception {
        // Even if all else fails, print the args as a sanity check for cluster runs.
        if (!beQuiet) {
            System.out.println("Raw args:");
            for (String str : args)
                System.out.println("\t" + str);
            System.out.print("\n");
        }

        if (args.length > 0) {
            ExperimentalArguments expSettings = new ExperimentalArguments(args);
            setupAndRunExperiment(expSettings);
        }
        else { // Manually set args
            int folds = 30;

            /* Change these settings for your experiment: */
            String clusterer = "KMeans"; // Clusterer name: see ClustererLists for valid options
            ArrayList<String> parameters = new ArrayList<>();
            parameters.add("-dp=src\\main\\java\\experiments\\data\\tsc\\"); // Where to get datasets
            parameters.add("-rp=temp\\");       // Where to write results
            parameters.add("-cn=" + clusterer); // Clusterer name
            parameters.add("-dn=");             // Problem name; don't change here, overwritten per entry of probFiles
            parameters.add("-f=1");             // Resample number (resample 1 is stored as testResample0.csv, it's a cluster thing)
            parameters.add("-d=true");          // Debugging
            parameters.add("--force=true");     // Overwrites existing results if true, otherwise set to false

            String[] settings = new String[parameters.size()];
            int count = 0;
            for (String str : parameters)
                settings[count++] = str;

            String[] probFiles = {"ChinaTown"}; // Problem name(s)
            // String[] probFiles = DatasetLists.fixedLengthMultivariate;

            /*
             * END OF SETTINGS
             */
            System.out.println("Manually set args:");
            for (String str : settings)
                System.out.println("\t" + str);
            System.out.println("");

            for (String prob : probFiles) {
                // BUG FIX: the dataset name lives at index 3 ("-dn=") and the fold at index 4
                // ("-f="). The previous code wrote to indices 4 and 5, which left a stale empty
                // "-dn=", duplicated the dataset-name flag, and destroyed the "-d=true" flag.
                settings[3] = "-dn=" + prob;
                for (int i = 1; i <= folds; i++) {
                    settings[4] = "-f=" + i;
                    ExperimentalArguments expSettings = new ExperimentalArguments(settings);
                    System.out.println("Sequential experiment with " + expSettings);
                    setupAndRunExperiment(expSettings);
                }
            }
        }
    }

    /**
     * Runs an experiment with the given settings. For the more direct method in case e.g.
     * you have a bespoke clusterer not handled by ClustererList or a dataset that is sampled
     * in a bespoke way, use runExperiment.
     *
     * 1) Sets up the logger.
     * 2) Sets up the results write path
     * 3) Checks whether this experiment's results already exist. If so, exit
     * 4) Constructs the clusterer
     * 5) Samples the dataset.
     * 6) If we're good to go, runs the experiment.
     *
     * @return {trainResults, testResults}, or null if the results already exist
     */
    public static ClustererResults[] setupAndRunExperiment(ExperimentalArguments expSettings) throws Exception {
        if (beQuiet)
            LOGGER.setLevel(Level.SEVERE); // only print severe things
        else {
            if (debug)
                LOGGER.setLevel(Level.FINEST); // print everything
            else
                LOGGER.setLevel(Level.INFO); // print warnings, useful info etc, but not simple progress messages
            DatasetLoading.setDebug(debug);
        }
        LOGGER.log(Level.FINE, expSettings.toString());

        buildExperimentDirectoriesAndFilenames(expSettings);

        // Check whether results already exist; if so and force evaluation is false, just quit
        if (quitEarlyDueToResultsExistence(expSettings))
            return null;

        Instances[] data = DatasetLoading.sampleDataset(expSettings.dataReadLocation,
                expSettings.datasetName, expSettings.foldId);
        expSettings.numClassValues = data[0].numClasses();

        if (expSettings.normalise) {
            zNormaliseWithClass(data[0]);
            zNormaliseWithClass(data[1]);
        }

        // Replace missing values with 0 if enabled
        if (expSettings.replaceMissingValues) {
            for (Instance inst : data[0])
                inst.replaceMissingValues(new double[data[0].numAttributes()]);
            for (Instance inst : data[1])
                inst.replaceMissingValues(new double[data[1].numAttributes()]);
        }

        // If a pre-instantiated clusterer instance hasn't been supplied, generate one here
        if (expSettings.clusterer == null) {
            expSettings.clusterer = ClustererLists.setClusterer(expSettings);
        }

        setupClustererExperimentalOptions(expSettings, expSettings.clusterer);
        ClustererResults[] results = runExperiment(expSettings, data[0], data[1], expSettings.clusterer);
        LOGGER.log(Level.INFO, "Experiment finished " + expSettings.toShortString());

        return results;
    }

    /**
     * Perform an actual experiment, using the loaded clusterer and resampled dataset given,
     * writing to the specified results location.
     *
     * 1) If needed, set up file paths and flags related to a single parameter evaluation and/or
     *    the clusterer's internal parameter saving things
     * 2) If we want to be performing cv to find an estimate of the error on the train set,
     *    either do that here or set up the clusterer to do it internally during buildClusterer()
     * 3) Do the actual training, i.e. buildClusterer()
     * 4) Save information needed from the training, e.g. train estimates, serialising the clusterer, etc.
     * 5) Evaluate the trained clusterer on the test set
     * 6) Save test results
     *
     * @return the ClustererResults for this experiment, {train, test}, or null on failure
     */
    public static ClustererResults[] runExperiment(ExperimentalArguments expSettings,
                                                   Instances trainSet, Instances testSet,
                                                   Clusterer clusterer) {
        ClustererResults[] experimentResults; // the combined container, to hold {trainResults, testResults} on return
        LOGGER.log(Level.FINE, "Preamble complete, real experiment starting.");

        try {
            // Since we are copying train and test data, no need to copy it again
            if (clusterer instanceof EnhancedAbstractClusterer)
                ((EnhancedAbstractClusterer) clusterer).setCopyInstances(false);

            ClustererResults trainResults = training(expSettings, clusterer, trainSet);
            ClustererResults testResults = testing(expSettings, clusterer, testSet, trainResults);
            experimentResults = new ClustererResults[]{trainResults, testResults};
        }
        catch (Exception e) {
            LOGGER.log(Level.SEVERE, "Experiment failed. Settings: " + expSettings + "\n\nERROR: " + e, e);
            e.printStackTrace();
            return null; // error state
        }

        return experimentResults;
    }

    /**
     * Performs all operations related to training the clusterer, and returns a ClustererResults
     * object holding the results of training.
     *
     * At minimum these results hold the hardware benchmark timing (if requested in expSettings),
     * the memory used, and the build time.
     *
     * If a train estimate is to be generated, the results also hold predictions and results from
     * the train set, and these results are written to file.
     */
    public static ClustererResults training(ExperimentalArguments expSettings, Clusterer clusterer,
                                            Instances trainSet) throws Exception {
        // For now, just cloning the data and removing the class label
        Instances clsTrain = new Instances(trainSet);
        clsTrain.setClassIndex(-1);
        if (trainSet.classIndex() >= 0)
            clsTrain.deleteAttributeAt(trainSet.classIndex());

        long benchmark = ClassifierExperiments.findBenchmarkTime(expSettings);

        MemoryMonitor memoryMonitor = new MemoryMonitor();
        memoryMonitor.installMonitor();

        // Build on the full train data here
        long buildTime = System.nanoTime();
        clusterer.buildClusterer(clsTrain);
        buildTime = System.nanoTime() - buildTime;
        LOGGER.log(Level.FINE, "Training complete");

        // Training done, collect memory monitor results.
        // Need to wait for an update, otherwise very quick clusterers may not experience gc calls
        // during training, or the monitor may not update in time before collecting the max
        GcFinalization.awaitFullGc();
        long maxMemory = memoryMonitor.getMaxMemoryUsed();

        ClustererResults trainResults;
        if (!trainFoldExists || expSettings.forceEvaluation) {
            if (clusterer instanceof EnhancedAbstractClusterer)
                trainResults = ClusteringUtilities.getClusteringResults((EnhancedAbstractClusterer) clusterer, trainSet);
            else
                trainResults = evaluateClusterer(clusterer, clsTrain, trainSet, trainSet.numClasses());

            trainResults.setBenchmarkTime(benchmark);
            trainResults.setBuildTime(buildTime);
            trainResults.setMemory(maxMemory);

            writeResults(expSettings, trainResults, expSettings.trainFoldFileName, "train");
            LOGGER.log(Level.FINE, "Train estimate written");
        }
        else {
            // Train fold already on disk and not forcing: record resource costs only
            trainResults = new ClustererResults(trainSet.numClasses());
            trainResults.setBenchmarkTime(benchmark);
            trainResults.setBuildTime(buildTime);
            trainResults.setMemory(maxMemory);
        }

        return trainResults;
    }

    /**
     * Performs all operations related to testing the clusterer, and returns a ClustererResults
     * object holding the results of testing.
     *
     * Computational resource costs of the training process are taken from the train results.
     */
    public static ClustererResults testing(ExperimentalArguments expSettings, Clusterer clusterer,
                                           Instances testSet, ClustererResults trainResults) throws Exception {
        ClustererResults testResults;
        if (expSettings.forceEvaluation || !CollateResults.validateSingleFoldFile(expSettings.testFoldFileName)) {
            Instances clsTest = new Instances(testSet);
            clsTest.setClassIndex(-1);
            if (testSet.classIndex() >= 0)
                clsTest.deleteAttributeAt(testSet.classIndex());

            testResults = evaluateClusterer(clusterer, clsTest, testSet, testSet.numClasses());
            testResults.setBenchmarkTime(trainResults.getBenchmarkTime());
            testResults.setBuildTime(trainResults.getBuildTime());
            testResults.setMemory(trainResults.getMemory());
            LOGGER.log(Level.FINE, "Testing complete");

            writeResults(expSettings, testResults, expSettings.testFoldFileName, "test");
            LOGGER.log(Level.FINE, "Test results written");
        }
        else {
            LOGGER.log(Level.INFO, "Test file already found, written by another process.");
            testResults = new ClustererResults(expSettings.testFoldFileName);
        }
        return testResults;
    }

    /**
     * Based on experimental parameters passed, defines the target results file and workspace
     * locations for use in the rest of the experiment.
     */
    public static void buildExperimentDirectoriesAndFilenames(ExperimentalArguments expSettings) {
        // Build/make the directory to write the train and/or testFold files to:
        // [writeLoc]/[clusterer]/Predictions/[dataset]/
        String fullWriteLocation = expSettings.resultsWriteLocation + expSettings.estimatorName
                + "/" + PREDICTIONS_DIR + "/" + expSettings.datasetName + "/";
        File f = new File(fullWriteLocation);
        if (!f.exists())
            f.mkdirs();

        expSettings.testFoldFileName = fullWriteLocation + "testResample" + expSettings.foldId + ".csv";
        expSettings.trainFoldFileName = fullWriteLocation + "trainResample" + expSettings.foldId + ".csv";

        testFoldExists = CollateResults.validateSingleFoldFile(expSettings.testFoldFileName);
        trainFoldExists = CollateResults.validateSingleFoldFile(expSettings.trainFoldFileName);

        if (expSettings.supportingFilePath == null || expSettings.supportingFilePath.equals(""))
            expSettings.supportingFilePath = expSettings.resultsWriteLocation + expSettings.estimatorName
                    + "/" + WORKSPACE_DIR + "/" + expSettings.datasetName + "/";

        f = new File(expSettings.supportingFilePath);
        if (!f.exists())
            f.mkdirs();
    }

    /**
     * Returns true if the work to be done in this experiment already exists at the locations
     * defined by the experimental settings, indicating that this execution can be skipped.
     */
    public static boolean quitEarlyDueToResultsExistence(ExperimentalArguments expSettings) {
        boolean quit = false;
        if (!expSettings.forceEvaluation && trainFoldExists && testFoldExists) {
            LOGGER.log(Level.INFO, expSettings.toShortString() + " already exists at write location, exiting.");
            quit = true;
        }
        return quit;
    }

    /**
     * Based on the experimental settings passed, make any clusterer interface calls that modify
     * how the clusterer is TRAINED here, e.g. give checkpointable clusterers the location to save,
     * give contractable clusterers their contract, etc.
     */
    private static void setupClustererExperimentalOptions(ExperimentalArguments expSettings, Clusterer clusterer) {
        // Seed randomizable clusterers with the fold id for reproducibility
        if (clusterer instanceof Randomizable)
            ((Randomizable) clusterer).setSeed(expSettings.foldId);
    }

    /**
     * Meta info shall be set by writeResults(...); just generating the prediction info and
     * any info directly calculable from that here.
     *
     * @param clusterData the data with the class attribute removed, fed to the clusterer
     * @param fullData    the same instances WITH the class attribute, used for true labels
     */
    public static ClustererResults evaluateClusterer(Clusterer clusterer, Instances clusterData,
                                                     Instances fullData, int numClasses) throws Exception {
        ClustererResults res = new ClustererResults(numClasses);

        for (int i = 0; i < clusterData.numInstances(); i++) {
            double trueClassVal = fullData.instance(i).classValue();

            long startTime = System.nanoTime();
            double[] dist = clusterer.distributionForInstance(clusterData.instance(i));
            long predTime = System.nanoTime() - startTime;

            res.addPrediction(trueClassVal, dist, indexOfMax(dist), predTime, "");
        }

        res.finaliseResults();
        return res;
    }

    /**
     * Builds the human-readable description line stored in every results file: generation
     * timestamp plus a few system properties. Newlines are escaped so the description stays
     * on one line of the results file.
     */
    public static String buildExperimentDescription() {
        Date date = new Date();
        SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        StringBuilder sb = new StringBuilder("Generated by ClusteringExperiments.java on "
                + formatter.format(date) + ".");
        sb.append(" SYSTEMPROPERTIES:{");
        sb.append("user.name:").append(System.getProperty("user.name", "unknown"));
        sb.append(",os.arch:").append(System.getProperty("os.arch", "unknown"));
        sb.append(",os.name:").append(System.getProperty("os.name", "unknown"));
        sb.append("},ENDSYSTEMPROPERTIES");
        return sb.toString().replace("\n", "NEW_LINE");
    }

    /**
     * Stamps the experiment meta info (estimator, dataset, fold, split, description) onto the
     * results and writes them to the given path, finally marking the file world-writable so
     * other cluster processes can overwrite it if forced.
     */
    public static void writeResults(ExperimentalArguments exp, ClustererResults results,
                                    String fullTestWritingPath, String split) throws Exception {
        results.setTimeUnit(TimeUnit.NANOSECONDS);
        results.setEstimatorName(exp.estimatorName);
        results.setDatasetName(exp.datasetName);
        results.setFoldID(exp.foldId);
        results.setSplit(split);
        results.setDescription(buildExperimentDescription());

        results.writeFullResultsToFile(fullTestWritingPath);

        File f = new File(fullTestWritingPath);
        if (f.exists()) {
            f.setWritable(true, false);
        }
    }
}
19,601
45.56057
167
java
tsml-java
tsml-java-master/src/main/java/experiments/CollateResults.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package experiments; import evaluation.MultipleEstimatorsPairwiseTest; import evaluation.MultipleEstimatorEvaluation; import fileIO.InFile; import fileIO.OutFile; import java.io.File; import java.text.DecimalFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import statistics.distributions.BinomialDistribution; import statistics.tests.OneSampleTests; import evaluation.storage.ClassifierResults; import java.util.HashMap; /** * Class to collate results from any classifier creating standard output * There are two ways to collate results. * 1. (Tony Bagnall) The code in this class creates summary info for individual classifiers. * It does not do comparisons between classifiers, and it will build with incomplete * data, ignoring incomplete data sets. This can be run on the cluster (see below). * See method individualClassifiersCollate() for example usage * 2 (James Large) Using the MultipleEstimatorEvaluation class, detailed * comparisons between classifier can be conducted. This can create matlab driven * critical difference diagrams **On the cluster usage:** * Class to collate standard results files over multiple classifiers and problems * Usage * (assuming Collate.jar has this as the main class): * java -jar Collate.jar ResultsDirectory/ ProblemDirectory/ NumberOfFolds Classifier1 Classifier2 .... 
ClassifierN NoParasC1 NoParasC2 .... NoParasCn * e.g. java -jar -Xmx6000m Collate.jar Results/ UCIContinuous/ 30 RandF RotF 2 2 * collates the results for 30 folds for RandF and RotF in the directory for Results * on all the problems in UCIContinous (as defined by having a directory in the folder) * How it works: * * Stage 1: take all the single fold files, work out the diagnostics on test data: * Accuracy, BalancedAccuracy, NegLogLikelihood, AUROC and F1 and store the TrainCV accuracy. * all done by call to collateFolds(); * Combine folds into a single file for each statistic in ResultsDirectory/ClassifierName * these are * Counts: counts.csv, number per problem (max number is NumberOfFolds, it does not check for more). * Diagnostics: TestAcc.csv, TestF1.csv, TestBAcc.csv, TestNLL.csv, TestAUROC.csv, TrainCVAcc.csv, Timings.csv * Parameter info: Parameter1.csv, Parameter2.csv...AllTuningAccuracies.csv (if tuning occurs, all tuning values). * * Stage 2: * Output: Classifier Summary: call to method averageOverFolds() * Creates average and standard deviation over all folds based on the * created at stage 1 with the addition of the mean difference per fold. All put in a single directory. * * Stage 3 * Final Comparison Summary: call to method basicSummaryComparisons(); * a single file in ResultsDirectory directory called summaryTests<ClassifierNames>.csv * contains pairwise comparisons of all the classifiers. * 1. All Pairwise Comparisons for TestAccDiff, TestAcc, TestBAcc, TestNLL.csv and TestAUROC * 1. Wins/Draws/Loses * 2. Mean (and std dev) difference * 3. 
Two sample tests of the mean values * * @author ajb **/ public class CollateResults { public static File[] dir; static String basePath; static String[] classifiers; static ArrayList<String> problems; static String problemPath; static int folds; static int numClassifiers; static int[] numParas; static DecimalFormat df=new DecimalFormat("##.######"); static double[][] data; static boolean countPartials=false; static boolean oldFormat=true; /** * Arguments: 1. ResultsDirectory/ 2. Either ProblemDirectory/ or ProblemFiles.csv or ProblemFiles.txt * Basically checks for an extension and if its there reads a file. * 3. NumberOfFolds 4-4+nosClassifiers Classifier1 Classifier2 .... ClassifierN 4+nosClassifiers+1 to 4+2*nosClassifiers NoParasC1 NoParasC2 .... NoParasCn * */ public static void readData(String[] args){ int numInitArgs=4; basePath=args[0]; System.out.println("Base path = "+basePath); problemPath=args[1]; System.out.println("Problem path = "+problemPath); folds =Integer.parseInt(args[2]); System.out.println("Folds = "+folds); String partial=args[3].toLowerCase(); if(partial.equals("true")) countPartials=true; else countPartials=false; numClassifiers=(args.length-numInitArgs)/2; classifiers=new String[numClassifiers]; for(int i=0;i<classifiers.length;i++) classifiers[i]=args[i+numInitArgs]; numParas=new int[classifiers.length]; for(int i=0;i<classifiers.length;i++) numParas[i]=Integer.parseInt(args[i+numInitArgs+classifiers.length]); //Get problem files File f=new File(problemPath); problems=new ArrayList<>(); if(problemPath.contains(".txt") || problemPath.contains(".csv")){//Read from file if(!f.exists()) System.out.println("Error loading problems from file ="+problemPath); else{ InFile inf=new InFile(problemPath); String prob=inf.readLine(); while(prob!=null){ problems.add(prob); prob=inf.readLine(); } } } else{ if(!f.isDirectory()){ System.out.println("Error in problem path ="+problemPath); } dir=f.listFiles(); for(File p:dir){ if(p.isDirectory()){ 
problems.add(p.getName()); } } } Collections.sort(problems); } /*Returns True if the file is present and correct Changed cos it is too slow at the moment */ public static boolean validateSingleFoldFile(String str){ File f= new File(str); if(f.exists()){ // Check 1: non zero if(f.length()==0){//Empty, delete file f.delete(); } else{ try{ /* InFile inf=new InFile(str); int c=inf.countLines(); if(c<=3){//No predictions, delete inf.closeFile(); f.delete(); return false; } inf.closeFile(); */ return true; }catch(Exception e){ System.out.println("Exception thrown trying to read file "+str); System.out.println("Exception = "+e+" THIS MAY BE A GOTCHA LATER"); e.printStackTrace(); return false; } //Something in there, it is up to ClassifierResults to validate the rest } } return false; } /** * Stage 1: take all the single fold files, work out the diagnostics on test data: Accuracy, BalancedAccuracy, NegLogLikelihood, AUROC and F1 and store the TrainCV accuracy. all done by call to collateFolds(); Combine folds into a single file for each statistic in ResultsDirectory/ClassifierName these are Counts: counts.csv, number per problem (max number is NumberOfFolds, it does not check for more). Diagnostics: TestAcc.csv, TestF1.csv, TestBAcc.csv, TestNLL.csv, TestAUROC.csv, TrainCVAcc.csv Timings: Timings.csv Memory: Memory.csv Parameter info: Parameter1.csv, Parameter2.csv...AllTuningAccuracies.csv (if tuning occurs, all tuning values). */ public static int MAXNUMPARAS=1180; public static void collateFolds(){ // String[] allStats={"TestAcc","TrainCVAcc","TestNLL","TestBACC","TestAUROC","TestF1"}; for(int i=0;i<classifiers.length;i++){ String cls=classifiers[i]; System.out.println("Processing classifier ="+cls); File f=new File(basePath+cls); if(f.isDirectory()){ //Check classifier directory exists. 
System.out.println("Base path "+basePath+cls+" exists"); File stats=new File(basePath+cls+"/SummaryStats"); if(!stats.isDirectory()) stats.mkdir(); String filePath=basePath+cls+"/SummaryStats/"; OutFile clsResults=new OutFile(filePath+cls+"TestAcc.csv"); OutFile f1Results=new OutFile(filePath+cls+"TestF1.csv"); OutFile BAccResults=new OutFile(filePath+cls+"TestBAcc.csv"); OutFile nllResults=new OutFile(filePath+cls+"TestNLL.csv"); OutFile AUROCResults=new OutFile(filePath+cls+"TestAUROC.csv"); OutFile trainResults=new OutFile(filePath+cls+"TrainCVAcc.csv"); OutFile[] paraFiles=null; if(numParas[i]>0){ paraFiles=new OutFile[numParas[i]]; for(int j=0;j<paraFiles.length;j++) paraFiles[j]=new OutFile(filePath+cls+"Parameter"+(j+1)+".csv"); } OutFile timings=new OutFile(filePath+cls+"Timings.csv"); OutFile mem=new OutFile(filePath+cls+"Memory.csv"); OutFile allAccSearchValues=new OutFile(filePath+cls+"AllTuningAccuracies.csv"); OutFile missing=null; OutFile counts=new OutFile(filePath+cls+"Counts.csv"); OutFile partials=null; if(countPartials) partials=new OutFile(filePath+cls+"PartialCounts.csv");; OutFile of = new OutFile(filePath+cls+"Corrupted.csv"); int missingCount=0; for(String name:problems){ //Write collated results for this classifier to a single file OutFile mergedResults=new OutFile(filePath+cls+"AllTestPrediction"+name+".csv"); clsResults.writeString(name); trainResults.writeString(name); f1Results.writeString(name); BAccResults.writeString(name); nllResults.writeString(name); AUROCResults.writeString(name); allAccSearchValues.writeString(name); timings.writeString(name); mem.writeString(name); if(numParas[i]>0){ for(OutFile out:paraFiles) out.writeString(name+","); } //GAVIN HACK // String path=basePath+cls+"/"+name+"/results/"; String path=basePath+cls+"//Predictions//"+name; if(missing!=null && missingCount>0) missing.writeString("\n"); missingCount=0; if(countPartials) partials.writeString(name); int caseCount=0; for(int j=0;j<folds;j++){ //Check fold 
exists and is a valid file boolean valid=validateSingleFoldFile(path+"//testFold"+j+".csv"); if(valid){ //This could fail if file only has partial probabilities on the line //Read in test accuracy and store //Check fold exists //Read in test accuracy and store InFile inf=null; String[] trainRes=null; try{ inf=new InFile(path+"//testFold"+j+".csv"); inf.readLine(); trainRes=inf.readLine().split(",");//Stores train CV and parameter info clsResults.writeString(","+inf.readDouble()); if(trainRes.length>1){//There IS parameter info //First is train time build String str=trainRes[1].trim(); timings.writeString(","+df.format(Double.parseDouble(str))); //second is the trainCV testAcc if(trainRes.length>3){ str=trainRes[3].trim(); trainResults.writeString(","+Double.parseDouble(str)); //Then variable list of numParas int pos=5; for(int k=0;k<numParas[i];k++){ if(trainRes.length>pos){ paraFiles[k].writeString(trainRes[pos]+","); pos+=2; } else paraFiles[k].writeString(","); } // write the rest to the para search file while(pos<trainRes.length) allAccSearchValues.writeString(","+trainRes[pos++]); } } else{ trainResults.writeString(","); for(int k=0;k<numParas[i];k++) paraFiles[k].writeString(","); } //Read in the rest into a ClassifierResults object inf.closeFile(); // inf.openFile(path+"//testFold"+j+".csv"); // int temp=(inf.countLines()-3); // inf.closeFile(); // System.out.println("Number of items in bag "+(j+1)+" = "+temp); // caseCount+=temp; ClassifierResults res=new ClassifierResults(); res.loadResultsFromFile(path+"//testFold"+j+".csv"); mergedResults.writeLine(res.instancePredictionsToString()); res.findAllStats(); f1Results.writeString(","+res.f1); BAccResults.writeString(","+res.balancedAcc); nllResults.writeString(","+res.nll); AUROCResults.writeString(","+res.meanAUROC); }catch(Exception e){ System.out.println(" Error "+e+" in "+path); if(trainRes!=null){ System.out.println(" second line read has "+trainRes.length+" entries :"); for(String str:trainRes) 
System.out.print(str+","); of.writeLine(name+","+j); e.printStackTrace(); System.exit(1); } }finally{ if(inf!=null) inf.closeFile(); } if(countPartials) partials.writeString(",0"); } else{ if(missing==null) missing=new OutFile(filePath+cls+"MissingFolds.csv"); if(missingCount==0) missing.writeString(name); missingCount++; missing.writeString(","+j); if(countPartials){ //Fold j missing, count here how many parameters are complete on it int x=0; for(int k=1;k<MAXNUMPARAS;k++){ if(validateSingleFoldFile(path+"//fold"+j+"_"+k+".csv")) x++; } if(countPartials) partials.writeString(","+x); } } } // System.out.println(" Total number of cases ="+caseCount); counts.writeLine(name+","+(folds-missingCount)); if(countPartials) partials.writeString("\n"); clsResults.writeString("\n"); trainResults.writeString("\n"); f1Results.writeString("\n"); BAccResults.writeString("\n"); nllResults.writeString("\n"); AUROCResults.writeString("\n"); timings.writeString("\n"); allAccSearchValues.writeString("\n"); for(int k=0;k<numParas[i];k++) paraFiles[k].writeString("\n"); } clsResults.closeFile(); trainResults.closeFile(); for(int k=0;k<numParas[i];k++) paraFiles[k].closeFile(); } else{ System.out.println("Classifier "+cls+" has no results directory: "+basePath+cls); System.out.println("Exit "); System.exit(0); } } } /** Stage 2: Output: Classifier Summary: call to method averageOverFolds() Creates average and standard deviation over all folds based on the files created at stage 1 with the addition of the mean difference per fold. All put in a single directory. 
* **/ public static void averageOverFolds(){ String name=classifiers[0]; for(int i=1;i<classifiers.length;i++) name+=classifiers[i]; String filePath=basePath+name+"/"; if(classifiers.length==1) filePath+="SummaryStats/"; File nf=new File(filePath); if(!nf.isDirectory()) nf.mkdirs(); String[] allStats={"MeanTestAcc","MeanTrainCVAcc","MeanTestNLL","MeanTestBAcc","MeanTestAUROC","MeanTestF1","MeanTimings"}; String[] testStats={"TestAcc","TrainCVAcc","TestNLL","TestBAcc","TestAUROC","TestF1","Timings"}; OutFile[] means=new OutFile[allStats.length]; for(int i=0;i<means.length;i++) means[i]=new OutFile(filePath+allStats[i]+name+".csv"); OutFile[] stDev=new OutFile[allStats.length]; for(int i=0;i<stDev.length;i++) stDev[i]=new OutFile(filePath+allStats[i]+"StDev"+name+".csv"); OutFile count=new OutFile(filePath+"Counts"+name+".csv"); //Headers for(int i=0;i<classifiers.length;i++){ for(OutFile of:means) of.writeString(","+classifiers[i]); for(OutFile of:stDev) of.writeString(","+classifiers[i]); count.writeString(","+classifiers[i]); } for(OutFile of:means) of.writeString("\n"); for(OutFile of:stDev) of.writeString("\n"); count.writeString("\n"); //Do counts first InFile[] allClassifiers=new InFile[classifiers.length]; for(int i=0;i<allClassifiers.length;i++){ String str=basePath+classifiers[i]+"/SummaryStats/"+classifiers[i]; System.out.println("Loading "+str+"Counts.csv"); String p=str+"Counts.csv"; if(new File(p).exists()) allClassifiers[i]=new InFile(p); else{ allClassifiers[i]=null;//superfluous System.out.println("File "+p+" does not exist"); } } for(String str:problems){ count.writeString(str); for(int i=0;i<allClassifiers.length;i++){ if(allClassifiers[i]!=null){ allClassifiers[i].readString(); count.writeString(","+allClassifiers[i].readInt()); } else{ count.writeString(","); } } count.writeString("\n"); } for(int j=0;j<allStats.length;j++){ //Open files with data for all folds for(int i=0;i<allClassifiers.length;i++){ String 
str=basePath+classifiers[i]+"/SummaryStats/"+classifiers[i]; String p=str+testStats[j]+".csv"; if(new File(p).exists()) allClassifiers[i]=new InFile(p); else{ allClassifiers[i]=null;//superfluous System.out.println("File "+p+" does not exist"); } } //Find means for(String str:problems){ means[j].writeString(str); stDev[j].writeString(str); String prev="First"; for(int i=0;i<allClassifiers.length;i++){ if(allClassifiers[i]==null){ means[j].writeString(","); stDev[j].writeString(","); } else{//Find mean try{ String r=allClassifiers[i].readLine(); String[] res=r.split(","); double mean=0; double sumSquare=0; for(int m=1;m<res.length;m++){ double d=Double.parseDouble(res[m].trim()); mean+=d; sumSquare+=d*d; } if(res.length>1){ int size=(res.length-1); mean=mean/size; double stdDev=sumSquare/size-mean*mean; stdDev=Math.sqrt(stdDev); means[j].writeString(","+df.format(mean)); stDev[j].writeString(","+df.format(stdDev)); } else{ means[j].writeString(","); stDev[j].writeString(","); } prev=r; }catch(Exception ex){ System.out.println("failed to read line: "+ex+" previous line = "+prev+" file index ="+j+" classifier index ="+i); } } } means[j].writeString("\n"); stDev[j].writeString("\n"); if(j==0) count.writeString("\n"); } for(InFile inf:allClassifiers) if(inf!=null) inf.closeFile(); } } public static void basicSummaryComparisons(){ //Only compares first two DecimalFormat df = new DecimalFormat("###.#####"); if(classifiers.length<=1) return; String name=classifiers[0]; for(int i=1;i<classifiers.length;i++) name+=classifiers[i]; OutFile s=new OutFile(basePath+"summaryTests"+name+".csv"); String[] allStatistics={"TestAcc","TestBAcc","TestNLL","TestAUROC"}; data=new double[problems.size()][classifiers.length]; s.writeLine(name); for(String str:allStatistics){ s.writeLine("**************"+str+"********************"); System.out.println("Loading "+basePath+name+"/"+str+name+".csv"); InFile f=new InFile(basePath+name+"/"+str+name+".csv"); f.readLine(); for(int 
i=0;i<problems.size();i++){ String ss=f.readLine(); String[] d=ss.split(","); for(int j=0;j<classifiers.length;j++) data[i][j]=-1; for(int j=0;j<d.length-1;j++){ try{ double v=Double.parseDouble(d[j+1]); data[i][j]=v; }catch(Exception e){ // yes yes I know its horrible, but this is text parsing, not rocket science // System.out.println("No entry for classifier "+j); } } // for(int j=0;j<classifiers.length;j++) // System.out.println(" Classifier "+j+" has data "+data[i][j]); } for(int x=0;x<classifiers.length-1;x++){ for (int y=x+1; y < classifiers.length; y++) {//Compare x and y int wins=0,draws=0,losses=0; int sigWins=0,sigLosses=0; double meanDiff=0; double sumSq=0; double count=0; for(int i=0;i<problems.size();i++){ if(data[i][x]!=-1 && data[i][y]!=-1){ if(data[i][x]>data[i][y]) wins++; else if(data[i][x]==data[i][y]) draws++; else losses++; meanDiff+=data[i][x]-data[i][y]; sumSq+=(data[i][x]-data[i][y])*(data[i][x]-data[i][y]); count++; } } // DecimalFormat df = new DecimalFormat("##.#####"); System.out.println(str+","+classifiers[x]+","+classifiers[y]+",WIN/DRAW/LOSE,"+wins+","+draws+","+losses); BinomialDistribution bin=new BinomialDistribution(); bin.setParameters(wins+losses,0.5); double p=bin.getCDF(wins); if(p>0.5) p=1-p; s.writeLine(str+","+classifiers[x]+","+classifiers[y]+",WIN/DRAW/LOSE,"+wins+","+draws+","+losses+", p =,"+df.format(p)); System.out.println(str+","+classifiers[x]+","+classifiers[y]+",COUNT,"+count+",MeanDiff,"+df.format(meanDiff/count)+",StDevDiff,"+df.format((sumSq-(meanDiff*meanDiff)/count))+" p ="+df.format(p)); //3. 
Find out how many are statistically different within folds //Do paired T-tests from fold files InFile first=new InFile(basePath+classifiers[x]+"/"+classifiers[x]+str+".csv"); InFile second=new InFile(basePath+classifiers[y]+"/"+classifiers[y]+str+".csv"); for(int i=0;i<problems.size();i++){ //Read in both: Must be the same number to proceed String[] probX=first.readLine().split(","); String[] probY=second.readLine().split(","); if(probX.length<=folds || probY.length<=folds) continue; //Skip this problem double[] diffs=new double[folds]; boolean notAllTheSame=false; for(int j=0;j<folds;j++){ diffs[j]=Double.parseDouble(probX[j+1])-Double.parseDouble(probY[j+1]); if(!notAllTheSame && !probX[j+1].equals(probY[j+1])) notAllTheSame=true; } if(notAllTheSame){ OneSampleTests test=new OneSampleTests(); String res=test.performTests(diffs); System.out.println("Results = "+res); String[] results=res.split(","); double tTestPValue=Double.parseDouble(results[2]); if(tTestPValue>=0.95) sigWins++; else if(tTestPValue<=0.05) sigLosses++; } else System.out.println("**************ALL THE SAME problem = "+probX[0]+" *************"); } s.writeLine(str+","+classifiers[x]+","+classifiers[y]+",SIGWIN/SIGLOSS,"+sigWins+","+sigLosses); System.out.println(str+","+classifiers[x]+","+classifiers[y]+",SIGWIN/SIGLOSS,"+sigWins+","+sigLosses); //2. Overall mean difference s.writeLine(str+","+classifiers[x]+","+classifiers[y]+",COUNT,"+count+",MeanDiff,"+df.format(meanDiff/count)+",StDevDiff,"+df.format((sumSq-(meanDiff*meanDiff)/count))); System.out.println(str+","+classifiers[x]+","+classifiers[y]+",COUNT,"+count+",MeanDiff,"+df.format(meanDiff/count)+",StDevDiff,"+df.format((sumSq-(meanDiff*meanDiff)/count))); } } //Do pairwise tests over all common datasets. //1. 
First need to condense to remove any with one missing ArrayList<double[]> res=new ArrayList<>(); for(int i=0;i<data.length;i++){ int j=0; while(j<data[i].length && data[i][j]!=-1) j++; if(j==data[i].length) res.add(data[i]); } System.out.println("REDUCED DATA SIZE = "+res.size()); double[][] d2=new double[res.size()][]; for(int i=0;i<res.size();i++) d2[i]=res.get(i); //2. Do pairwise tests StringBuilder resultsString= MultipleEstimatorsPairwiseTest.runSignRankTest(d2,classifiers); s.writeString(resultsString.toString()); System.out.println(resultsString); } s.closeFile(); } public static void collate(String[] args){ //STAGE 1: Read from arguments, find problems readData(args); System.out.println(" number of classifiers ="+numClassifiers); //STAGE 2: Collate the individual fold files into one System.out.println("Collating folds ...."); collateFolds(); System.out.println("Collate folds finished. \n Averaging over folds...."); //STAGE 3: Summarise over folds averageOverFolds(); System.out.println("averaging folds finished.\n Basic stats comparison ...."); //STAGE 4: Do statical comparisons basicSummaryComparisons(); } /** * *First argument: String path to results directories * Second argument: path to directory with problem allStats to look for * Third argument: number of folds * Next x arguments: x Classifiers to collate * Next x arguments: number of numParas stored for each classifier **/ public static void singleClassifierFullStats(String[] args) throws Exception{ if(args.length>1) collate(args); else{ String[] classifiers={"TSF"}; for(String classifier:classifiers){ String parameters="0"; String[] str={"E:\\Results\\Java\\", "Z:\\Data\\TSCProblems2018\\","30","false",classifier,parameters}; collate(str); } } } /** * Usage of MultipleEstimatorEvaluation. 
See the class for more info * @throws Exception */ public static void multipleClassifierFullStats(String[] args) throws Exception{ if(args.length>0){ //TO DO } else{ //Example manual setting MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation("E://Results//UCI//Analysis//", "Tuned", 5); m.setBuildMatlabDiagrams(true); m.setDebugPrinting(true); m.setUseAllStatistics(); m.setDatasets(Arrays.copyOfRange(experiments.data.DatasetLists.UCIContinuousWithoutBigFour, 0, 117)); m.readInEstimators(new String[] {"MLP2","SVMRBF","SVMP","RandF","RotF","XGBoost"}, "E://Results/UCI/Tuned"); m.runComparison(); } } public static String bakeOffPath="Z:/ReferenceResults/CollatedResults/Bakeoff2015/byClassifier/"; public static String hiveCotePath="Z:/ReferenceResults/CollatedResults/HIVE-COTE2017/"; public static String bakeOffPathBeast="Z:/ReferenceResults/CollatedResults/Bakeoff2015/byClassifier/"; public static String hiveCotePathBeast="Z:/ReferenceResults/CollatedResults/HIVE-COTE2017/"; public static String reduxPathBeast="Z:/ReferenceResults/CollatedResults/BakeoffRedux2019/"; public static String bakeOffPathCluster="/gpfs/home/ajb/ReferenceResults/Bakeoff2015/ByClassifier/"; public static String hiveCotePathCluster="/gpfs/home/ajb/ReferenceResults/HIVE-COTE2017/"; public static String reduxPathCluster="/gpfs/home/ajb/ReferenceResults/BakeoffRedux2019/"; /** * Quick in place collation and comparison to reference results * @param args * @throws Exception * Primary: these results are built from file using predictions or just reading line 3 * para 1: String: location of primary results, including classifier name * para 2: Boolean stored as string: true: calculate acc from preds and check. False: just read from line 3. * Para 3: Integer stored as string: number of folds to look for * OPTIONAL: these results are read directly, can have as many as desired * Input format ProblemSource,ClassifierName. 
Problem source must be Bakeoff, HIVE-COTE, or Redux * para 3: comparison classifier TYPE,NAME 1 * para 4: comparison classifier full path 2 * .. * Notes: * 1. Only uses accuracy, does not require classes map 0... numClasses-1 or probabilities. * 2. Assumes file structure is arg[0]/Predictions/ProblemName/testFold0.csv * 3. Assumes every directory in Predictions is a results folder * 4. For the fold averages, it ignores any problem without a full set of results, will print the results as empty * 5. Prints results to arg[0]/QuickResults/TrainTest<classifierName>.csv, */ public static void singleClassifiervsReferenceResults(String[] args) throws Exception{ if(args.length<4){ String input=""; for(String s:args) input+=s+" "; throw new Exception("Wrong input args =:"+input); } for(int i=0;i<args.length;i++){ System.out.println("args["+i+"] = "+args[i]); } String fullPath=args[0]; String[] temp=args[0].split("/"); String classifierName=temp[temp.length-1]; System.out.println(" Primary Classifier = "+classifierName); boolean calcAcc=Boolean.parseBoolean(args[1]); folds=Integer.parseInt(args[2]); oldFormat=Boolean.parseBoolean(args[3]); File f= new File(fullPath+"/QuickResults"); f.mkdirs(); //Get primary results ArrayList<String> problems =new ArrayList<>(); ArrayList<String> missing =new ArrayList<>(); f=new File(fullPath+"/Predictions"); System.out.println(fullPath+"/Predictions"); File[] fileList=f.listFiles(); System.out.println("File names in "+fullPath+"/Predictions : has "+f.length()+" files "); for(File t:fileList){ System.out.println("\t"+t.getName()); if(t.isDirectory()){ // Note 3: assume all dirs are problems problems.add(t.getName()); } } Collections.sort(problems); double[] trainTest= new double[problems.size()]; double[] trainTestTime= new double[problems.size()]; double[] means= new double[problems.size()]; double[][] allFolds= new double[problems.size()][folds]; double[] meansTime= new double[problems.size()]; double[][] allFoldsTime= new 
double[problems.size()][folds]; OutFile trTsFile=new OutFile(fullPath+"/QuickResults/TrainTest"+classifierName+".csv"); OutFile meansFile=new OutFile(fullPath+"/QuickResults/Average"+folds+classifierName+".csv"); OutFile allFoldsFile=new OutFile(fullPath+"/QuickResults/AllFolds"+classifierName+".csv"); OutFile trTsTimesFile=new OutFile(fullPath+"/QuickResults/TimesTrainTest"+classifierName+".csv"); OutFile meanTimesFile=new OutFile(fullPath+"/QuickResults/TimeAverage"+folds+classifierName+".csv"); OutFile allFoldsTimesFile=new OutFile(fullPath+"/QuickResults/TimeAllFolds"+classifierName+".csv"); OutFile trainFileCount=new OutFile(fullPath+"/QuickResults/trainFileCount"+classifierName+".csv"); InFile inf=null; boolean readTimes=true; for(int i=0;i<trainTest.length;i++){ System.out.println("Processing "+problems.get(i)); int trainCount=0; boolean cont=true; for(int j=0;j<folds && cont;j++){ try{ inf=new InFile(fullPath+"/Predictions/"+problems.get(i)+"/testFold"+j+".csv"); inf.readLine();//Ignore first two String secondLine=inf.readLine(); String[] split; String[] secondSplit=secondLine.split(","); String thirdLine=inf.readLine(); String[] thirdSplit=thirdLine.split(","); //Under the old format, the time is the second argument of line 2 //Under the new format, the time is double time=0; try{ if(oldFormat){ time=Double.parseDouble(secondSplit[1]); }else{ time=Double.parseDouble(thirdSplit[1]); } }catch(Exception e){ System.out.println("Error reading in times for base classifier, oldFormat="+oldFormat+" may be wrong"); System.out.println("Continue without timing"); readTimes=false; } double acc=Double.parseDouble(thirdSplit[0]);// if(calcAcc){ String line=inf.readLine(); double a=0; int count=0; while(line!=null){ split=line.split(","); count++; if(split[0].equals(split[1])) a++; line=inf.readLine(); } if(count>0) a/=count; if((a-acc)>0.000000001){ System.out.println("Mismatch in acc read from file and acc calculated from file"); System.out.println("THIS NEEDS 
INVESTIGATING. Abandoning the whole problem compilation "); System.exit(1); } } if(j==0){ trainTest[i]=acc; trainTestTime[i]=time; } allFolds[i][j]=acc; allFoldsTime[i][j]=time; File tr_f=new File(fullPath+"/Predictions/"+problems.get(i)+"/trainFold"+j+".csv"); if(tr_f.exists()){//Train fold present trainCount++; } }catch(Exception e){ missing.add(problems.get(i)); System.out.println("Some error processing "+fullPath+"/Predictions/"+problems.get(i)+"/testFold"+j+".csv"); System.out.println(" Abandoning entire problem "+problems.get(i)); cont=false; } finally{ if(inf!=null) inf.closeFile(); } } if(cont){//Should have all the data trTsFile.writeString(problems.get(i)); meansFile.writeString(problems.get(i)); allFoldsFile.writeString(problems.get(i)); trainFileCount.writeLine(problems.get(i)+","+trainCount); trTsFile.writeString(","+trainTest[i]); means[i]=0; for(int j=0;j<allFolds[i].length;j++){ allFoldsFile.writeString(","+allFolds[i][j]); means[i]+=allFolds[i][j]; } means[i]/=folds; meansFile.writeString(","+means[i]); trTsFile.writeString("\n"); meansFile.writeString("\n"); allFoldsFile.writeString("\n"); if(readTimes){ trTsTimesFile.writeString(problems.get(i)); meanTimesFile.writeString(problems.get(i)); allFoldsTimesFile.writeString(problems.get(i)); trTsTimesFile.writeString(","+trainTestTime[i]); meansTime[i]=0; for(int j=0;j<allFolds[i].length;j++){ allFoldsTimesFile.writeString(","+allFoldsTime[i][j]); meansTime[i]+=allFoldsTime[i][j]; } meansTime[i]/=folds; meanTimesFile.writeString(","+meansTime[i]); trTsTimesFile.writeString("\n"); meanTimesFile.writeString("\n"); allFoldsTimesFile.writeString("\n"); } }else{//Write trainTest if present if(trainTest[i]>0){ //Captured fold 0, lets use it trTsFile.writeLine(problems.get(i)+","+trainTest[i]); if(readTimes) trTsTimesFile.writeString(problems.get(i)); } } } if(args.length>4){ //Going to compare to some others String[] rc=new String[args.length-4]; for(int i=4;i<args.length;i++) rc[i-4]=args[i]; 
System.out.println("Comparing "+classifierName+" to "); String[][] classifiers=new String[rc.length][]; for(int i=0;i<rc.length;i++) classifiers[i]=rc[i].split(","); ArrayList<HashMap<String,Double>> trainTestResults=new ArrayList<>(); ArrayList<HashMap<String,Double>> averageResults=new ArrayList<>(); for(int i=0;i<classifiers.length;i++){ classifiers[i][0]=classifiers[i][0].toUpperCase(); System.out.println(classifiers[i][0]+"_"+classifiers[i][1]); HashMap<String,Double> trTest=new HashMap<>(); HashMap<String,Double> averages=new HashMap<>(); //Look for train results String path=""; switch(classifiers[i][0]){ case "BAKEOFF": path=bakeOffPath; break; case "HIVE-COTE": path=hiveCotePath; break; default: System.out.println("UNKNOWN LOCATION INDICATOR "+classifiers[i][0]); throw new Exception("UNKNOWN LOCATION INDICATOR "+classifiers[i][0]); } f=new File(path+"TrainTest/TrainTest"+classifiers[i][1]+".csv"); if(f.exists()){ inf=new InFile(path+"TrainTest/TrainTest"+classifiers[i][1]+".csv"); String line=inf.readLine(); while(line!=null){ String[] split=line.split(","); String prob=split[0]; if(prob.equals("CinCECGtorso"))//Hackhackityhack: legacy problem prob="CinCECGTorso"; if(prob.equals("StarlightCurves"))//Hackhackityhack: legacy problem prob="StarLightCurves"; if(prob.equals("NonInvasiveFatalECGThorax1"))//Hackhackityhack: legacy problem prob="NonInvasiveFetalECGThorax1"; if(prob.equals("NonInvasiveFatalECGThorax2"))//Hackhackityhack: legacy problem prob="NonInvasiveFetalECGThorax2"; Double d = Double.parseDouble(split[1]); trTest.put(prob,d); line=inf.readLine(); } } f=new File(path+"Average30/Average30"+classifiers[i][1]+".csv"); if(f.exists()){ inf=new InFile(path+"Average30/Average30"+classifiers[i][1]+".csv"); // inf.readLine(); String line=inf.readLine(); while(line!=null){ String[] split=line.split(","); String prob=split[0]; if(prob.equals("CinCECGtorso"))//Hackhackityhack: legacy problem prob="CinCECGTorso"; 
if(prob.equals("StarlightCurves"))//Hackhackityhack: legacy problem prob="StarLightCurves"; if(prob.equals("NonInvasiveFatalECGThorax1"))//Hackhackityhack: legacy problem prob="NonInvasiveFetalECGThorax1"; if(prob.equals("NonInvasiveFatalECGThorax2"))//Hackhackityhack: legacy problem prob="NonInvasiveFetalECGThorax2"; Double d = Double.parseDouble(split[1]); averages.put(prob,d); line=inf.readLine(); } } trainTestResults.add(trTest); averageResults.add(averages); } trTsFile=new OutFile(fullPath+"/QuickResults/CompareTrainTest"+classifierName+".csv"); OutFile trTsFileComplete=new OutFile(fullPath+"/QuickResults/CompareTrainTestCompleteOnly"+classifierName+".csv"); meansFile=new OutFile(fullPath+"/QuickResults/CompareAverage"+folds+"_"+classifierName+".csv"); OutFile meansFileComplete=new OutFile(fullPath+"/QuickResults/CompareAverageCompleteOnly"+classifierName+".csv"); trTsFile.writeString("Problem,"+classifierName); meansFile.writeString("Problem,"+classifierName); meansFileComplete.writeString("Problem,"+classifierName); trTsFileComplete.writeString("Problem,"+classifierName); for(int i=0;i<classifiers.length;i++){ trTsFile.writeString(","+classifiers[i][0]+"_"+classifiers[i][1]); meansFile.writeString(","+classifiers[i][0]+"_"+classifiers[i][1]); meansFileComplete.writeString(","+classifiers[i][0]+"_"+classifiers[i][1]); trTsFileComplete.writeString(","+classifiers[i][0]+"_"+classifiers[i][1]); } trTsFile.writeString("\n"); meansFile.writeString("\n"); meansFileComplete.writeString("\n"); trTsFileComplete.writeString("\n"); for(int i=0;i<problems.size();i++){ String name=problems.get(i); boolean present=true; //Train test if(trainTest[i]>0){ //Captured fold 0, lets use it String line=name+","+trainTest[i]; trTsFile.writeString(name+","+trainTest[i]); for(int j=0;j<classifiers.length;j++){ HashMap<String,Double> trTest=trainTestResults.get(j); if(trTest.containsKey(name)){ Double x=trTest.get(name); trTsFile.writeString(","+x); line+=","+x; } else{ 
trTsFile.writeString(","); present=false; } } trTsFile.writeString("\n"); if(present) trTsFileComplete.writeLine(line); } //Averages if(!missing.contains(name)){ String line=name+","+means[i]; meansFile.writeString(name+","+means[i]); for(int j=0;j<classifiers.length;j++){ HashMap<String,Double> av=averageResults.get(j); if(av.containsKey(name)){ Double x=av.get(name); meansFile.writeString(","+x); line+=","+x; } else{ meansFile.writeString(","); present=false; } } meansFile.writeString("\n"); if(present) meansFileComplete.writeLine(line); } } } } public static void quickStats(String primary, boolean calcAcc, int folds, boolean oldForm,String...others) throws Exception{ String[] input; if(others==null) input=new String[4]; else input=new String[4+others.length]; input[0]=primary; input[1]=calcAcc+""; input[2]=folds+""; input[3]=oldForm+""; if(others!=null) for(int i=0;i<others.length;i++) input[i+4]=others[i]; singleClassifiervsReferenceResults(input); } public static void main(String[] args) throws Exception { if (args.length == 0) {//Local run bakeOffPath=bakeOffPathBeast; hiveCotePath=hiveCotePathBeast; quickStats("C:/Temp/CNN/CNN10hours",false,1,false); //,"HIVE-COTE,ST","HIVE-COTE,HIVE-COTE"); // quickStats("Z:/Results/BOSS variants/Univariate/RBOSS250",false,30,false,"HIVE-COTE,BOSS"); //TunedTSF // quickStats("E:/Results/UCR Debug/Java/TunedTSF",false,30,"Bakeoff,ST","Bakeoff,TSF","Bakeoff,BOSS","Bakeoff,DTWCV"); //ProximityForest // quickStats("E:/Results/UCR Debug/Java/ProximityForest",false,30,"HIVE-COTE,EE","HIVE-COTE,BOSS","HIVE-COTE,TSF","HIVE-COTE,RISE","HIVE-COTE,ST","HIVE-COTE,HIVE-COTE"); // quickStats("Z:/Results/Post Bakeoff Results/resnet/",false,30,"HIVE-COTE,EE","HIVE-COTE,BOSS","HIVE-COTE,TSF","HIVE-COTE,RISE","HIVE-COTE,ST","HIVE-COTE,HIVE-COTE"); // quickStats("Z:/Results/Post Bakeoff Results/WEASEL/",false,30,"HIVE-COTE,EE","HIVE-COTE,BOSS","HIVE-COTE,TSF","HIVE-COTE,RISE","HIVE-COTE,ST","HIVE-COTE,HIVE-COTE"); //REDUX: EE // 
quickStats("Z:/Results/Bakeoff Redux/Java/EE",false,30,"HIVE-COTE,EE","Bakeoff,EE"); //REDUX: TSF // quickStats("Z:/Results/Bakeoff Redux/Java/TSF",false,30,"HIVE-COTE,TSF","Bakeoff,TSF"); //REDUX: BOSS // quickStats("Z:/Results/Bakeoff Redux/Java/BOSS",false,30,"HIVE-COTE,BOSS","Bakeoff,BOSS"); //REDUX: RISE // quickStats("E:/Results/UCR Debug/Python/TSF",false,30,"HIVE-COTE,TSF","HIVE-COTE,EE","HIVE-COTE,BOSS","HIVE-COTE,RISE","HIVE-COTE,ST","HIVE-COTE,HIVE-COTE"); // quickStats("Z:/Results/Bakeoff Redux/Java/RISE",false,30,"HIVE-COTE,RISE"); //REDUX: ST // quickStats("Z:/Results/Bakeoff Redux/Java/ST",false,30,"HIVE-COTE,ST","Bakeoff,ST"); ///REDUX: HIVE-COTE // quickStats("Z:/Results/Bakeoff Redux/Java/HIVE-COTE",false,30,"HIVE-COTE,HIVE-COTE"); } else{ //Cluster run bakeOffPath=bakeOffPathCluster; hiveCotePath=hiveCotePathCluster; System.out.println("Cluster Job Args:"); for(String s:args) System.out.println(s); singleClassifiervsReferenceResults(args); } System.exit(0); boolean singleClassifierStats=true; if(singleClassifierStats) singleClassifierFullStats(args); else multipleClassifierFullStats(args); } }
52,847
46.397309
214
java
tsml-java
tsml-java-master/src/main/java/experiments/ExperimentalArguments.java
package experiments; import com.beust.jcommander.JCommander; import com.beust.jcommander.Parameter; import com.beust.jcommander.Parameters; import tsml.classifiers.distance_based.utils.strings.StrUtils; import weka.classifiers.Classifier; import weka.clusterers.Clusterer; import java.lang.reflect.Field; import java.util.ArrayList; import java.util.List; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import java.util.logging.Level; @Parameters(separators = "=") public class ExperimentalArguments implements Runnable { //REQUIRED PARAMETERS @Parameter(names = {"-dp", "--dataPath"}, required = true, order = 0, description = "(String) The directory that contains the dataset to be evaluated on, in the form " + "[--dataPath]/[--datasetName]/[--datasetname].arff (the actual arff file(s) may be in different forms, see ClassifierExperiments.sampleDataset(...).") public String dataReadLocation = null; @Parameter(names = {"-rp", "--resultsPath"}, required = true, order = 1, description = "(String) The parent directory to write the results of the evaluation to, in the form " + "[--resultsPath]/[--classifierName]/Predictions/[--datasetName]/... This defaults to current working directory + 'results/' ") public String resultsWriteLocation = "results/"; @Parameter(names = {"-cn", "--classifierName"}, required = true, order = 2, description = "(String) The name of the classifier to evaluate. 
A case matching this value should exist within the ClassifierLists") public String estimatorName = null; @Parameter(names = {"-dn", "--datasetName"}, required = true, order = 3, description = "(String) The name of the dataset to be evaluated on, which resides within the dataPath in the form " + "[--dataPath]/[--datasetName]/[--datasetname].arff (the actual arff file(s) may be of different forms, see ClassifierExperiments.sampleDataset(...).") public String datasetName = null; @Parameter(names = {"-f", "--fold"}, required = true, order = 4, description = "(int) The fold index for dataset resampling, also used as the rng seed. *Indexed from 1* to conform with cluster array " + "job indices. The fold id pass will be automatically decremented to be zero-indexed internally.") public int foldId = 0; //OPTIONAL PARAMETERS @Parameter(names = {"--help"}, hidden = true) //hidden from usage() printout private boolean help = false; //todo separate verbosity into it own thing @Parameter(names = {"-d", "--debug"}, arity = 1, description = "(boolean) Increases verbosity and turns on the printing of debug statements") public boolean debug = false; @Parameter(names = {"-s", "--seed"}, arity = 1, description = "(int) seed for the classifier. If not set the foldId is used as the seed unless --useSeed is set to false.") public int seed = Integer.MIN_VALUE; @Parameter(names = {"-us", "--useSeed"}, arity = 1, description = "(boolean) Whether to use the foldId or seed (if set) for the classifier, defaults to true. 
If false " + "prevents the classifiers setSeed() from being called.") public boolean useSeed = true; @Parameter(names = {"-gtf", "--genTrainFiles"}, arity = 1, description = "(boolean) Turns on the production of trainFold[fold].csv files, the results of which are calculate either via a cross validation of " + "the train data, or if a classifier implements the TrainAccuracyEstimate interface, the classifier will write its own estimate via its own means of evaluation.") public boolean generateErrorEstimateOnTrainSet = false; @Parameter(names = {"-cp", "--checkpointing"}, arity = 1, description = "(boolean or String) Turns on the usage of checkpointing, if the classifier implements the SaveParameterInfo and/or CheckpointClassifier interfaces. " + "Default is false/0, for no checkpointing. if -cp = true, checkpointing is turned on and checkpointing frequency is determined by the classifier. if -cp is a timing of the form [int][char], e.g. 1h, " + "checkpoints shall be made at that frequency (as close as possible according to the atomic unit of learning for the classifier). Possible units, in order: n (nanoseconds), u, m, s, M, h, d (days)." + "Lastly, if -cp is of the the [int] only, it is assumed to be a timing in hours." + "The classifier by default will write its checkpointing files to workspace path parallel to the --resultsPath, unless another path is optionally supplied to --supportingFilePath.") private String checkpointingStr = null; public boolean checkpointing = false; public long checkpointInterval = 0; @Parameter(names = {"-vis", "--visualisation"}, arity = 1, description = "(boolean) Turns on the production of visualisation files, if the classifier implements the Visualisable interface. " + "Figures are created using Python. Exact requirements are to be determined, but a a Python 3.7 installation is the current recommendation with the numpy and matplotlib packages installed on the global environment. 
" + "The classifier by default will write its visualisation files to workspace path parallel to the --resultsPath, unless another path is optionally supplied to --supportingFilePath.") public boolean visualise = false; @Parameter(names = {"-int", "--interpretability"}, arity = 1, description = "(boolean) Turns on the production of interpretability files, if the classifier implements the Interpretable interface. " + "The classifier by default will write its interpretability files to workspace path parallel to the --resultsPath, unless another path is optionally supplied to --supportingFilePath.") public boolean interpret = false; @Parameter(names = {"-sp", "--supportingFilePath"}, description = "(String) Specifies the directory to write any files that may be produced by the classifier if it is a FileProducer. This includes but may not be " + "limited to: parameter evaluations, checkpoints, and logs. By default, these files are written to a generated subdirectory in the same location that the train and testFold[fold] files are written, relative" + "the --resultsPath. If a path is supplied via this parameter however, the files shall be written to that precisely that directory, as opposed to e.g. [-sp]/[--classifierName]/Predictions... " + "THIS IS A PLACEHOLDER PARAMETER. TO BE FULLY IMPLEMENTED WHEN INTERFACES AND SETCLASSIFIER ARE UPDATED.") public String supportingFilePath = null; @Parameter(names = {"-pid", "--parameterSplitIndex"}, description = "(Integer) If supplied and the classifier implements the ParameterSplittable interface, this execution of experiments will be set up to evaluate " + "the parameter set -pid within the parameter space used by the classifier (whether that be a supplied space or default). 
How the integer -pid maps onto the parameter space is up to the classifier.") public Integer singleParameterID = null; @Parameter(names = {"-tb", "--timingBenchmark"}, arity = 1, description = "(boolean) Turns on the computation of a standard operation to act as a simple benchmark for the speed of computation on this hardware, which may " + "optionally be used to normalise build/test/predictions times across hardware in later analysis. Expected time on Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz is ~0.8 seconds. For experiments that are likely to be very " + "short, it is recommended to leave this off, as it will proportionally increase the total time to perform all your experiments by a great deal, and for short evaluation time the proportional affect of " + "any processing noise may make any benchmark normalisation process unreliable anyway.") public boolean performTimingBenchmark = false; //todo expose the filetype enum in some way, currently just using an unconnected if statement, if e.g the order of the enum values changes in the classifierresults, which we have no knowledge //of here, the ifs will call the wrong things. decide on the design of this @Parameter(names = {"-ff", "--fileFormat"}, description = "(int) Specifies the format for the classifier results file to be written in, accepted values = { 0, 1, 2 }, default = 0. 0 writes the first 3 lines of meta information " + "as well as the full prediction information, and requires the most disk space. 1 writes the first three lines and a list of the performance metrics calculated from the prediction info. 2 writes the first three lines only, and " + "requires the least space. 
Use options other than 0 if generating too many files with too much prediction information for the disk space available, however be aware that there is of course a loss of information.") public int classifierResultsFileFormat = 0; @Parameter(names = {"-nt", "--numberOfThreads"}, arity = 1, description = "(int) Number of threads to be set for MultiThreadable classifiers, defaults to 1. If set to" + " < 1, Runtime.getRuntime().availableProcessors()-1 threads are used.") public int numberOfThreads = 1; @Parameter(names = {"-rmv", "--replaceMissingValues"}, arity = 1, description = "(boolean) Whether to replace any missing values in the train and test set with 0. " + "(clustering only currently)") public boolean replaceMissingValues = false; @Parameter(names = {"-co", "--classifierOptions"}, arity = 1, description = "(String) Classifier specific comma delimited options string to be split and passed to a" + " classifiers setOptions() method. Each option should have the parameter name/tag, a comma and then the parameter value for each options i.e. T,500,I,0.5") private String classifierOptionsStr = null; public String[] classifierOptions = null; @Parameter(names = {"-ctr", "--contractTrain"}, description = "(String) Defines a time limit for the training of the classifier if it implements the TrainTimeContractClassifier interface. Defaults to " + "no contract time. If an integral value is given, it is assumed to be in HOURS. Otherwise, a string of the form [int][char] can be supplied, with the [char] defining the time unit. " + "e.g.1 10s = 10 seconds, e.g.2 1h = 60M = 3600s. Possible units, in order: n (nanoseconds), u, m, s, M, h, d (days).") private String contractTrainTimeString = null; public long contractTrainTimeNanos = 0; @Parameter(names = {"-cte", "--contractTest"}, description = "(String) Defines a time limit for the testing of the classifier if it implements the TestTimeContractable interface. Defaults to " + "no contract time. 
If an integral value is given, it is assumed to be in HOURS. Otherwise, a string of the form [int][char] can be supplied, with the [char] defining the time unit. " + "e.g.1 10s = 10 seconds, e.g.2 1h = 60M = 3600s. Possible units, in order: n (nanoseconds), u, m, s, M, h, d (days).") private String contractTestTimeString = null; public long contractTestTimeNanos = 0; @Parameter(names = {"-sc", "--serialiseClassifier"}, arity = 1, description = "(boolean) If true, and the classifier is serialisable, the classifier will be serialised to the --supportingFilesPath after training, but before testing.") public boolean serialiseTrainedClassifier = false; @Parameter(names = {"--force"}, arity = 1, description = "(boolean) If true, the evaluation will occur even if what would be the resulting files already exists. The old files will be overwritten with the new evaluation results.") public boolean forceEvaluation = false; @Parameter(names = {"--forceTest"}, arity = 1, description = "(boolean) If true, the evaluation will occur even if what would be the resulting test file already exists. The old test file will be overwritten with the new evaluation results.") public boolean forceEvaluationTestFold = false; @Parameter(names = {"--forceTrain"}, arity = 1, description = "(boolean) If true, the evaluation will occur even if what would be the resulting train file already exists. The old train file will be overwritten with the new evaluation results.") public boolean forceEvaluationTrainFold = false; @Parameter(names = {"-tem", "--trainEstimateMethod"}, arity = 1, description = "(String) Defines the method and parameters of the evaluation method used to estimate error on the train set, if --genTrainFiles == true. Current implementation is a hack to get the option in for" + " experiment running in the short term. Give one of 'cv' and 'hov' for cross validation and hold-out validation set respectively, and a number of folds (e.g. cv_10) or train set proportion (e.g. 
hov_0.7) respectively. Default is a 10 fold cv, i.e. cv_10.") public String trainEstimateMethod = "cv_10"; @Parameter(names = {"-norm", "--normalise"}, arity = 1, description = "(boolean) If true, will z-normalise all series prior to training and testing (clustering only currently).") public boolean normalise = false; @Parameter(names = {"--conTrain"}, arity = 2, description = "todo") private List<String> trainContracts = new ArrayList<>(); @Parameter(names = {"--contractInName"}, arity = 1, description = "todo") private boolean appendTrainContractToClassifierName = true; @Parameter(names = {"-l", "--logLevel"}, description = "log level") private String logLevelStr = null; private Level logLevel = null; public boolean hasTrainContracts() { return trainContracts.size() > 0; } // calculated/set during experiment setup, indirectly using the parameters passed public String trainFoldFileName = null; public String testFoldFileName = null; // a function that returns a classifier instance, mainly for generating multiple instances for different // threaded exps. If not supplied (default), the classifier is instantiated via setClassifier(classifierName) public Supplier<Classifier> classifierGenerator = null; public Classifier classifier = null; public Clusterer clusterer = null; public int numClassValues = 2; public ExperimentalArguments() { } public ExperimentalArguments(String[] args) throws Exception { parseArguments(args); } @Override //Runnable public void run() { try { ClassifierExperiments.setupAndRunExperiment(this); } catch (Exception ex) { ex.printStackTrace(); } } /** * This is a bit of a bolt-on method for now. It assumes that the object on which * this method is being called has all the other parameters not passed to it set already * (e.g data location, results location) and these will be replicated across all experiments. * The current value of this.classifierName, this.datasetName, and this.foldId are ignored within * this method. 
* * @param minFold inclusive * @param maxFold exclusive, i.e will make folds [ for (int f = minFold; f < maxFold; ++f) ] * @return a list of unique experimental arguments, covering all combinations of classifier, datasets, and folds passed, with the same meta info as 'this' currently stores */ public List<ExperimentalArguments> generateExperiments(String[] classifierNames, List<Supplier<Classifier>> classifierGenerators, String[] datasetNames, int minFold, int maxFold) { if (minFold > maxFold) { int t = minFold; minFold = maxFold; maxFold = t; } ArrayList<ExperimentalArguments> exps = new ArrayList<>(classifierNames.length * datasetNames.length * (maxFold - minFold)); for (int i = 0; i < classifierNames.length; i++) { String classifier = classifierNames[i]; for (String dataset : datasetNames) { for (int fold = minFold; fold < maxFold; fold++) { ExperimentalArguments exp = new ExperimentalArguments(); exp.estimatorName = classifier; exp.datasetName = dataset; exp.foldId = fold; // enforce that if a classifier instance has been provided, it's nulled to avoid // the same instance being accessed across multiple threads exp.classifier = null; if (classifierGenerators != null && classifierGenerators.get(i) != null) exp.classifierGenerator = classifierGenerators.get(i); else exp.classifierGenerator = null; // copying fields via reflection now to avoid cases of forgetting to account for newly added paras for (Field field : ExperimentalArguments.class.getFields()) { // these are the ones being set individually per exp, skip the copying over if (field.getName().equals("estimatorName") || field.getName().equals("datasetName") || field.getName().equals("foldId") || field.getName().equals("classifier") || field.getName().equals("classifierGenerator") ) continue; try { field.set(exp, field.get(this)); } catch (IllegalAccessException ex) { System.out.println("Fatal, should-be-unreachable exception thrown while copying across exp args"); System.out.println(ex); 
ex.printStackTrace(); System.exit(0); } } exps.add(exp); } } } return exps; } private void parseArguments(String[] args) throws Exception { JCommander.Builder b = JCommander.newBuilder(); b.addObject(this); JCommander jc = b.build(); jc.setProgramName("ClassifierExperiments.java"); //todo maybe add copyright etcetc try { jc.parse(args); } catch (Exception e) { if (!help) { //we actually errored, instead of the program simply being called with the --help flag System.err.println("Parsing of arguments failed, parameter information follows after the error. Parameters that require values should have the flag and value separated by '='."); System.err.println("For example: java -jar TimeSeriesClassification.jar -dp=data/path/ -rp=results/path/ -cn=someClassifier -dn=someDataset -f=0"); System.err.println("Parameters prefixed by a * are REQUIRED. These are the first five parameters, which are needed to run a basic experiment."); System.err.println("Error: \n\t" + e + "\n\n"); } jc.usage(); // Thread.sleep(1000); //usage can take a second to print for some reason?... no idea what it's actually doing // System.exit(1); } foldId -= 1; //go from one-indexed to zero-indexed ClassifierExperiments.debug = this.debug; resultsWriteLocation = StrUtils.asDirPath(resultsWriteLocation); dataReadLocation = StrUtils.asDirPath(dataReadLocation); if (checkpointingStr != null) { //some kind of checkpointing is wanted // is it simply "true"? checkpointing = Boolean.parseBoolean(checkpointingStr.toLowerCase()); if (!checkpointing) { //it's not. 
must be a timing string checkpointing = true; checkpointInterval = parseTiming(checkpointingStr); } } if (classifierOptionsStr != null) classifierOptions = classifierOptionsStr.split(","); //populating the contract times if present if (contractTrainTimeString != null) contractTrainTimeNanos = parseTiming(contractTrainTimeString); if (contractTestTimeString != null) contractTestTimeNanos = parseTiming(contractTestTimeString); if (contractTrainTimeNanos > 0) { trainContracts.add(String.valueOf(contractTrainTimeNanos)); trainContracts.add(TimeUnit.NANOSECONDS.toString()); } // check the contracts are in ascending order // todo sort them for (int i = 1; i < trainContracts.size(); i += 2) { trainContracts.set(i, trainContracts.get(i).toUpperCase()); } long prev = -1; for (int i = 0; i < trainContracts.size(); i += 2) { long nanos = TimeUnit.NANOSECONDS.convert(Long.parseLong(trainContracts.get(i)), TimeUnit.valueOf(trainContracts.get(i + 1))); if (prev > nanos) { throw new IllegalArgumentException("contracts not in asc order"); } prev = nanos; } if (trainContracts.size() % 2 != 0) { throw new IllegalStateException("illegal number of args for time"); } if (logLevelStr != null) { logLevel = Level.parse(logLevelStr); } } /** * Helper func to parse a timing string of the form [int][char], e.g. 10s = 10 seconds = 10,000,000,000 nanosecs. * 1h = 60M = 3600s = 3600,000,000,000n * <p> * todo Alternatively, string can be of form [int][TimeUnit.toString()], e.g. 10SECONDS * <p> * If just a number is given without a time unit character, HOURS is assumed to be the time unit * <p> * Possible time unit chars: * n - nanoseconds * u - microseconds * m - milliseconds * s - seconds * M - minutes * h - hours * d - days * w - weeks * <p> * todo learn/use java built in timing things if really wanted, e.g. 
TemporalAmount * * @return long number of nanoseconds the input string represents */ private long parseTiming(String timeStr) throws IllegalArgumentException { try { // check if it's just a number, in which case return it under assumption that it's in hours int val = Integer.parseInt(timeStr); return TimeUnit.NANOSECONDS.convert(val, TimeUnit.HOURS); } catch (Exception e) { //pass } // convert it char unit = timeStr.charAt(timeStr.length() - 1); int amount = Integer.parseInt(timeStr.substring(0, timeStr.length() - 1)); long nanoAmount = 0; switch (unit) { case 'n': nanoAmount = amount; break; case 'u': nanoAmount = TimeUnit.NANOSECONDS.convert(amount, TimeUnit.MICROSECONDS); break; case 'm': nanoAmount = TimeUnit.NANOSECONDS.convert(amount, TimeUnit.MILLISECONDS); break; case 's': nanoAmount = TimeUnit.NANOSECONDS.convert(amount, TimeUnit.SECONDS); break; case 'M': nanoAmount = TimeUnit.NANOSECONDS.convert(amount, TimeUnit.MINUTES); break; case 'h': nanoAmount = TimeUnit.NANOSECONDS.convert(amount, TimeUnit.HOURS); break; case 'd': nanoAmount = TimeUnit.NANOSECONDS.convert(amount, TimeUnit.DAYS); break; default: throw new IllegalArgumentException("Unrecognised time unit string conversion requested, was given " + timeStr); } return nanoAmount; } public String toShortString() { return "[" + estimatorName + "," + datasetName + "," + foldId + "]"; } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("EXPERIMENT SETTINGS " + this.toShortString()); // printing fields via reflection now to avoid cases of forgetting to account for newly added paras for (Field field : ExperimentalArguments.class.getFields()) { try { sb.append("\n").append(field.getName()).append(": ").append(field.get(this)); } catch (IllegalAccessException ex) { System.out.println("Fatal, should-be-unreachable exception thrown while printing exp args"); System.out.println(ex); ex.printStackTrace(); System.exit(0); } } return sb.toString(); } }
24,805
57.230047
279
java
tsml-java
tsml-java-master/src/main/java/experiments/MemoryMonitor.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package experiments; import com.sun.management.GarbageCollectionNotificationInfo; import javax.management.Notification; import javax.management.NotificationEmitter; import javax.management.NotificationListener; import javax.management.openmbean.CompositeData; import java.lang.management.GarbageCollectorMXBean; import java.lang.management.MemoryUsage; import java.util.ArrayList; import java.util.List; import java.util.Map; /** * @author Tony Bagnall * Simple utility program to approximate the memory usage of a program. It works by waiting for notification from * the garbage collection, then recording the maximum used. This is probably not that reliable, so should be averaged over * runs.Could easily be adapted to store the series of memory calls, although would then need to store the time intervals * * only used in simulation experiments, and from Feb 2020 in ClassifierResults and ClassifierExperiments * only records max memory. * * adapted from code here http://www.fasterj.com/articles/gcnotifs.shtml * * MemoryMonitor mem=new MemoryMonitor(); * mem.installMonitor(); * //DO SOME STUFF * long time=mem.getMaxMemoryUsed(); * * by default, I compare this to final memory. 
*/ public class MemoryMonitor { private long maxMemInit=0; private long maxMemCommitted=0; private long maxMemMax=0; private long maxMemUsed=0; public long getMaxMemoryUsed(){return maxMemUsed;} public void installMonitor(){ //get all the GarbageCollectorMXBeans - there's one for each heap generation //so probably two - the old generation and young generation List<GarbageCollectorMXBean> gcbeans = java.lang.management.ManagementFactory.getGarbageCollectorMXBeans(); //Install a notification handler for each bean for (GarbageCollectorMXBean gcbean : gcbeans) { NotificationEmitter emitter = (NotificationEmitter) gcbean; //use an anonymously generated listener for this example // - proper code should really use a named class NotificationListener listener = new NotificationListener() { //keep a count of the total time spent in GCs long totalGcDuration = 0; //implement the notifier callback handler @Override public void handleNotification(Notification notification, Object handback) { //we only handle GARBAGE_COLLECTION_NOTIFICATION notifications here if (notification.getType().equals(GarbageCollectionNotificationInfo.GARBAGE_COLLECTION_NOTIFICATION)) { //get the information associated with this notification GarbageCollectionNotificationInfo info = GarbageCollectionNotificationInfo.from((CompositeData) notification.getUserData()); //get all the info and pretty print it //Get the information about each memory space, and pretty print it Map<String, MemoryUsage> membefore = info.getGcInfo().getMemoryUsageBeforeGc(); Map<String, MemoryUsage> mem = info.getGcInfo().getMemoryUsageAfterGc(); long memInit=0; long memCommitted=0; long memMax=0; long memUsed=0; // MemoryUsage before; for (Map.Entry<String, MemoryUsage> entry : mem.entrySet()) { String name = entry.getKey(); MemoryUsage memdetail = entry.getValue(); memInit += memdetail.getInit(); memCommitted += memdetail.getCommitted(); memMax += memdetail.getMax(); memUsed += memdetail.getUsed(); // MemoryUsage before = 
membefore.get(name); // System.out.print(name + (memCommitted==memMax?"(fully expanded)":"(still expandable)") +"used: "+(beforepercent/10)+"."+(beforepercent%10)+"%->"+(percent/10)+"."+(percent%10)+"%("+((memUsed/1048576)+1)+"MB) / "); } // System.out.println(" Mem max (max used or available?)"+memMax/100000+" mem used (before or after?)"+memUsed/100000+" mem committed? ="+memCommitted/1000000); if(memMax>maxMemMax) maxMemMax=memMax; if(memUsed>maxMemUsed) maxMemUsed=memUsed; if(memCommitted>maxMemCommitted) maxMemCommitted= memCommitted; } } }; //Add the listener emitter.addNotificationListener(listener, null, null); } } public static void main(String[] args) { // installGCMonitoring(); MemoryMonitor mem=new MemoryMonitor(); mem.installMonitor(); ArrayList<double[]> d=new ArrayList<>(); try { long memoryBefore = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory(); for(int i=0;i<=100000;i++){ double[] data=new double[10000]; d.add(data); if(i%1000==0){ d=new ArrayList<>(); } } d=new ArrayList<>(); System.gc(); Thread.sleep(4000); long memoryAfter = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory(); System.out.println("Final memory in use in the program = "+(memoryAfter-memoryBefore)/100000); System.out.println(" Max observed via execution = "+mem.maxMemUsed/100000); } catch (Exception e) { System.out.println(" Thread interrupted, exit"); } } public static void installGCMonitoring(){ //get all the GarbageCollectorMXBeans - there's one for each heap generation //so probably two - the old generation and young generation List<GarbageCollectorMXBean> gcbeans = java.lang.management.ManagementFactory.getGarbageCollectorMXBeans(); //Install a notification handler for each bean for (GarbageCollectorMXBean gcbean : gcbeans) { System.out.println(gcbean); NotificationEmitter emitter = (NotificationEmitter) gcbean; //use an anonymously generated listener for this example // - proper code should really use a named class 
NotificationListener listener = new NotificationListener() { //keep a count of the total time spent in GCs long totalGcDuration = 0; //implement the notifier callback handler @Override public void handleNotification(Notification notification, Object handback) { //we only handle GARBAGE_COLLECTION_NOTIFICATION notifications here if (notification.getType().equals(GarbageCollectionNotificationInfo.GARBAGE_COLLECTION_NOTIFICATION)) { //get the information associated with this notification GarbageCollectionNotificationInfo info = GarbageCollectionNotificationInfo.from((CompositeData) notification.getUserData()); //get all the info and pretty print it long duration = info.getGcInfo().getDuration(); String gctype = info.getGcAction(); if ("end of minor GC".equals(gctype)) { gctype = "Young Gen GC"; } else if ("end of major GC".equals(gctype)) { gctype = "Old Gen GC"; } System.out.println(); System.out.println(gctype + ": - " + info.getGcInfo().getId()+ " " + info.getGcName() + " (from " + info.getGcCause()+") "+duration + " milliseconds; start-end times " + info.getGcInfo().getStartTime()+ "-" + info.getGcInfo().getEndTime()); //System.out.println("GcInfo CompositeType: " + info.getGcInfo().getCompositeType()); //System.out.println("GcInfo MemoryUsageAfterGc: " + info.getGcInfo().getMemoryUsageAfterGc()); //System.out.println("GcInfo MemoryUsageBeforeGc: " + info.getGcInfo().getMemoryUsageBeforeGc()); //Get the information about each memory space, and pretty print it Map<String, MemoryUsage> membefore = info.getGcInfo().getMemoryUsageBeforeGc(); Map<String, MemoryUsage> mem = info.getGcInfo().getMemoryUsageAfterGc(); for (Map.Entry<String, MemoryUsage> entry : mem.entrySet()) { String name = entry.getKey(); MemoryUsage memdetail = entry.getValue(); long memInit = memdetail.getInit(); long memCommitted = memdetail.getCommitted(); long memMax = memdetail.getMax(); long memUsed = memdetail.getUsed(); MemoryUsage before = membefore.get(name); long beforepercent = 
((before.getUsed()*1000L)/before.getCommitted()); long percent = ((memUsed*1000L)/before.getCommitted()); //>100% when it gets expanded System.out.println(" Mem max (max used or available?)"+memMax/100000+" mem used (before or after?)"+memUsed/100000+" mem committed? ="+memCommitted/1000000); // System.out.print(name + (memCommitted==memMax?"(fully expanded)":"(still expandable)") +"used: "+(beforepercent/10)+"."+(beforepercent%10)+"%->"+(percent/10)+"."+(percent%10)+"%("+((memUsed/1048576)+1)+"MB) / "); } // System.out.println(); totalGcDuration += info.getGcInfo().getDuration(); long percent = totalGcDuration*1000L/info.getGcInfo().getEndTime(); System.out.println("GC cumulated overhead "+(percent/10)+"."+(percent%10)+"%"); } } }; //Add the listener emitter.addNotificationListener(listener, null, null); } } }
11,256
54.453202
264
java
tsml-java
tsml-java-master/src/main/java/experiments/ShapeDTWExperiments.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package experiments; import java.io.File; import java.util.Scanner; public class ShapeDTWExperiments { public static void main(String[] args){ try { String fileLoc = "C:\\Users\\Vince\\Documents\\Dissertation Repositories\\datasets\\datasetsList.txt"; Scanner scan = new Scanner(new File(fileLoc)); while(scan.hasNextLine()) { String [] experimentArguments = new String[5]; experimentArguments[0] = "--dataPath=C:\\Users\\Vince\\Documents\\Dissertation Repositories\\datasets\\Univariate2018_arff"; experimentArguments[1] = "--resultsPath=C:\\Users\\Vince\\Documents\\Dissertation Repositories\\results\\java"; experimentArguments[2] = "--classifierName=NN_ShapeDTW_Raw"; experimentArguments[3] = "--datasetName=" + scan.nextLine(); experimentArguments[4] = "--fold=10"; ClassifierExperiments.main(experimentArguments); } scan.close(); } catch (Exception e) { e.printStackTrace(); } } }
1,866
42.418605
140
java
tsml-java
tsml-java-master/src/main/java/experiments/SimulationExperiments.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package experiments; //import com.sun.management.GarbageCollectionNotificationInfo; //import com.sun.management.GarbageCollectorMXBean; import tsml.classifiers.dictionary_based.*; import tsml.classifiers.distance_based.DTWCV; import tsml.classifiers.legacy.COTE.FlatCote; import tsml.classifiers.shapelet_based.LearnShapelets; import tsml.classifiers.shapelet_based.FastShapelets; import tsml.classifiers.interval_based.TSBF; import tsml.classifiers.interval_based.TSF; import tsml.classifiers.distance_based.DTD_C; import tsml.classifiers.shapelet_based.ShapeletTransformClassifier; import tsml.classifiers.interval_based.LPS; import tsml.classifiers.distance_based.ElasticEnsemble; import tsml.classifiers.distance_based.DD_DTW; import tsml.classifiers.legacy.COTE.HiveCote; import fileIO.InFile; import fileIO.OutFile; import java.io.File; import java.lang.management.ManagementFactory; import java.lang.management.MemoryMXBean; import java.text.DecimalFormat; import java.util.ArrayList; import java.util.logging.Level; import java.util.logging.Logger; import statistics.simulators.ElasticModel; import statistics.simulators.Model; import statistics.simulators.SimulateSpectralData; import statistics.simulators.SimulateDictionaryData; import 
statistics.simulators.SimulateIntervalData; import statistics.simulators.SimulateShapeletData; import statistics.simulators.SimulateWholeSeriesData; import statistics.simulators.SimulateElasticData; import statistics.simulators.SimulateMatrixProfileData; import tsml.classifiers.EnhancedAbstractClassifier; import utilities.InstanceTools; import weka.classifiers.Classifier; import weka.classifiers.meta.RotationForest; import machine_learning.classifiers.ensembles.CAWPE; import machine_learning.classifiers.ensembles.SaveableEnsemble; import tsml.classifiers.legacy.elastic_ensemble.DTW1NN; import tsml.transformers.MatrixProfile; import weka.core.Instances; import utilities.ClassifierTools; import machine_learning.classifiers.kNN; import weka.core.Instance; import tsml.transformers.RowNormalizer; import javax.management.Notification; /* Class to run one of various simulations. AJB Oct 2016 Model to simulate data where matrix profile should be optimal. /* Basic experimental design in SimulationExperiments.java: Simulate data, [possibly normalise for standard classifiers], take MP build ED on that. Variants tried are in First scenario: Each class is defined by two locations, noise is very low (0.1) Config 1: Cycle through the two possible shapes, give random base and amplitude to both Model calls : ((MatrixProfileModel)model). 
No normalisation: MatrixProfileExperiments.normalize=false; ED mean acc =0.7633 MP_ED mean acc =1 DTW mean acc =0.6389 RotF mean acc =0.5444 ST mean acc =0.6333 TSF mean acc =1 BOSS mean acc =0.7844 Normalisation: MatrixProfileExperiments.normalize=true ED mean acc =1 MP_ED mean acc =1 DTW mean acc =0.6567 RotF mean acc =0.7522 ST mean acc =1 TSF mean acc =0.5767 BOSS mean acc =1 Unfortunately need to normalise for any credibility, so on to cnfig 2: Config 2: Give different random base and amplitude to both No normalisation: (ran these by mistake, will no longer run normalisation) ED mean acc =0.7822 MP_ED mean acc =0.9844 DTW mean acc =0.6144 RotF mean acc =0.539 ST mean acc =0.57777 TSF mean acc =1.0 BOSS mean acc =0.773 Normalisation: ED mean acc =1 MP_ED mean acc =0.9844 DTW mean acc =0.6467 RotF mean acc =0.8133 ST mean acc =1 TSF mean acc =0.6856 BOSS mean acc =1 Config 3: After shock model. 1. Make second shape smaller than the first. 2. Fix position of first shape. 2. Make one model have only one shape. WAIT and go back. Set up with amplitude between 2 and 4 we get this. Sig =0.1 Mean 1NN Acc =0.81222 Mean 1NN Norm Acc =0.91889 Mean 1NN MP Acc = 1 CHANGE: Ranomise the shape completely! 
*/ public class SimulationExperiments { static boolean local = false; static int[] casesPerClass = { 50, 50 }; static int seriesLength = 500; static double trainProp = 0.5; static boolean normalize = true; static String[] allClassifiers = { // Benchmarks "ED", "RotF", "DTW", // Whole series // "DD_DTW","DTD_C", "EE", "HESCA", // Interval "TSF", // "TSBF","LPS", // Shapelet // "FastShapelets","LearnShapelets", "ST", // Dictionary "BOP", "BOSS", // Spectral "RISE", // Combos "FLATCOTE", "HIVECOTE" }; static String[] allSimulators = { "WholeSeriesElastic", "Interval", "Shapelet", "Dictionary", "ARMA" }; public static Classifier setClassifier(String str) throws RuntimeException { Classifier c; switch (str) { case "ED": case "MP_ED": c = new kNN(1); break; case "HESCA": c = new CAWPE(); break; case "RotF": c = new RotationForest(); break; case "DTW": case "MP_DTW": c = new DTW1NN(); break; case "DD_DTW": c = new DD_DTW(); break; case "DTD_C": c = new DTD_C(); break; case "EE": c = new ElasticEnsemble(); break; case "TSF": c = new TSF(); break; case "TSBF": c = new TSBF(); break; case "LPS": c = new LPS(); break; case "FastShapelets": c = new FastShapelets(); break; case "ST": c = new ShapeletTransformClassifier(); if (local) ((ShapeletTransformClassifier) c).setOneMinuteLimit(); else ((ShapeletTransformClassifier) c).setOneHourLimit(); // ((ShapeletTransformClassifier)c).setOneMinuteLimit();//DEBUG break; case "BOP": c = new BagOfPatternsClassifier(); break; case "BOSS": c = new BOSS(); break; case "COTE": case "FLATCOTE": c = new FlatCote(); break; case "HIVECOTE": c = new HiveCote(); // ((HiveCote)c).setNosHours(2); break; default: throw new RuntimeException(" UNKNOWN CLASSIFIER " + str); } return c; } public static void setStandardGlobalParameters(String str) { switch (str) { case "ARMA": case "AR": case "Spectral": casesPerClass = new int[] { 200, 200 }; seriesLength = 200; trainProp = 0.1; Model.setDefaultSigma(1); break; case "Shapelet": casesPerClass = new int[] 
{ 250, 250 }; seriesLength = 300; trainProp = 0.1; Model.setDefaultSigma(1); break; case "Dictionary": casesPerClass = new int[] { 200, 200 }; seriesLength = 1500; trainProp = 0.1; SimulateDictionaryData.setShapeletsPerClass(new int[] { 5, 10 }); SimulateDictionaryData.setShapeletLength(29); // SimulateDictionaryData.checkGlobalSeedForIntervals(); Model.setDefaultSigma(1); break; case "Interval": seriesLength = 1000; trainProp = 0.1; casesPerClass = new int[] { 200, 200 }; Model.setDefaultSigma(1); // SimulateIntervalData.setAmp(1); SimulateIntervalData.setNosIntervals(3); SimulateIntervalData.setNoiseToSignal(10); break; case "WholeSeriesElastic": case "WholeSeries": seriesLength = 100; trainProp = 0.1; casesPerClass = new int[] { 100, 100 }; Model.setDefaultSigma(1); ElasticModel.setBaseAndAmp(-2, 4); ElasticModel.setWarpPercent(0.4); // SimulateWholeSeriesElastic. break; case "MatrixProfile": seriesLength = 150; trainProp = 0.1; casesPerClass = new int[] { 50, 50 }; Model.setDefaultSigma(1); break; default: throw new RuntimeException(" UNKNOWN SIMULATOR "); } } public static Instances simulateData(String str, int seed) throws RuntimeException { Instances data; // for(int:) Model.setGlobalRandomSeed(seed); switch (str) { case "ARMA": case "AR": case "SPECTRAL": data = SimulateSpectralData.generateSpectralEmbeddedData(seriesLength, casesPerClass); // data=SimulateSpectralData.generateARDataSet(seriesLength, casesPerClass, // true); break; case "Shapelet": data = SimulateShapeletData.generateShapeletData(seriesLength, casesPerClass); break; case "Dictionary": data = SimulateDictionaryData.generateDictionaryData(seriesLength, casesPerClass); break; case "Interval": data = SimulateIntervalData.generateIntervalData(seriesLength, casesPerClass); break; case "WholeSeries": data = SimulateWholeSeriesData.generateWholeSeriesdData(seriesLength, casesPerClass); break; case "WholeSeriesElastic": data = SimulateElasticData.generateElasticData(seriesLength, casesPerClass); 
break; case "MatrixProfile": data = SimulateMatrixProfileData.generateMatrixProfileData(seriesLength, casesPerClass); break; default: throw new RuntimeException(" UNKNOWN SIMULATOR " + str); } return data; } // arg[0]: simulator // arg[1]: classifier // arg[2]: fold number public static double runSimulationExperiment(String[] args, boolean useStandard) throws Exception { String simulator = args[0]; if (useStandard) setStandardGlobalParameters(simulator); String classifier = args[1]; Classifier c = setClassifier(classifier); int fold = Integer.parseInt(args[2]) - 1; String resultsPath = args[3]; // Set up the train and test files File f = new File(resultsPath + simulator); if (!f.exists()) f.mkdirs(); String predictions = resultsPath + simulator + "/" + classifier; f = new File(predictions); if (!f.exists()) f.mkdir(); // Check whether fold already exists, if so, dont do it, just quit f = new File(predictions + "/testFold" + fold + ".csv"); if (!f.exists() || f.length() == 0) { // Do the experiment: find train preds through cross validation // Then generate all test predictions Instances data = simulateData(args[0], fold); Instances[] split = InstanceTools.resampleInstances(data, fold, trainProp); System.out.println(" Train size =" + split[0].numInstances() + " test size =" + split[1].numInstances()); // Check if it is MP or not if (classifier.contains("MP_")) { try { System.out.println("MAtrix profile run ...."); MatrixProfile mp = new MatrixProfile(29); split[0] = mp.transform(split[0]); split[1] = mp.transform(split[1]); } catch (Exception ex) { Logger.getLogger(SimulationExperiments.class.getName()).log(Level.SEVERE, null, ex); } } else if (normalize) { RowNormalizer nc = new RowNormalizer(); split[0] = nc.transform(split[0]); split[1] = nc.transform(split[1]); } double acc=singleSampleExperiment(split[0],split[1],c,fold,predictions); // System.out.println("simulator ="+simulator+" Classifier ="+classifier+" Fold "+fold+" Acc ="+acc); return acc; } else 
System.out.println(predictions+"/testFold"+fold+".csv already exists"); // of.writeString("\n"); return -1; } public static void pairwiseTests(){ } public static void combineTestResults(String classifier, String simulator,String resultsPath){ int folds=200; File f=new File(resultsPath+"/"+simulator); if(!f.exists() || !f.isDirectory()){ f.mkdir(); } else{ boolean results=false; for(int i=0;i<folds && !results;i++){ //Check fold exists f= new File(resultsPath+"/"+simulator+"/"+classifier+"/testFold"+i+".csv"); if(f.exists()) results=true; } if(results){ OutFile of=new OutFile(resultsPath+"/"+simulator+"/"+classifier+".csv"); for(int i=0;i<folds;i++){ //Check fold exists f= new File(resultsPath+"/"+simulator+"/"+classifier+"/testFold"+i+".csv"); if(f.exists() && f.length()>0){ InFile inf=new InFile(resultsPath+"/"+simulator+"/"+classifier+"/testFold"+i+".csv"); inf.readLine(); inf.readLine(); of.writeLine(i+","+inf.readDouble()); } } of.closeFile(); } } } public static double singleSampleExperiment(Instances train, Instances test, Classifier c, int sample,String preds){ double acc=0; OutFile p=new OutFile(preds+"/testFold"+sample+".csv"); // hack here to save internal CV for further ensembling // if(c instanceof TrainAccuracyEstimate) // ((TrainAccuracyEstimate)c).writeCVTrainToFile(preds+"/trainFold"+sample+".csv"); if(c instanceof SaveableEnsemble) ((SaveableEnsemble)c).saveResults(preds+"/internalCV_"+sample+".csv",preds+"/internalTestPreds_"+sample+".csv"); try{ c.buildClassifier(train); int[][] predictions=new int[test.numInstances()][2]; for(int j=0;j<test.numInstances();j++){ predictions[j][0]=(int)test.instance(j).classValue(); test.instance(j).setMissing(test.classIndex());//Just in case .... 
} for(int j=0;j<test.numInstances();j++) { predictions[j][1]=(int)c.classifyInstance(test.instance(j)); if(predictions[j][0]==predictions[j][1]) acc++; } acc/=test.numInstances(); String[] names=preds.split("/"); p.writeLine(names[names.length-1]+","+c.getClass().getName()+",test"); if(c instanceof EnhancedAbstractClassifier) p.writeLine(((EnhancedAbstractClassifier)c).getParameters()); else if(c instanceof SaveableEnsemble) p.writeLine(((SaveableEnsemble)c).getParameters()); else p.writeLine("NoParameterInfo"); p.writeLine(acc+""); for(int j=0;j<test.numInstances();j++){ p.writeString(predictions[j][0]+","+predictions[j][1]+","); double[] dist =c.distributionForInstance(test.instance(j)); for(double d:dist) p.writeString(","+d); p.writeString("\n"); } }catch(Exception e) { System.out.println(" Error ="+e+" in method simpleExperiment"+e); e.printStackTrace(); System.out.println(" TRAIN "+train.relationName()+" has "+train.numAttributes()+" attributes and "+train.numInstances()+" instances"); System.out.println(" TEST "+test.relationName()+" has "+test.numAttributes()+" attributes and "+test.numInstances()+" instances"); System.exit(0); } return acc; } public static void collateAllResults(){ String resultsPath="C:\\Users\\ajb\\Dropbox\\Results\\SimulationExperiments\\BasicExperiments\\"; for(String s:allClassifiers){ for(String a:allSimulators){ // String a="WholeSeriesElastic"; combineTestResults(s,a,resultsPath); } } int folds=200; for(String a:allSimulators){ if(new File(resultsPath+a).exists()){ System.out.println(" Simulation = "+a); OutFile of=new OutFile(resultsPath+a+"CombinedResults.csv"); InFile[] ins=new InFile[allClassifiers.length]; int count=0; of.writeString(","); for(String s:allClassifiers){ File f=new File(resultsPath+a+"\\"+s+".csv"); if(f.exists()){ InFile inf=new InFile(resultsPath+a+"\\"+s+".csv"); int lines=inf.countLines(); if(lines>=folds){ System.out.println(" Doing "+a+" and "+s); of.writeString(s+","); ins[count++]=new 
InFile(resultsPath+a+"\\"+s+".csv"); } } } of.writeString("\n"); for(int i=0;i<folds;i++){ of.writeString("Rep"+i+","); for(int j=0;j<count;j++){ ins[j].readInt(); double acc=ins[j].readDouble(); of.writeString(acc+","); } of.writeString("\n"); } } } } /** * FINAL VERSION * Stand alone method to exactly reproduce shapelet experiment which we normally */ public static void runShapeletSimulatorExperiment(){ Model.setDefaultSigma(1); seriesLength=300; casesPerClass=new int[]{50,50}; String[] classifiers={"RotF","DTW","FastShapelets","ST","BOSS"}; // "EE","CAWPE","TSF","TSBF","FastShapelets","ST","LearnShapelets","BOP","BOSS","C_RISE","COTE"}; OutFile of=new OutFile("C:\\Temp\\ShapeletSimExperiment.csv"); setStandardGlobalParameters("Shapelet"); of.writeLine("Shapelet Sim, series length= "+seriesLength+" cases class 0 ="+casesPerClass[0]+" class 1"+casesPerClass[0]+" train proportion = "+trainProp); of.writeString("Rep"); for(String s:classifiers) of.writeString(","+s); of.writeString("\n"); for(int i=0;i<100;i++){ of.writeString(i+","); //Generate data Model.setGlobalRandomSeed(i); Instances data=SimulateShapeletData.generateShapeletData(seriesLength,casesPerClass); //Split data Instances[] split=InstanceTools.resampleInstances(data, i,trainProp); for(String str:classifiers){ Classifier c; //Build classifiers switch(str){ case "RotF": c=new RotationForest(); break; case "DTW": c=new DTWCV(); break; case "EE": c=new ElasticEnsemble(); break; case "TSF": c=new TSF(); break; case "TSBF": c=new TSBF(); break; case "FastShapelets": c=new FastShapelets(); break; case "ST": c=new ShapeletTransformClassifier(); ((ShapeletTransformClassifier)c).setOneMinuteLimit(); break; case "LearnShapelets": c=new LearnShapelets(); break; case "BOP": c=new BagOfPatternsClassifier(); break; case "BOSS": c=new BOSS(); break; case "COTE": c=new FlatCote(); break; default: throw new RuntimeException(" UNKNOWN CLASSIFIER "+str); } double acc=ClassifierTools.singleTrainTestSplitAccuracy(c, 
split[0], split[1]); of.writeString(acc+","); System.out.println(i+" "+str+" acc ="+acc); } of.writeString("\n"); } } /** Method to run the error experiment, default setttings * */ public static void runErrorExperiment(String[] args){ String simulator=args[0]; String classifier=args[1]; int e=Integer.parseInt(args[2])-1; int fold=Integer.parseInt(args[3])-1; String resultsPath=args[4]; //Set up the train and test files File f=new File(resultsPath+"Error"); if(!f.exists()) f.mkdir(); f=new File(resultsPath+"Error/"+simulator); if(!f.exists()) f.mkdir(); String predictions= resultsPath+"Error/"+simulator+"/"+classifier; f=new File(predictions); if(!f.exists()) f.mkdir(); //E encodes the error and the job number. So double error=((double)e/10.0); f=new File(predictions+"/testAcc"+e+"_"+fold+".csv"); if(!f.exists() || f.length()==0){ setStandardGlobalParameters(simulator); Model.setDefaultSigma(error); Instances data=simulateData(simulator,50*(e+1)*fold); Classifier c=setClassifier(classifier); Instances[] split=InstanceTools.resampleInstances(data, fold,trainProp); double a=ClassifierTools.singleTrainTestSplitAccuracy(c,split[0],split[1]); OutFile out=new OutFile(predictions+"/testAcc"+e+"_"+fold+".csv"); out.writeLine(a+""); } } /** Method to run the error experiment, default setttings * */ public static void runLengthExperiment(String[] args){ String simulator=args[0]; String classifier=args[1]; //Series length factor int l=Integer.parseInt(args[2]); String resultsPath=args[3]; seriesLength=10+(1+l)*50; //l from 1 to 50 //Set up the train and test files File f=new File(resultsPath+simulator+"Length"); if(!f.exists()) f.mkdir(); String predictions= resultsPath+simulator+"Length/"+classifier; f=new File(predictions); if(!f.exists()) f.mkdir(); //Check whether fold already exists, if so, dont do it, just quit f=new File(predictions+"/testAcc"+l+".csv"); if(!f.exists() || f.length()==0){ //Do the experiment: just measure the single fold accuracy OutFile out=new 
OutFile(predictions+"/testAcc"+l+".csv"); double acc=0; double var=0; for(int fold=0;fold<100;fold++){ Instances data=simulateData(simulator,seriesLength); Classifier c=setClassifier(classifier); Instances[] split=InstanceTools.resampleInstances(data, fold,0.5); double a=ClassifierTools.singleTrainTestSplitAccuracy(c,split[0],split[1]); acc+=a; var+=a*a; } out.writeLine(acc/100+","+var); } } public static void trainSetSizeExperiment(String[] args){ String simulator=args[0]; String classifier=args[1]; //Series length factor int l=Integer.parseInt(args[2]); String resultsPath=args[3]; trainProp=(double)(l/10.0); //l from 1 to 9 //Set up the train and test files File f=new File(resultsPath+simulator+"Length"); if(!f.exists()) f.mkdir(); String predictions= resultsPath+simulator+"Length/"+classifier; f=new File(predictions); if(!f.exists()) f.mkdir(); //Check whether fold already exists, if so, dont do it, just quit f=new File(predictions+"/testAcc"+l+".csv"); if(!f.exists() || f.length()==0){ //Do the experiment: just measure the single fold accuracy OutFile out=new OutFile(predictions+"/testAcc"+l+".csv"); double acc=0; double var=0; for(int fold=0;fold<100;fold++){ Instances data=simulateData(simulator,50*(l+1)*fold); Classifier c=setClassifier(classifier); Instances[] split=InstanceTools.resampleInstances(data, fold,0.5); double a=ClassifierTools.singleTrainTestSplitAccuracy(c,split[0],split[1]); acc+=a; var+=a*a; } out.writeLine(acc/100+","+var); } } //<editor-fold defaultstate="collapsed" desc="One off data processing methods"> public static void collateErrorResults(){ String path="C:\\Users\\ajb\\Dropbox\\Results\\SimulationExperiments\\Error\\"; double[][] means=new double[allClassifiers.length][21]; for(String a:allSimulators){ OutFile out=new OutFile(path+"CollatedError"+a+"Results.csv"); for(String s:allClassifiers) out.writeString(","+s); out.writeString("\n"); int count=0; for(String s:allClassifiers){ for(int i=0;i<=20;i++){ int x=0; for(int 
j=0;j<100;j++){ File f= new File(path+a+"\\"+s+"\\"+"testAcc"+i+"_"+j+".csv"); if(f.exists() && f.length()>0){ InFile inf=new InFile(path+a+"\\"+s+"\\"+"testAcc"+i+"_"+j+".csv"); double aa=inf.readDouble(); means[count][i]+=aa; x++; } } if(x>0) means[count][i]/=x; } count++; } for(int j=0;j<means[0].length;j++){ out.writeString(100*(j+1)+","); for(int i=0;i<means.length;i++) out.writeString(means[i][j]+","); out.writeString("\n"); } } } public static void collateLengthResults(){ String path="C:\\Users\\ajb\\Dropbox\\Results\\SimulationExperiments\\ShapeletLength\\"; OutFile out=new OutFile(path+"CollatedLengthResults.csv"); out.writeString("Error"); for(String s:allClassifiers) out.writeString(","+s); out.writeString("\n"); for(int i=0;i<10;i++){ out.writeString((i*50+10)+""); for(String s:allClassifiers){ File f= new File(path+s+"\\"+"testAcc"+i+".csv"); if(f.exists() && f.length()>0){ InFile inf=new InFile(path+s+"\\"+"testAcc"+i+".csv"); double a=inf.readDouble(); out.writeString(","+a); } else out.writeString(","); } out.writeString("\n"); } } public static void createBaseExperimentScripts(boolean grace){ //Generates cluster scripts for all combos of classifier and simulator String path="C:\\Users\\ajb\\Dropbox\\Code\\Cluster Scripts\\SimulatorScripts\\BaseExperiment\\"; File f=new File(path); int folds=200; if(!f.isDirectory()) f.mkdir(); for(String a:allSimulators){ OutFile of2; if(grace) of2=new OutFile(path+a+"Grace.txt"); else of2=new OutFile(path+a+".txt"); for(String s:allClassifiers){ OutFile of; if(grace) of = new OutFile(path+s+a+"Grace.bsub"); else of = new OutFile(path+s+a+".bsub"); of.writeLine("#!/bin/csh"); if(grace) of.writeLine("#BSUB -q short"); else of.writeLine("#BSUB -q long-eth"); of.writeLine("#BSUB -J "+s+a+"[1-"+folds+"]"); of.writeLine("#BSUB -oo output/"+a+".out"); of.writeLine("#BSUB -eo error/"+a+".err"); if(grace){ of.writeLine("#BSUB -R \"rusage[mem=2000]\""); of.writeLine("#BSUB -M 2000"); of.writeLine(" module add 
java/jdk/1.8.0_31"); } else{ of.writeLine("#BSUB -R \"rusage[mem=6000]\""); of.writeLine("#BSUB -M 6000"); of.writeLine("module add java/jdk1.8.0_51"); } of.writeLine("java -jar Simulator.jar "+a+" "+ s+" $LSB_JOBINDEX"); if(grace) of2.writeLine("bsub < Scripts/SimulatorExperiments/BaseExperiment/"+s+a+"Grace.bsub"); else of2.writeLine("bsub < Scripts/SimulatorExperiments/BaseExperiment/"+s+a+".bsub"); } } } public static void createErrorScripts(boolean grace,String simulator){ //Generates cluster scripts for all combos of classifier and simulator String path="C:\\Users\\ajb\\Dropbox\\Code\\Cluster Scripts\\SimulatorScripts\\Error\\"; File f= new File(path+simulator); if(!f.isDirectory()) f.mkdir(); // for(String simulator:allSimulators) String ext; if(grace) ext="ErrorGrace"; else ext="Error"; for(String classifier:allClassifiers){ OutFile of2=new OutFile(path+simulator+classifier+ext+".txt"); for(int i=1;i<=21;i++){ // OutFile of = new OutFile(path+"OC"+classifier+simulator+ext+".bsub"); OutFile of = new OutFile(path+simulator+"\\"+"\\"+classifier+"_"+ext+"_"+i+".bsub"); of.writeLine("#!/bin/csh"); if(grace) of.writeLine("#BSUB -q short"); else of.writeLine("#BSUB -q long-eth"); of.writeLine("#BSUB -J "+classifier+"[1-100]"); of.writeLine("#BSUB -oo output/"+simulator+".out"); of.writeLine("#BSUB -eo error/"+simulator+".err"); of.writeLine("#BSUB -R \"rusage[mem=6000]\""); of.writeLine("#BSUB -M 6000"); if(grace) of.writeLine(" module add java/jdk/1.8.0_31"); else of.writeLine("module add java/jdk1.8.0_51"); of.writeLine("java -jar Error.jar "+simulator+" "+ classifier+" "+i+" "+ "$LSB_JOBINDEX"); of2.writeLine("bsub < Scripts/SimulatorExperiments/Error/"+simulator+"/"+classifier+"_"+ext+"_"+i+".bsub"); } } } public static void collateSingleFoldErrorResults(){ String classifier="LearnShapelets"; String path="C:\\Users\\ajb\\Dropbox\\Results\\SimulationExperiments\\ShapeletError\\"+classifier+"\\"; OutFile of=new OutFile(path+classifier+".csv"); for(int i=0; 
i<21;i++){ double mean=0; for(int folds=0;folds<100;folds++){ int index=i*100+folds; InFile inf=new InFile(path+"testAcc"+index+"_"+folds+".csv"); mean+=inf.readDouble(); } mean/=100; of.writeLine(i+","+mean); } } public static void collateSomeStuff(){ String[] classifiers={"RotF","DTW","BOSS","ST"}; for(String str:classifiers){ String path="C:\\Users\\ajb\\Dropbox\\Results\\SimulationExperiments\\Dictionary\\"+str+"\\"; OutFile of=new OutFile(path+str+".csv"); double mean=0; for(int folds=0;folds<200;folds++){ File f=new File(path+"testFold"+folds+".csv"); if(f.exists() && f.length()>0){ InFile inf=new InFile(path+"testFold"+folds+".csv"); inf.readLine(); inf.readLine(); double x=inf.readDouble(); of.writeLine(folds+","+x); } } /* OutFile of2=new OutFile("C:\\Users\\ajb\\Dropbox\\Results\\MatrixProfileExperiments\\Dictionary.csv"); InFile[] all=new InFile[4]; for(String str:classifiers){ } */ } } //</editor-fold> public static void generateAllProblemFiles(){ for(String sim:allSimulators) generateProblemFile(sim); } public static void generateProblemFile(String sim){ setStandardGlobalParameters(sim); int s=22; Model.setGlobalRandomSeed(s); Model.setDefaultSigma(0.2); casesPerClass=new int[]{50,50}; try{ Instances data=simulateData(sim,s); Instances[] split=InstanceTools.resampleInstances(data, 0,trainProp); OutFile train=new OutFile("c:\\temp\\"+sim+"SimLowNoise.csv"); train.writeString(split[0].toString()); Model.setDefaultSigma(1); data=simulateData(sim,1); // data=SimulateDictionaryData.generateDictionaryData(seriesLength,casesPerClass); split=InstanceTools.resampleInstances(data, 0,trainProp); train=new OutFile("c:\\temp\\"+sim+"SimNormalNoise.csv"); train.writeString(split[0].toString()); }catch(Exception e){ System.out.println("should do something really ...."); } } public static void smoothingTests(){ String sim="WholeSeries"; setStandardGlobalParameters(sim); seriesLength=1000; int s=22; Model.setGlobalRandomSeed(s); Model.setDefaultSigma(5); 
casesPerClass=new int[]{50,50}; String[] names={"ED","DTW","RotF","BOSS","TSF"};//,"ST","CAWPE","HIVECOTE"}; Classifier[] cls=new Classifier[names.length]; for(int i=0;i<names.length;i++) cls[i]=setClassifier(names[i]); try{ Instances data=simulateData(sim,s); addSpikes(data); Instances[] split=InstanceTools.resampleInstances(data, 0,trainProp); DecimalFormat df= new DecimalFormat("##.##"); for(int i=0;i<names.length;i++){ double d=ClassifierTools.singleTrainTestSplitAccuracy(cls[i], split[0], split[1]); System.out.println(names[i]+" acc = "+df.format(d)); } }catch(Exception e){ System.out.println("should do something really ...."); } } public static void addSpikes(Instances t){ double peak=100; int numSpikes=10; for(int i=0;i<numSpikes;i++){ for(Instance ins:t){ int position=Model.rand.nextInt(t.numAttributes()-1); // if(Model.rand.nextDouble()<0.5){ if(Model.rand.nextDouble()<0.5) ins.setValue(position, peak); else ins.setValue(position, -peak); // } } } } public static void main(String[] args) throws Exception{ collateSimulatorResults(); // dictionarySimulatorChangingSeriesLength(); // dictionarySimulatorChangingTrainSize(); System.exit(0); smoothingTests(); String resultsPath="C:/Temp/"; System.exit(0); if(args.length>0){ if(args.length==3){//Base experiment double b=runSimulationExperiment(args,true); System.out.println(args[0]+","+args[1]+","+","+args[2]+" Acc ="+b); }else if(args.length==4){//Error experiment) runErrorExperiment(args); } // runLengthExperiment(paras); } else{ // DatasetLists.resultsPath="C:\\Users\\ajb\\Dropbox\\Results\\MatrixProfileExperiments\\"; local=true; String[] algos={"ED"};//,,"MP_RotF","MP_DTW"}; double[] meanAcc=new double[algos.length]; for(int i=1;i<=10;i++){ for(int j=0;j<algos.length;j++){ setStandardGlobalParameters("WholeSeries"); Model.setDefaultSigma(20); String[] para={"WholeSeries",algos[j],i+""}; double b=runSimulationExperiment(para,false); meanAcc[j]+=b; System.out.println(para[0]+","+para[1]+","+","+para[2]+" Acc 
="+b); } } DecimalFormat df=new DecimalFormat("##.####"); for(int j=0;j<algos.length;j++) System.out.println(algos[j]+" mean acc ="+df.format(meanAcc[j]/10)); } } public static void dictionarySimulatorChangingTrainSize() throws Exception { Model.setDefaultSigma(1); boolean overwrite=false; int seriesLength = 1000; int experiments=2; String writePath="Z:/Results Working Area/DictionaryBased/SimulationExperimentsMemMonitor2/"; for(int trainSize=500;trainSize<=10000;trainSize+=500) { File path = new File(writePath + "DictionaryTrainSize" + trainSize); path.mkdirs(); if(!overwrite) { File f1 = new File(writePath + "DictionaryTrainSize" + trainSize + "/testAcc" + trainSize + ".csv"); File f2 = new File(writePath + "DictionaryTrainSize" + trainSize + "/trainTime" + trainSize + ".csv"); File f3 = new File(writePath + "DictionaryTrainSize" + trainSize + "/testTime" + trainSize + ".csv"); File f4 = new File(writePath + "DictionaryTrainSize" + trainSize + "/mem" + trainSize + ".csv"); if(f1.exists() && f2.exists() && f3.exists() && f4.exists()){ System.out.println("SKIPPING train size = "+trainSize+" as all already present"); continue; } } OutFile accFile = new OutFile(writePath + "DictionaryTrainSize" + trainSize + "/testAcc" + trainSize + ".csv"); OutFile trainTimeFile = new OutFile(writePath + "DictionaryTrainSize" + trainSize +"/trainTime" + trainSize + ".csv"); OutFile testTimeFile = new OutFile(writePath + "DictionaryTrainSize" + trainSize + "/testTime" + trainSize + ".csv"); OutFile memFile = new OutFile(writePath + "DictionaryTrainSize" + trainSize + "/mem" + trainSize + ".csv"); System.out.println(" Generating simulated data for n ="+trainSize+" Series Length ="+seriesLength+" ...."); int[] casesPerClass = new int[2]; casesPerClass[0] = casesPerClass[1] = trainSize; int[] shapesPerClass = new int[]{5, 20}; long t1, t2; String[] classifierNames = {"cBOSS", "BOSS","WEASEL","S-BOSS"}; double[] acc = new double[classifierNames.length]; long[] trainTime = new 
long[classifierNames.length]; long[] testTime = new long[classifierNames.length]; long[] finalMem = new long[classifierNames.length]; long[] maxMem = new long[classifierNames.length]; for (int i = 0; i < experiments; i++) { Instances data = SimulateDictionaryData.generateDictionaryData(500, casesPerClass, shapesPerClass); Instances[] split = InstanceTools.resampleInstances(data, i, 0.5); System.out.println("Series Length =" + seriesLength + " Experiment Index: " + i + " Train size =" + split[0].numInstances() + " test size =" + split[1].numInstances()); for (int j = 0; j < classifierNames.length; j++) { System.gc(); MemoryMonitor monitor=new MemoryMonitor(); monitor.installMonitor(); long memoryBefore = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory(); Classifier c = ClassifierLists.setClassifierClassic(classifierNames[j], i); t1 = System.nanoTime(); c.buildClassifier(split[0]); trainTime[j] = System.nanoTime() - t1; t1 = System.nanoTime(); acc[j] = ClassifierTools.accuracy(split[1], c); testTime[j] = System.nanoTime() - t1; System.gc(); finalMem[j] = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory() - memoryBefore; maxMem[j]=monitor.getMaxMemoryUsed(); System.out.println("\t" + classifierNames[j] + " ACC = " + acc[j] + " Train Time =" + trainTime[j] + " Test Time = " + testTime[j] + " Final Memory = " + finalMem[j]/1000000+" Max Memory ="+maxMem[j]/1000000); } accFile.writeString(i + ""); for (int j = 0; j < classifierNames.length; j++) accFile.writeString("," + acc[j]); accFile.writeString("\n"); trainTimeFile.writeString(i + ""); for (int j = 0; j < classifierNames.length; j++) trainTimeFile.writeString("," + trainTime[j]); trainTimeFile.writeString("\n"); testTimeFile.writeString(i + ""); for (int j = 0; j < classifierNames.length; j++) testTimeFile.writeString("," + testTime[j]); testTimeFile.writeString("\n"); memFile.writeString(i + ""); for (int j = 0; j < classifierNames.length; j++) { memFile.writeString("," + 
finalMem[j]); } memFile.writeString(","); for (int j = 0; j < classifierNames.length; j++) { memFile.writeString("," + maxMem[j]); } memFile.writeString("\n"); } } } public static void dictionarySimulatorChangingSeriesLength() throws Exception { Model.setDefaultSigma(1); boolean overwrite=true; int experiments=2; int numCases=2000; String writePath="Z:/Results Working Area/DictionaryBased/SimulationExperimentsMemMonitor/"; for(int seriesLength=5000;seriesLength<=10000;seriesLength+=5000) { String dir="Cases1000SeriesLength"; File path = new File(writePath +dir+ seriesLength); path.mkdirs(); if(!overwrite) { File f1 = new File(writePath + dir + seriesLength + "/testAcc" + seriesLength + ".csv"); File f2 = new File(writePath + dir + seriesLength + "/trainTime" + seriesLength + ".csv"); File f3 = new File(writePath + dir + seriesLength + "/testTime" + seriesLength + ".csv"); File f4 = new File(writePath + dir + seriesLength + "/mem" + seriesLength + ".csv"); if(f1.exists() && f2.exists() && f3.exists() && f4.exists()){ System.out.println("SKIPPING series length = "+seriesLength+" as all already present"); continue; } } OutFile accFile = new OutFile(writePath + "DictionarySeriesLength" + seriesLength + "/testAcc" + seriesLength + ".csv"); OutFile trainTimeFile = new OutFile(writePath + "DictionarySeriesLength" + seriesLength +"/trainTime" + seriesLength + ".csv"); OutFile testTimeFile = new OutFile(writePath + "DictionarySeriesLength" + seriesLength + "/testTime" + seriesLength + ".csv"); OutFile memFile = new OutFile(writePath + "DictionarySeriesLength" + seriesLength + "/mem" + seriesLength + ".csv"); System.out.println(" Generating simulated data ...."); int[] casesPerClass = new int[2]; casesPerClass[0] = casesPerClass[1] = numCases/2; int[] shapesPerClass = new int[]{5, 20}; long t1, t2; String[] classifierNames = {"cBOSS","S-BOSS","WEASEL","BOSS"}; double[] acc = new double[classifierNames.length]; long[] trainTime = new long[classifierNames.length]; long[] 
testTime = new long[classifierNames.length]; long[] finalMem = new long[classifierNames.length]; long[] maxMem = new long[classifierNames.length]; for (int i = 0; i < experiments; i++) { Instances data = SimulateDictionaryData.generateDictionaryData(seriesLength, casesPerClass, shapesPerClass); Instances[] split = InstanceTools.resampleInstances(data, i, 0.2); System.out.println(" series length =" + seriesLength + " Experiment Index" + i + " Train size =" + split[0].numInstances() + " test size =" + split[1].numInstances()); for (int j = 0; j < classifierNames.length; j++) { System.gc(); MemoryMonitor monitor=new MemoryMonitor(); monitor.installMonitor(); long memoryBefore = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory(); Classifier c = ClassifierLists.setClassifierClassic(classifierNames[j], i); t1 = System.nanoTime(); c.buildClassifier(split[0]); trainTime[j] = System.nanoTime() - t1; t1 = System.nanoTime(); acc[j] = ClassifierTools.accuracy(split[1], c); testTime[j] = System.nanoTime() - t1; System.gc(); finalMem[j] = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory() - memoryBefore; maxMem[j]=monitor.getMaxMemoryUsed(); System.out.println("\t" + classifierNames[j] + " ACC = " + acc[j] + " Train Time =" + trainTime[j] + " Test Time = " + testTime[j] + " Final Memory = " + finalMem[j]/1000000+" Max Memory ="+maxMem[j]/1000000); } accFile.writeString(i + ""); for (int j = 0; j < classifierNames.length; j++) accFile.writeString("," + acc[j]); accFile.writeString("\n"); trainTimeFile.writeString(i + ""); for (int j = 0; j < classifierNames.length; j++) trainTimeFile.writeString("," + trainTime[j]); trainTimeFile.writeString("\n"); testTimeFile.writeString(i + ""); for (int j = 0; j < classifierNames.length; j++) testTimeFile.writeString("," + testTime[j]); testTimeFile.writeString("\n"); memFile.writeString(i + ""); for (int j = 0; j < classifierNames.length; j++) { memFile.writeString("," + finalMem[j]); } 
memFile.writeString(","); for (int j = 0; j < classifierNames.length; j++) { memFile.writeString("," + maxMem[j]); } memFile.writeString("\n"); } } } public static void collateSimulatorResults(){ String type="Dictionary"; String path="Z:\\Results Working Area\\"+type+"Based\\\\SimulationExperimentsMemMonitor\\"; File f= new File(path+type+"Summary"); f.mkdirs(); String[] files={"testAcc","testTime","trainTime","mem"}; int numClassifiers=4; OutFile[] out=new OutFile[files.length]; OutFile[] outDiffs=new OutFile[files.length]; for(int i=0;i<files.length;i++){ out[i]=new OutFile(path+type+"Summary\\"+files[i]+"Mean.csv"); out[i].writeLine("Means,BOSS,cBOSS,S-BOSS,WEASEL,StDevs,BOSS,cBOSS,S-BOSS,WEASEL"); outDiffs[i]=new OutFile(path+type+"Summary\\"+files[i]+"MeanDiffs.csv"); outDiffs[i].writeLine("MeanDiffsToBOSS,cBOSS,S-BOSS,WEASEL,StDevs,cBOSS,S-BOSS,WEASEL"); } for(int i=0;i<files.length;i++){ String s=files[i]; ArrayList<double[]> medians=new ArrayList<>(); for(int trainSize=50;trainSize<=1000;trainSize+=50) { File test; int lines = 0; String fPath=path + type + "TrainSize" + trainSize + "\\" + s + trainSize + ".csv"; f = new File(fPath); if (!f.exists()) { System.out.println("File " + s + trainSize + " does not exist on" + fPath+" skipping " + trainSize); continue; } //How many have we got? 
InFile inf = new InFile(fPath); int l = inf.countLines(); System.out.println(" File = "+fPath); System.out.println(trainSize + " has " + l + " lines"); inf = new InFile(fPath); double[][] vals = new double[l][numClassifiers]; double[][] diffs = new double[l][numClassifiers-1]; if(files[i].equals("mem")) { for (int j = 0; j < l; j++) { String[] line = inf.readLine().split(","); vals[j][0] = Double.parseDouble(line[6]); for (int k = 1; k <numClassifiers; k++) { vals[j][k] = Double.parseDouble(line[k + 6]); diffs[j][k - 1] = vals[j][k] - vals[j][0]; } } } else{ for (int j = 0; j < l; j++) { String[] line = inf.readLine().split(","); vals[j][0] = Double.parseDouble(line[1]); for (int k = 1; k < numClassifiers; k++) { vals[j][k] = Double.parseDouble(line[k + 1]); diffs[j][k - 1] = vals[j][k] - vals[j][0]; } } } //Find means double[] means = new double[numClassifiers]; double[] meanDiffs = new double[numClassifiers]; for (int k = 0; k < numClassifiers; k++) { means[k] = 0; for (int j = 0; j < l; j++) { means[k] += vals[j][k]; } means[k] /= l; } for (int k = 0; k < numClassifiers-1; k++) { meanDiffs[k] = 0; for (int j = 0; j < l; j++) { meanDiffs[k] += diffs[j][k]; } meanDiffs[k] /= l; } double[] confInterval = new double[numClassifiers]; double[] confIntervalDiffs = new double[numClassifiers]; for (int k = 0; k < numClassifiers; k++) { confInterval[k] = 0; for (int j = 0; j < l; j++) { confInterval[k] += (vals[j][k]-means[k])*(vals[j][k]-means[k]); } confInterval[k] /= l-1; confInterval[k]=Math.sqrt(confInterval[k]); confInterval[k]/=Math.sqrt(l); confInterval[k]*=1.96; } for (int k = 0; k < numClassifiers-1; k++) { confIntervalDiffs[k] = 0; for (int j = 0; j < l; j++) { confIntervalDiffs[k] += (diffs[j][k]- meanDiffs[k])*(diffs[j][k]- meanDiffs[k]); } confIntervalDiffs[k] /= (l-1); confIntervalDiffs[k]=Math.sqrt(confIntervalDiffs[k]); confIntervalDiffs[k]/=Math.sqrt(l); confIntervalDiffs[k]*=1.96; } //Write to file if(!s.equals("testTime")) out[i].writeString(trainSize 
+ ""); else out[i].writeString((int)(0.9*(trainSize/0.1)) + ""); for (int k = 0; k < numClassifiers; k++) { out[i].writeString("," + means[k]); } out[i].writeString(","); for (int k = 0; k < numClassifiers; k++) { out[i].writeString("," + confInterval[k]); } out[i].writeString("\n"); if(!s.equals("testTime")) outDiffs[i].writeString(trainSize + ""); else outDiffs[i].writeString((int)(0.9*(trainSize/0.1)) + ""); for (int k = 0; k < numClassifiers-1; k++) { outDiffs[i].writeString("," + meanDiffs[k]); } outDiffs[i].writeString(","); for (int k = 0; k < numClassifiers-1; k++) { outDiffs[i].writeString("," + confIntervalDiffs[k]); } outDiffs[i].writeString("\n"); } for(int seriesLength=500;seriesLength<=10000;seriesLength+=500) { File test; int lines = 0; String fPath=path + type + "SeriesLength" + seriesLength + "\\" + s + seriesLength + ".csv"; f = new File(fPath); if (!f.exists()) { System.out.println("File " + s + seriesLength + " does not exist on" + fPath+" skipping " + seriesLength); continue; } //How many have we got? 
InFile inf = new InFile(fPath); int l = inf.countLines(); System.out.println(seriesLength + " has " + l + " lines"); inf = new InFile(fPath); double[][] vals = new double[l][numClassifiers]; double[][] diffs = new double[l][numClassifiers-1]; if(files[i].equals("mem")) { for (int j = 0; j < l; j++) { String[] line = inf.readLine().split(","); vals[j][0] = Double.parseDouble(line[6]); for (int k = 1; k <numClassifiers; k++) { vals[j][k] = Double.parseDouble(line[k + 6]); diffs[j][k - 1] = vals[j][k] - vals[j][0]; } } } else{ for (int j = 0; j < l; j++) { String[] line = inf.readLine().split(","); vals[j][0] = Double.parseDouble(line[1]); for (int k = 1; k < numClassifiers; k++) { vals[j][k] = Double.parseDouble(line[k + 1]); diffs[j][k - 1] = vals[j][k] - vals[j][0]; } } } //Find means double[] means = new double[numClassifiers]; double[] meanDiffs = new double[numClassifiers]; for (int k = 0; k < numClassifiers; k++) { means[k] = 0; for (int j = 0; j < l; j++) { means[k] += vals[j][k]; } means[k] /= l; } for (int k = 0; k < numClassifiers-1; k++) { meanDiffs[k] = 0; for (int j = 0; j < l; j++) { meanDiffs[k] += diffs[j][k]; } meanDiffs[k] /= l; } double[] confInterval = new double[numClassifiers]; double[] confIntervalDiffs = new double[numClassifiers]; for (int k = 0; k < numClassifiers; k++) { confInterval[k] = 0; for (int j = 0; j < l; j++) { confInterval[k] += (vals[j][k]-means[k])*(vals[j][k]-means[k]); } confInterval[k] /= l-1; confInterval[k]=Math.sqrt(confInterval[k]); confInterval[k]/=Math.sqrt(l); confInterval[k]*=1.96; } for (int k = 0; k < numClassifiers-1; k++) { confIntervalDiffs[k] = 0; for (int j = 0; j < l; j++) { confIntervalDiffs[k] += (diffs[j][k]- meanDiffs[k])*(diffs[j][k]- meanDiffs[k]); } confIntervalDiffs[k] /= (l-1); confIntervalDiffs[k]=Math.sqrt(confIntervalDiffs[k]); confIntervalDiffs[k]/=Math.sqrt(l); confIntervalDiffs[k]*=1.96; } //Write to file out[i].writeString(seriesLength + ""); for (int k = 0; k < numClassifiers; k++) { 
out[i].writeString("," + means[k]); } out[i].writeString(","); for (int k = 0; k < numClassifiers; k++) { out[i].writeString("," + confInterval[k]); } out[i].writeString("\n"); outDiffs[i].writeString(seriesLength + ""); for (int k = 0; k < numClassifiers-1; k++) { outDiffs[i].writeString("," + meanDiffs[k]); } outDiffs[i].writeString(","); for (int k = 0; k < numClassifiers-1; k++) { outDiffs[i].writeString("," + confIntervalDiffs[k]); } outDiffs[i].writeString("\n"); } } } public static void dictionarySimulatorThreadExperiment() throws Exception { Model.setDefaultSigma(1); boolean overwrite=false; int experiments=1; for(int seriesLength=300;seriesLength<=300;seriesLength+=300) { int[] casesPerClass = new int[2]; casesPerClass[0] = casesPerClass[1] = 100; int[] shapesPerClass = new int[]{5, 20}; double[] acc = new double[4]; long[] trainTime = new long[4]; long[] testTime = new long[4]; long[] mem = new long[4]; long t1, t2; String[] classifierNames = {"BOSS"};//, "cBOSS", "SpatialBOSS", "WEASEL"}; MemoryMXBean mx= ManagementFactory.getMemoryMXBean(); Notification notif; /* GarbageCollectorMXBean gc=mx. 
// receive the notification emitted by a GarbageCollectorMXBean and set to notif synchronized (mx){ mx.wait(); } notif=mx.get String notifType = "TESTY"; //notif.getType(); if (notifType.equals(GarbageCollectionNotificationInfo.GARBAGE_COLLECTION_NOTIFICATION)) { // retrieve the garbage collection notification information CompositeData cd = (CompositeData) notif.getUserData(); GarbageCollectionNotificationInfo info = GarbageCollectionNotificationInfo.from(cd); } */ for (int i = 0; i < experiments; i++) { Instances data = SimulateDictionaryData.generateDictionaryData(seriesLength, casesPerClass, shapesPerClass); Instances[] split = InstanceTools.resampleInstances(data, i, 0.2); System.out.println(" Testing thread model: series length =" + seriesLength + " Experiment Index" + i + " Train size =" + split[0].numInstances() + " test size =" + split[1].numInstances()); for (int j = 0; j < classifierNames.length; j++) { System.gc(); long memoryBefore = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory(); Classifier c = ClassifierLists.setClassifierClassic(classifierNames[j], i); t1 = System.nanoTime(); c.buildClassifier(split[0]); trainTime[j] = System.nanoTime() - t1; t1 = System.nanoTime(); acc[j] = ClassifierTools.accuracy(split[1], c); testTime[j] = System.nanoTime() - t1; System.gc(); mem[j] = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory() - memoryBefore; System.out.println("\t" + classifierNames[j] + " ACC = " + acc[j] + " Train Time =" + trainTime[j] + " Test Time = " + testTime[j] + " Memory = " + mem[j]); } } } } public static class ThreadExperiment implements Runnable{ Classifier c; Instances train; public ThreadExperiment(Classifier c, Instances train){ this.c=c; this.train=train; } @Override public void run() { try { c.buildClassifier(train); }catch(Exception e){ System.out.println("Classifier threw exception in Thread Experiment"); } } } }
63,350
41.574597
205
java
tsml-java
tsml-java-master/src/main/java/experiments/SingleProblemExperiments.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package experiments; import tsml.classifiers.EnhancedAbstractClassifier; import weka.core.Instances; /** * Class for conducting experiments on a single problem * Wish list * 1. Cross validation: create a single output file with the cross validation predictions * 2. AUROC: take cross validation results and form the data for a AUROC plot * 3. Tuning: tune classifier on a train split * 4. Sensitivity: plot parameter space * 5. Robustness: performance with changing train set size (including acc estimates) */ public class SingleProblemExperiments { /** * Input, classifier, train set, test set, number of intervals (k) * * Train set will be resampled for k different train sizes at equally spaced intervals * Output to file results: TrainSize, TestAccActual, (TestAccEstimated, optional) */ public static void increasingTrainSetSize(EnhancedAbstractClassifier c, Instances train, Instances test, int nIntervals, String results){ // Work out intervals int fullLength=train.numInstances(); int interval = fullLength/(nIntervals-1); // } }
1,874
37.265306
141
java
tsml-java
tsml-java-master/src/main/java/experiments/TonyCollateResults.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package experiments; import evaluation.MultipleEstimatorsPairwiseTest; import evaluation.MultipleEstimatorEvaluation; import fileIO.InFile; import fileIO.OutFile; import java.io.File; import java.text.DecimalFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import statistics.distributions.BinomialDistribution; import statistics.tests.OneSampleTests; import evaluation.storage.ClassifierResults; import experiments.data.DatasetLists; import java.util.HashMap; /** * Class to collate results from any classifier creating standard output * There are two ways to collate results. * 1. (Tony Bagnall) The code in this class creates summary info for individual classifiers. * It does not do comparisons between classifiers, and it will build with incomplete * data, ignoring incomplete data sets. This can be run on the cluster (see below). * See method individualClassifiersCollate() for example usage * 2 (James Large) Using the MultipleEstimatorEvaluation class, detailed * comparisons between classifier can be conducted. 
This can create matlab driven * critical difference diagrams **On the cluster usage:** * Class to collate standard results files over multiple classifiers and problems * Usage * (assuming Collate.jar has this as the main class): * java -jar Collate.jar ResultsDirectory/ ProblemDirectory/ NumberOfFolds Classifier1 Classifier2 .... ClassifierN NoParasC1 NoParasC2 .... NoParasCn * e.g. java -jar -Xmx6000m Collate.jar Results/ UCIContinuous/ 30 RandF RotF 2 2 * collates the results for 30 folds for RandF and RotF in the directory for Results * on all the problems in UCIContinous (as defined by having a directory in the folder) * How it works: * * Stage 1: take all the single fold files, work out the diagnostics on test data: * Accuracy, BalancedAccuracy, NegLogLikelihood, AUROC and F1 and store the TrainCV accuracy. * all done by call to collateFolds(); * Combine folds into a single file for each statistic in ResultsDirectory/ClassifierName * these are * Counts: counts.csv, number per problem (max number is NumberOfFolds, it does not check for more). * Diagnostics: TestAcc.csv, TestF1.csv, TestBAcc.csv, TestNLL.csv, TestAUROC.csv, TrainCVAcc.csv, Timings.csv * Parameter info: Parameter1.csv, Parameter2.csv...AllTuningAccuracies.csv (if tuning occurs, all tuning values). * * Stage 2: * Output: Classifier Summary: call to method averageOverFolds() * Creates average and standard deviation over all folds based on the * created at stage 1 with the addition of the mean difference per fold. All put in a single directory. * * Stage 3 * Final Comparison Summary: call to method basicSummaryComparisons(); * a single file in ResultsDirectory directory called summaryTests<ClassifierNames>.csv * contains pairwise comparisons of all the classifiers. * 1. All Pairwise Comparisons for TestAccDiff, TestAcc, TestBAcc, TestNLL.csv and TestAUROC * 1. Wins/Draws/Loses * 2. Mean (and std dev) difference * 3. 
Two sample tests of the mean values * * @author ajb **/ public class TonyCollateResults { public static File[] dir; static String basePath; static String[] classifiers; public static ArrayList<String> problems; static boolean readProblemNamesFromDir=true; static String problemPath; static int folds; static int numClassifiers; static int[] numParas; static DecimalFormat df=new DecimalFormat("##.######"); static double[][] data; static boolean countPartials=false; /** * Arguments: 1. ResultsDirectory/ 2. Either ProblemDirectory/ or ProblemFiles.csv or ProblemFiles.txt * Basically checks for an extension and if its there reads a file. * 3. NumberOfFolds 4-4+nosClassifiers Classifier1 Classifier2 .... ClassifierN 4+nosClassifiers+1 to 4+2*nosClassifiers NoParasC1 NoParasC2 .... NoParasCn * */ public static void readData(String[] args){ int numInitArgs=4; basePath=args[0]; System.out.println("Base path = "+basePath); problemPath=args[1]; System.out.println("Problem path = "+problemPath); folds =Integer.parseInt(args[2]); System.out.println("Folds = "+folds); String partial=args[3].toLowerCase(); if(partial.equals("true")) countPartials=true; else countPartials=false; numClassifiers=(args.length-numInitArgs)/2; classifiers=new String[numClassifiers]; for(int i=0;i<classifiers.length;i++) classifiers[i]=args[i+numInitArgs]; numParas=new int[classifiers.length]; for(int i=0;i<classifiers.length;i++) numParas[i]=Integer.parseInt(args[i+numInitArgs+classifiers.length]); //Get problem files from a directory if required if(readProblemNamesFromDir){ File f=new File(problemPath); problems=new ArrayList<>(); if(problemPath.contains(".txt") || problemPath.contains(".csv")){//Read from file if(!f.exists()) System.out.println("Error loading problems from file ="+problemPath); else{ InFile inf=new InFile(problemPath); String prob=inf.readLine(); while(prob!=null){ problems.add(prob); prob=inf.readLine(); } } } else{ if(!f.isDirectory()){ System.out.println("Error in problem path 
="+problemPath); } dir=f.listFiles(); for(File p:dir){ if(p.isDirectory()){ problems.add(p.getName()); } } } Collections.sort(problems); } } /*Returns True if the file is present and correct Changed cos it is too slow at the moment */ public static boolean validateSingleFoldFile(String str){ File f= new File(str); if(f.exists()){ // Check 1: non zero if(f.length()==0){//Empty, delete file f.delete(); } else{ try{ /* InFile inf=new InFile(str); int c=inf.countLines(); if(c<=3){//No predictions, delete inf.closeFile(); f.delete(); return false; } inf.closeFile(); */ return true; }catch(Exception e){ System.out.println("Exception thrown trying to read file "+str); System.out.println("Exception = "+e+" THIS MAY BE A GOTCHA LATER"); e.printStackTrace(); return false; } //Something in there, it is up to ClassifierResults to validate the rest } } return false; } /** * Stage 1: take all the single fold files, work out the diagnostics on test data: Accuracy, BalancedAccuracy, NegLogLikelihood, AUROC and F1 and store the TrainCV accuracy. all done by call to collateFolds(); Combine folds into a single file for each statistic in ResultsDirectory/ClassifierName these are Counts: counts.csv, number per problem (max number is NumberOfFolds, it does not check for more). Diagnostics: TestAcc.csv, TestF1.csv, TestBAcc.csv, TestNLL.csv, TestAUROC.csv, TrainCVAcc.csv Timings: Timings.csv Memory: Memory.csv Parameter info: Parameter1.csv, Parameter2.csv...AllTuningAccuracies.csv (if tuning occurs, all tuning values). */ public static int MAXNUMPARAS=1180; public static void collateFolds(){ // String[] allStats={"TestAcc","TrainCVAcc","TestNLL","TestBACC","TestAUROC","TestF1"}; for(int i=0;i<classifiers.length;i++){ String cls=classifiers[i]; System.out.println("Processing classifier ="+cls); File f=new File(basePath+cls); if(f.isDirectory()){ //Check classifier directory exists. 
System.out.println("Base path "+basePath+cls+" exists"); File stats=new File(basePath+cls+"/SummaryStats"); if(!stats.isDirectory()) stats.mkdir(); String filePath=basePath+cls+"/SummaryStats/"; OutFile clsResults=new OutFile(filePath+cls+"TestAcc.csv"); OutFile f1Results=new OutFile(filePath+cls+"TestF1.csv"); OutFile BAccResults=new OutFile(filePath+cls+"TestBAcc.csv"); OutFile nllResults=new OutFile(filePath+cls+"TestNLL.csv"); OutFile AUROCResults=new OutFile(filePath+cls+"TestAUROC.csv"); OutFile trainResults=new OutFile(filePath+cls+"TrainCVAcc.csv"); OutFile[] paraFiles=null; if(numParas[i]>0){ paraFiles=new OutFile[numParas[i]]; for(int j=0;j<paraFiles.length;j++) paraFiles[j]=new OutFile(filePath+cls+"Parameter"+(j+1)+".csv"); } OutFile timings=new OutFile(filePath+cls+"Timings.csv"); OutFile mem=new OutFile(filePath+cls+"Memory.csv"); OutFile allAccSearchValues=new OutFile(filePath+cls+"AllTuningAccuracies.csv"); OutFile missing=null; OutFile counts=new OutFile(filePath+cls+"Counts.csv"); OutFile partials=null; if(countPartials) partials=new OutFile(filePath+cls+"PartialCounts.csv");; OutFile of = new OutFile(filePath+cls+"Corrupted.csv"); int missingCount=0; for(String name:problems){ //Write collated results for this classifier to a single file OutFile mergedResults=new OutFile(filePath+cls+"AllTestPrediction"+name+".csv"); clsResults.writeString(name); trainResults.writeString(name); f1Results.writeString(name); BAccResults.writeString(name); nllResults.writeString(name); AUROCResults.writeString(name); allAccSearchValues.writeString(name); timings.writeString(name); mem.writeString(name); if(numParas[i]>0){ for(OutFile out:paraFiles) out.writeString(name+","); } //GAVIN HACK // String path=basePath+cls+"/"+name+"/results/"; String path=basePath+cls+"//Predictions//"+name; if(missing!=null && missingCount>0) missing.writeString("\n"); missingCount=0; if(countPartials) partials.writeString(name); int caseCount=0; for(int j=0;j<folds;j++){ //Check fold 
exists and is a valid file boolean valid=validateSingleFoldFile(path+"//testFold"+j+".csv"); if(valid){ //This could fail if file only has partial probabilities on the line //Read in test accuracy and store //Check fold exists //Read in test accuracy and store InFile inf=null; String[] trainRes=null; try{ inf=new InFile(path+"//testFold"+j+".csv"); inf.readLine();//First line, problem info trainRes=inf.readLine().split(",");//Stores train CV and parameter info clsResults.writeString(","+inf.readDouble()); timings.writeString(","+inf.readDouble()); if(trainRes.length>1){//There IS parameter info int pos=1; for(int k=0;k<numParas[i];k++){ if(trainRes.length>pos){ paraFiles[k].writeString(trainRes[pos]+","); pos+=2; } else paraFiles[k].writeString(","); } // write the rest to the para search file while(pos<trainRes.length) allAccSearchValues.writeString(","+trainRes[pos++]); } else{ // trainResults.writeString(","); for(int k=0;k<numParas[i];k++) paraFiles[k].writeString(","); } //Read in the rest into a ClassifierResults object inf.closeFile(); // inf.openFile(path+"//testFold"+j+".csv"); // int temp=(inf.countLines()-3); // inf.closeFile(); // System.out.println("Number of items in bag "+(j+1)+" = "+temp); // caseCount+=temp; ClassifierResults res=new ClassifierResults(); res.loadResultsFromFile(path+"//testFold"+j+".csv"); mergedResults.writeLine(res.instancePredictionsToString()); res.findAllStats(); f1Results.writeString(","+res.f1); BAccResults.writeString(","+res.balancedAcc); nllResults.writeString(","+res.nll); AUROCResults.writeString(","+res.meanAUROC); }catch(Exception e){ System.out.println(" Error "+e+" in "+path); if(trainRes!=null){ System.out.println(" second line read has "+trainRes.length+" entries :"); for(String str:trainRes) System.out.print(str+","); of.writeLine(name+","+j); e.printStackTrace(); System.exit(1); } }finally{ if(inf!=null) inf.closeFile(); } if(countPartials) partials.writeString(",0"); } else{ if(missing==null) missing=new 
OutFile(filePath+cls+"MissingFolds.csv"); if(missingCount==0) missing.writeString(name); missingCount++; missing.writeString(","+j); if(countPartials){ //Fold j missing, count here how many parameters are complete on it int x=0; for(int k=1;k<MAXNUMPARAS;k++){ if(validateSingleFoldFile(path+"//fold"+j+"_"+k+".csv")) x++; } if(countPartials) partials.writeString(","+x); } } } // System.out.println(" Total number of cases ="+caseCount); counts.writeLine(name+","+(folds-missingCount)); if(countPartials) partials.writeString("\n"); clsResults.writeString("\n"); trainResults.writeString("\n"); f1Results.writeString("\n"); BAccResults.writeString("\n"); nllResults.writeString("\n"); AUROCResults.writeString("\n"); timings.writeString("\n"); allAccSearchValues.writeString("\n"); for(int k=0;k<numParas[i];k++) paraFiles[k].writeString("\n"); } clsResults.closeFile(); trainResults.closeFile(); for(int k=0;k<numParas[i];k++) paraFiles[k].closeFile(); } else{ System.out.println("Classifier "+cls+" has no results directory: "+basePath+cls); System.out.println("Exit "); System.exit(0); } } } /** Stage 2: Output: Classifier Summary: call to method averageOverFolds() Creates average and standard deviation over all folds based on the files created at stage 1 with the addition of the mean difference per fold. All put in a single directory. 
* **/
public static void averageOverFolds(){
    // Builds per-statistic summary CSVs (mean, std dev, fold counts) over all folds,
    // for every classifier in the static `classifiers` array, one row per problem.
    // Output goes to basePath/<concatenated classifier names>/ (plus /SummaryStats/ for a single classifier).
    String name=classifiers[0];
    for(int i=1;i<classifiers.length;i++)
        name+=classifiers[i];
    String filePath=basePath+name+"/";
    if(classifiers.length==1)
        filePath+="SummaryStats/";
    File nf=new File(filePath);
    if(!nf.isDirectory())
        nf.mkdirs();
    // allStats[i] is the output-file name for the statistic read from testStats[i] input files.
    String[] allStats={"MeanTestAcc","MeanTrainCVAcc","MeanTestNLL","MeanTestBAcc","MeanTestAUROC","MeanTestF1","MeanTimings"};
    String[] testStats={"TestAcc","TrainCVAcc","TestNLL","TestBAcc","TestAUROC","TestF1","Timings"};
    OutFile[] means=new OutFile[allStats.length];
    for(int i=0;i<means.length;i++)
        means[i]=new OutFile(filePath+allStats[i]+name+".csv");
    OutFile[] stDev=new OutFile[allStats.length];
    for(int i=0;i<stDev.length;i++)
        stDev[i]=new OutFile(filePath+allStats[i]+"StDev"+name+".csv");
    OutFile count=new OutFile(filePath+"Counts"+name+".csv");
    //Headers: one column per classifier in every output file
    for(int i=0;i<classifiers.length;i++){
        for(OutFile of:means)
            of.writeString(","+classifiers[i]);
        for(OutFile of:stDev)
            of.writeString(","+classifiers[i]);
        count.writeString(","+classifiers[i]);
    }
    for(OutFile of:means)
        of.writeString("\n");
    for(OutFile of:stDev)
        of.writeString("\n");
    count.writeString("\n");
    //Do counts first: copy each classifier's per-problem fold count into one combined file.
    //A null entry in allClassifiers marks a classifier whose input file is missing.
    InFile[] allClassifiers=new InFile[classifiers.length];
    for(int i=0;i<allClassifiers.length;i++){
        String str=basePath+classifiers[i]+"/SummaryStats/"+classifiers[i];
        System.out.println("Loading "+str+"Counts.csv");
        String p=str+"Counts.csv";
        if(new File(p).exists())
            allClassifiers[i]=new InFile(p);
        else{
            allClassifiers[i]=null;//superfluous (array defaults to null) but explicit
            System.out.println("File "+p+" does not exist");
        }
    }
    for(String str:problems){
        count.writeString(str);
        for(int i=0;i<allClassifiers.length;i++){
            if(allClassifiers[i]!=null){
                allClassifiers[i].readString();   // skip the problem-name column
                count.writeString(","+allClassifiers[i].readInt());
            }
            else{
                count.writeString(",");           // blank cell for a missing classifier
            }
        }
        count.writeString("\n");
    }
    // For each statistic, re-open the per-classifier fold files and write mean/std dev rows.
    for(int j=0;j<allStats.length;j++){
        //Open files with data for all folds
        for(int i=0;i<allClassifiers.length;i++){
            String str=basePath+classifiers[i]+"/SummaryStats/"+classifiers[i];
            String p=str+testStats[j]+".csv";
            if(new File(p).exists())
                allClassifiers[i]=new InFile(p);
            else{
                allClassifiers[i]=null;//superfluous
                System.out.println("File "+p+" does not exist");
            }
        }
        //Find means: each input row is <problem>,<fold0>,<fold1>,...
        for(String str:problems){
            means[j].writeString(str);
            stDev[j].writeString(str);
            String prev="First";  // last successfully read row, kept only for the error message below
            for(int i=0;i<allClassifiers.length;i++){
                if(allClassifiers[i]==null){
                    means[j].writeString(",");
                    stDev[j].writeString(",");
                }
                else{//Find mean and (population) standard deviation over the folds on this row
                    try{
                        String r=allClassifiers[i].readLine();
                        String[] res=r.split(",");
                        double mean=0;
                        double sumSquare=0;
                        for(int m=1;m<res.length;m++){
                            double d=Double.parseDouble(res[m].trim());
                            mean+=d;
                            sumSquare+=d*d;
                        }
                        if(res.length>1){
                            int size=(res.length-1);
                            mean=mean/size;
                            // population variance: E[x^2] - mean^2
                            double stdDev=sumSquare/size-mean*mean;
                            stdDev=Math.sqrt(stdDev);
                            // NOTE(review): df here is presumably a class-level DecimalFormat — confirm;
                            // basicSummaryComparisons below declares its own local df.
                            means[j].writeString(","+df.format(mean));
                            stDev[j].writeString(","+df.format(stdDev));
                        }
                        else{
                            means[j].writeString(",");
                            stDev[j].writeString(",");
                        }
                        prev=r;
                    }catch(Exception ex){
                        // Best-effort: a malformed/short file leaves blank cells rather than aborting.
                        System.out.println("failed to read line: "+ex+" previous line = "+prev+" file index ="+j+" classifier index ="+i);
                    }
                }
            }
            means[j].writeString("\n");
            stDev[j].writeString("\n");
            if(j==0)
                count.writeString("\n");
        }
        for(InFile inf:allClassifiers)
            if(inf!=null)
                inf.closeFile();
    }
}

/**
 * Pairwise comparison of the classifiers in the static `classifiers` array over the collated
 * summary files produced by averageOverFolds. For each statistic it records win/draw/loss
 * counts (with a binomial sign-test p-value), mean difference, per-problem paired t-tests
 * over folds, and finally a sign-rank test over all problems with complete results.
 * Results are written to basePath/summaryTests<names>.csv. Requires at least two classifiers.
 */
public static void basicSummaryComparisons(){
    //Only compares first two
    DecimalFormat df = new DecimalFormat("###.#####");
    if(classifiers.length<=1)
        return;
    String name=classifiers[0];
    for(int i=1;i<classifiers.length;i++)
        name+=classifiers[i];
    OutFile s=new OutFile(basePath+"summaryTests"+name+".csv");
    String[] allStatistics={"TestAcc","TestBAcc","TestNLL","TestAUROC"};
    // data[problem][classifier]; -1 marks a missing entry throughout.
    data=new double[problems.size()][classifiers.length];
    s.writeLine(name);
    for(String str:allStatistics){
        s.writeLine("**************"+str+"********************");
        System.out.println("Loading "+basePath+name+"/"+str+name+".csv");
        InFile f=new InFile(basePath+name+"/"+str+name+".csv");
        f.readLine();   // skip header row
        for(int i=0;i<problems.size();i++){
            String ss=f.readLine();
            String[] d=ss.split(",");
            for(int j=0;j<classifiers.length;j++)
                data[i][j]=-1;
            for(int j=0;j<d.length-1;j++){
                try{
                    double v=Double.parseDouble(d[j+1]);
                    data[i][j]=v;
                }catch(Exception e){
                    // yes yes I know its horrible, but this is text parsing, not rocket science
                    // (a blank cell simply leaves data[i][j] at -1)
                    // System.out.println("No entry for classifier "+j);
                }
            }
            // for(int j=0;j<classifiers.length;j++)
            //     System.out.println(" Classifier "+j+" has data "+data[i][j]);
        }
        for(int x=0;x<classifiers.length-1;x++){
            for (int y=x+1; y < classifiers.length; y++) {//Compare x and y
                int wins=0,draws=0,losses=0;
                int sigWins=0,sigLosses=0;
                double meanDiff=0;
                double sumSq=0;
                double count=0;
                // 1. Win/draw/loss and mean difference over problems present for both.
                for(int i=0;i<problems.size();i++){
                    if(data[i][x]!=-1 && data[i][y]!=-1){
                        if(data[i][x]>data[i][y])
                            wins++;
                        else if(data[i][x]==data[i][y])
                            draws++;
                        else
                            losses++;
                        meanDiff+=data[i][x]-data[i][y];
                        sumSq+=(data[i][x]-data[i][y])*(data[i][x]-data[i][y]);
                        count++;
                    }
                }
                // DecimalFormat df = new DecimalFormat("##.#####");
                System.out.println(str+","+classifiers[x]+","+classifiers[y]+",WIN/DRAW/LOSE,"+wins+","+draws+","+losses);
                // Two-sided sign test on wins vs losses (draws excluded).
                BinomialDistribution bin=new BinomialDistribution();
                bin.setParameters(wins+losses,0.5);
                double p=bin.getCDF(wins);
                if(p>0.5) p=1-p;
                s.writeLine(str+","+classifiers[x]+","+classifiers[y]+",WIN/DRAW/LOSE,"+wins+","+draws+","+losses+", p =,"+df.format(p));
                System.out.println(str+","+classifiers[x]+","+classifiers[y]+",COUNT,"+count+",MeanDiff,"+df.format(meanDiff/count)+",StDevDiff,"+df.format((sumSq-(meanDiff*meanDiff)/count))+" p ="+df.format(p));
                //3. Find out how many are statistically different within folds
                //Do paired T-tests from fold files
                InFile first=new InFile(basePath+classifiers[x]+"/"+classifiers[x]+str+".csv");
                InFile second=new InFile(basePath+classifiers[y]+"/"+classifiers[y]+str+".csv");
                for(int i=0;i<problems.size();i++){
                    //Read in both: Must be the same number to proceed
                    String[] probX=first.readLine().split(",");
                    String[] probY=second.readLine().split(",");
                    if(probX.length<=folds || probY.length<=folds)
                        continue; //Skip this problem: not a full set of folds on both sides
                    double[] diffs=new double[folds];
                    boolean notAllTheSame=false;
                    for(int j=0;j<folds;j++){
                        diffs[j]=Double.parseDouble(probX[j+1])-Double.parseDouble(probY[j+1]);
                        if(!notAllTheSame && !probX[j+1].equals(probY[j+1]))
                            notAllTheSame=true;
                    }
                    if(notAllTheSame){
                        OneSampleTests test=new OneSampleTests();
                        String res=test.performTests(diffs);
                        System.out.println("Results = "+res);
                        String[] results=res.split(",");
                        // NOTE(review): assumes results[2] is the one-sided t-test p-value — confirm
                        // against OneSampleTests.performTests output format.
                        double tTestPValue=Double.parseDouble(results[2]);
                        if(tTestPValue>=0.95)
                            sigWins++;
                        else if(tTestPValue<=0.05)
                            sigLosses++;
                    }
                    else
                        System.out.println("**************ALL THE SAME problem = "+probX[0]+" *************");
                }
                s.writeLine(str+","+classifiers[x]+","+classifiers[y]+",SIGWIN/SIGLOSS,"+sigWins+","+sigLosses);
                System.out.println(str+","+classifiers[x]+","+classifiers[y]+",SIGWIN/SIGLOSS,"+sigWins+","+sigLosses);
                //2. Overall mean difference
                s.writeLine(str+","+classifiers[x]+","+classifiers[y]+",COUNT,"+count+",MeanDiff,"+df.format(meanDiff/count)+",StDevDiff,"+df.format((sumSq-(meanDiff*meanDiff)/count)));
                System.out.println(str+","+classifiers[x]+","+classifiers[y]+",COUNT,"+count+",MeanDiff,"+df.format(meanDiff/count)+",StDevDiff,"+df.format((sumSq-(meanDiff*meanDiff)/count)));
            }
        }
        //Do pairwise tests over all common datasets.
        //1. First need to condense to remove any with one missing
        ArrayList<double[]> res=new ArrayList<>();
        for(int i=0;i<data.length;i++){
            int j=0;
            while(j<data[i].length && data[i][j]!=-1)
                j++;
            if(j==data[i].length)
                res.add(data[i]);
        }
        System.out.println("REDUCED DATA SIZE = "+res.size());
        double[][] d2=new double[res.size()][];
        for(int i=0;i<res.size();i++)
            d2[i]=res.get(i);
        //2. Do pairwise tests (Wilcoxon sign-rank over the complete problems)
        StringBuilder resultsString= MultipleEstimatorsPairwiseTest.runSignRankTest(d2,classifiers);
        s.writeString(resultsString.toString());
        System.out.println(resultsString);
    }
    s.closeFile();
}

/**
 * Full collation pipeline driver: reads the configuration/problem list from args,
 * merges per-fold result files, averages over folds, then runs the summary comparisons.
 */
public static void collate(String[] args){
    //STAGE 1: Read from arguments, find problems
    readData(args);
    System.out.println(" number of classifiers ="+numClassifiers);
    //STAGE 2: Collate the individual fold files into one
    System.out.println("Collating folds ....");
    collateFolds();
    System.out.println("Collate folds finished. \n Averaging over folds....");
    //STAGE 3: Summarise over folds
    averageOverFolds();
    System.out.println("averaging folds finished.\n Basic stats comparison ....");
    //STAGE 4: Do statistical comparisons
    basicSummaryComparisons();
}

/**
 * *First argument: String path to results directories
 * Second argument: path to directory with problem allStats to look for
 * Third argument: number of folds
 * Next x arguments: x Classifiers to collate
 * Next x arguments: number of numParas stored for each classifier
 * With no (or one) argument, runs a hard-coded example configuration instead.
 **/
public static void singleClassifiersFullStats(String[] args) throws Exception{
    if(args.length>1)
        collate(args);
    else{
        String[] classifiers={"TSF"};//,"EE","RISE","ST","BOSS"};
        for(String classifier:classifiers){
            String parameters="0";
            String[] str={"E:\\Results\\Bakeoff Redux\\", "Z:\\ArchiveData\\Univariate_arff\\","30","false",classifier,parameters};
            //Change this to read an array
            collate(str);
        }
    }
}

/**
 * Template for a shapelet results summary via MultipleEstimatorEvaluation.
 * The problem/classifier/path arrays are placeholders to be filled in before use.
 */
public static void shapeletResultsSummary() throws Exception{
    String[] problems={""};
    String[] classifiers={""};
    String readPath="";
    //Where to put them, directory name, number of folds
    MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation("E://Results//somewhere", "OneHour", 1);
    m.setBuildMatlabDiagrams(true);
    m.setDebugPrinting(true);
    m.setUseAllStatistics();
    m.setDatasets(problems);
    m.readInEstimators(classifiers, readPath);
    m.runComparison();
}

/**
 * Usage of MultipleEstimatorEvaluation. See the class for more info.
 * Argument-driven mode is not implemented yet; with no args it runs a hard-coded UCI example.
 * @throws Exception
 */
public static void multipleClassifierFullStats(String[] args) throws Exception{
    if(args.length>0){
        //TO DO
    }
    else{
        //Example manual setting
        //Where to put them, directory name, number of folds
        MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation("E://Results//UCI//Analysis//", "Tuned", 5);
        m.setBuildMatlabDiagrams(true);
        m.setDebugPrinting(true);
        m.setUseAllStatistics();
        m.setDatasets(Arrays.copyOfRange(experiments.data.DatasetLists.UCIContinuousWithoutBigFour, 0, 117));
        m.readInEstimators(new String[] {"MLP2","SVMRBF","SVMP","RandF","RotF","XGBoost"}, "E://Results/UCI/Tuned");
        m.runComparison();
    }
}

// Locations of the published reference results on the two environments ("Beast" file server
// vs the HPC cluster). The mutable bakeOffPath/hiveCotePath/reduxPath variables select the
// active environment and default to the Beast paths; callers may reassign them.
public static final String bakeOffPathBeast="Z:/ReferenceResults/CollatedResults/Bakeoff2015/byClassifier/";
public static final String hiveCotePathBeast="Z:/ReferenceResults/CollatedResults/HIVE-COTE2017/";
public static final String reduxPathBeast="Z:/ReferenceResults/CollatedResults/BakeoffRedux2019/";
public static final String bakeOffPathCluster="/gpfs/home/ajb/Results/ReferenceResults/Bakeoff2015/ByClassifier/";
public static final String hiveCotePathCluster="/gpfs/home/ajb/Results/ReferenceResults/HIVE-COTE2017/";
public static final String reduxPathCluster="/gpfs/home/ajb/Results/ReferenceResults/BakeoffRedux2019/";
public static String bakeOffPath=bakeOffPathBeast;
public static String hiveCotePath=hiveCotePathBeast;
public static String reduxPath=reduxPathBeast;

/**
 * Quick in place collation and comparison to reference results
 * @param args
 * @throws Exception
 * Primary: these results are built from file using predictions or just reading line 3
 * para 1: String: location of primary results, including classifier name
 * para 2: Boolean stored as string: true: calculate acc from preds and check. False: just read from line 3.
 * Para 3: Integer stored as string: number of folds to look for
 * para 4: Boolean stored as string: old result-file format (time on line 2) vs new (time on line 3)
 * OPTIONAL: these results are read directly, can have as many as desired
 * Input format ProblemSource,ClassifierName. Problem source must be Bakeoff, HIVE-COTE, or Redux
 * para 5 onwards: comparison classifier TYPE,NAME pairs
 * ..
 * Notes:
 * 1. Only uses accuracy, does not require classes map 0... numClasses-1 or probabilities.
 * 2. Assumes file structure is arg[0]/Predictions/ProblemName/testFold0.csv
 * 3. Assumes every directory in Predictions is a results folder
 * 4. For the fold averages, it ignores any problem without a full set of results, will print the results as empty
 * 5. Prints results to arg[0]/QuickResults/TrainTest<classifierName>.csv,
 */
public static void singleClassifiervsReferenceResults(String[] args) throws Exception{
    if(args.length<4){
        String input="";
        for(String s:args)
            input+=s+" ";
        throw new Exception("Wrong input args =:"+input);
    }
    for(int i=0;i<args.length;i++){
        System.out.println("args["+i+"] = "+args[i]);
    }
    String fullPath=args[0];
    // Classifier name is taken as the last path component of args[0].
    String[] temp=args[0].split("/");
    String classifierName=temp[temp.length-1];
    System.out.println(" Primary Classifier = "+classifierName);
    boolean calcAcc=Boolean.parseBoolean(args[1]);
    System.out.println(" Recalculate accuracies from file? = "+calcAcc);
    folds=Integer.parseInt(args[2]);
    System.out.println("Folds = "+folds);
    boolean oldFormat=Boolean.parseBoolean(args[3]);
    System.out.println(" Use old format ? = "+oldFormat);
    File f= new File(fullPath+"/QuickResults");
    f.mkdirs();
    //Get primary results
    ArrayList<String> problems =new ArrayList<>();
    ArrayList<String> missing =new ArrayList<>();   // problems abandoned due to read errors
    f=new File(fullPath+"/Predictions");
    System.out.println(fullPath+"/Predictions");
    // NOTE(review): listFiles() returns null if the directory does not exist — the loop below
    // would then NPE; also f.length() on a directory is system-dependent, listFiles().length
    // was presumably intended. Confirm before relying on this diagnostic output.
    File[] fileList=f.listFiles();
    System.out.println("File names in "+fullPath+"/Predictions : has "+f.length()+" files ");
    for(File t:fileList){
        System.out.println("\t"+t.getName());
        if(t.isDirectory()){ // Note 3: assume all dirs are problems
            problems.add(t.getName());
        }
    }
    Collections.sort(problems);
    double[] trainTest= new double[problems.size()];        // fold-0 accuracy per problem
    double[] trainTestTime= new double[problems.size()];    // fold-0 time per problem
    double[] means= new double[problems.size()];
    double[][] allFolds= new double[problems.size()][folds];
    double[] meansTime= new double[problems.size()];
    double[][] allFoldsTime= new double[problems.size()][folds];
    OutFile trTsFile=new OutFile(fullPath+"/QuickResults/TrainTest"+classifierName+".csv");
    OutFile meansFile=new OutFile(fullPath+"/QuickResults/Average"+folds+classifierName+".csv");
    OutFile allFoldsFile=new OutFile(fullPath+"/QuickResults/AllFolds"+classifierName+".csv");
    OutFile trTsTimesFile=new OutFile(fullPath+"/QuickResults/TimesTrainTest"+classifierName+".csv");
    OutFile meanTimesFile=new OutFile(fullPath+"/QuickResults/TimeAverage"+folds+classifierName+".csv");
    OutFile allFoldsTimesFile=new OutFile(fullPath+"/QuickResults/TimeAllFolds"+classifierName+".csv");
    OutFile trainFileCount=new OutFile(fullPath+"/QuickResults/trainFileCount"+classifierName+".csv");
    InFile inf=null;
    boolean readTimes=true;   // cleared if any timing field fails to parse
    for(int i=0;i<trainTest.length;i++){
        System.out.println("Processing "+problems.get(i));
        int trainCount=0;       // how many trainFoldX.csv files exist for this problem
        boolean cont=true;      // false => abandon this problem entirely
        for(int j=0;j<folds && cont;j++){
            try{
                inf=new InFile(fullPath+"/Predictions/"+problems.get(i)+"/testFold"+j+".csv");
                inf.readLine();//Ignore first two
                String secondLine=inf.readLine();
                String[] split;
                String[] secondSplit=secondLine.split(",");
                String thirdLine=inf.readLine();
                String[] thirdSplit=thirdLine.split(",");
                //Under the old format, the time is the second argument of line 2
                //Under the new format, the time is the second argument of line 3
                double time=0;
                try{
                    if(oldFormat){
                        time=Double.parseDouble(secondSplit[1]);
                    }else{
                        time=Double.parseDouble(thirdSplit[1]);
                    }
                }catch(Exception e){
                    System.out.println("Error reading in times for base classifier, oldFormat="+oldFormat+" may be wrong");
                    System.out.println("Continue without timing");
                    readTimes=false;
                }
                double acc=Double.parseDouble(thirdSplit[0]);// accuracy is the first field on line 3
                if(calcAcc){
                    // Recompute accuracy from the prediction rows (actual,predicted,...) and
                    // sanity-check it against the stored value; hard-exit on mismatch.
                    String line=inf.readLine();
                    double a=0;
                    int count=0;
                    while(line!=null){
                        split=line.split(",");
                        count++;
                        if(split[0].equals(split[1]))
                            a++;
                        line=inf.readLine();
                    }
                    if(count>0)
                        a/=count;
                    if((a-acc)>0.000000001){
                        System.out.println("Mismatch in acc read from file and acc calculated from file");
                        System.out.println("THIS NEEDS INVESTIGATING. Abandoning the whole problem compilation ");
                        System.exit(1);
                    }
                }
                if(j==0){
                    trainTest[i]=acc;
                    trainTestTime[i]=time;
                }
                allFolds[i][j]=acc;
                allFoldsTime[i][j]=time;
                File tr_f=new File(fullPath+"/Predictions/"+problems.get(i)+"/trainFold"+j+".csv");
                if(tr_f.exists()){//Train fold present
                    trainCount++;
                }
            }catch(Exception e){
                // Any missing/malformed fold abandons the whole problem for the averages
                // (note 4 above); fold 0 may still be reported in the train/test file below.
                missing.add(problems.get(i));
                System.out.println("Some error processing "+fullPath+"/Predictions/"+problems.get(i)+"/testFold"+j+".csv");
                System.out.println(" Abandoning entire problem "+problems.get(i));
                cont=false;
            } finally{
                if(inf!=null)
                    inf.closeFile();
            }
        }
        if(cont){//Should have all the data
            trTsFile.writeString(problems.get(i));
            meansFile.writeString(problems.get(i));
            allFoldsFile.writeString(problems.get(i));
            trainFileCount.writeLine(problems.get(i)+","+trainCount);
            trTsFile.writeString(","+trainTest[i]);
            means[i]=0;
            for(int j=0;j<allFolds[i].length;j++){
                allFoldsFile.writeString(","+allFolds[i][j]);
                means[i]+=allFolds[i][j];
            }
            means[i]/=folds;
            meansFile.writeString(","+means[i]);
            trTsFile.writeString("\n");
            meansFile.writeString("\n");
            allFoldsFile.writeString("\n");
            if(readTimes){
                trTsTimesFile.writeString(problems.get(i));
                meanTimesFile.writeString(problems.get(i));
                allFoldsTimesFile.writeString(problems.get(i));
                trTsTimesFile.writeString(","+trainTestTime[i]);
                meansTime[i]=0;
                for(int j=0;j<allFolds[i].length;j++){
                    allFoldsTimesFile.writeString(","+allFoldsTime[i][j]);
                    meansTime[i]+=allFoldsTime[i][j];
                }
                meansTime[i]/=folds;
                meanTimesFile.writeString(","+meansTime[i]);
                trTsTimesFile.writeString("\n");
                meanTimesFile.writeString("\n");
                allFoldsTimesFile.writeString("\n");
            }
        }else{//Write trainTest if present
            if(trainTest[i]>0){ //Captured fold 0, lets use it
                trTsFile.writeLine(problems.get(i)+","+trainTest[i]);
                if(readTimes)
                    trTsTimesFile.writeString(problems.get(i));
            }
        }
    }
    if(args.length>4){ //Going to compare to some others
        // Each remaining arg is "SOURCE,NAME"; SOURCE picks one of the reference-results paths.
        String[] rc=new String[args.length-4];
        for(int i=4;i<args.length;i++)
            rc[i-4]=args[i];
        System.out.println("Comparing "+classifierName+" to ");
        String[][] classifiers=new String[rc.length][];
        for(int i=0;i<rc.length;i++)
            classifiers[i]=rc[i].split(",");
        ArrayList<HashMap<String,Double>> trainTestResults=new ArrayList<>();
        ArrayList<HashMap<String,Double>> averageResults=new ArrayList<>();
        for(int i=0;i<classifiers.length;i++){
            classifiers[i][0]=classifiers[i][0].toUpperCase();
            System.out.println(classifiers[i][0]+"_"+classifiers[i][1]);
            HashMap<String,Double> trTest=new HashMap<>();
            HashMap<String,Double> averages=new HashMap<>();
            //Look for train results
            String path="";
            switch(classifiers[i][0]){
                case "BAKEOFF":
                    path=bakeOffPath;
                    break;
                case "HIVE-COTE":
                    path=hiveCotePath;
                    break;
                case "REDUX":
                    path=reduxPath;
                    break;
                default:
                    System.out.println("UNKNOWN LOCATION INDICATOR "+classifiers[i][0]);
                    throw new Exception("UNKNOWN LOCATION INDICATOR "+classifiers[i][0]);
            }
            f=new File(path+"TrainTest/TrainTest"+classifiers[i][1]+".csv");
            if(f.exists()){
                inf=new InFile(path+"TrainTest/TrainTest"+classifiers[i][1]+".csv");
                String line=inf.readLine();
                while(line!=null){
                    String[] split=line.split(",");
                    String prob=split[0];
                    // Map legacy dataset names onto the current archive names.
                    if(prob.equals("CinCECGtorso"))//Hackhackityhack: legacy problem
                        prob="CinCECGTorso";
                    if(prob.equals("StarlightCurves"))//Hackhackityhack: legacy problem
                        prob="StarLightCurves";
                    if(prob.equals("NonInvasiveFatalECGThorax1"))//Hackhackityhack: legacy problem
                        prob="NonInvasiveFetalECGThorax1";
                    if(prob.equals("NonInvasiveFatalECGThorax2"))//Hackhackityhack: legacy problem
                        prob="NonInvasiveFetalECGThorax2";
                    Double d = Double.parseDouble(split[1]);
                    trTest.put(prob,d);
                    line=inf.readLine();
                }
            }
            f=new File(path+"Average30/Average30"+classifiers[i][1]+".csv");
            if(f.exists()){
                inf=new InFile(path+"Average30/Average30"+classifiers[i][1]+".csv");
                // inf.readLine();
                String line=inf.readLine();
                while(line!=null){
                    String[] split=line.split(",");
                    String prob=split[0];
                    if(prob.equals("CinCECGtorso"))//Hackhackityhack: legacy problem
                        prob="CinCECGTorso";
                    if(prob.equals("StarlightCurves"))//Hackhackityhack: legacy problem
                        prob="StarLightCurves";
                    if(prob.equals("NonInvasiveFatalECGThorax1"))//Hackhackityhack: legacy problem
                        prob="NonInvasiveFetalECGThorax1";
                    if(prob.equals("NonInvasiveFatalECGThorax2"))//Hackhackityhack: legacy problem
                        prob="NonInvasiveFetalECGThorax2";
                    Double d = Double.parseDouble(split[1]);
                    averages.put(prob,d);
                    line=inf.readLine();
                }
            }
            trainTestResults.add(trTest);
            averageResults.add(averages);
        }
        // Write side-by-side comparison files; the "CompleteOnly" variants contain only
        // problems for which every comparison classifier has a result.
        trTsFile=new OutFile(fullPath+"/QuickResults/CompareTrainTest"+classifierName+".csv");
        OutFile trTsFileComplete=new OutFile(fullPath+"/QuickResults/CompareTrainTestCompleteOnly"+classifierName+".csv");
        meansFile=new OutFile(fullPath+"/QuickResults/CompareAverage"+folds+"_"+classifierName+".csv");
        OutFile meansFileComplete=new OutFile(fullPath+"/QuickResults/CompareAverageCompleteOnly"+classifierName+".csv");
        trTsFile.writeString("Problem,"+classifierName);
        meansFile.writeString("Problem,"+classifierName);
        meansFileComplete.writeString("Problem,"+classifierName);
        trTsFileComplete.writeString("Problem,"+classifierName);
        for(int i=0;i<classifiers.length;i++){
            trTsFile.writeString(","+classifiers[i][0]+"_"+classifiers[i][1]);
            meansFile.writeString(","+classifiers[i][0]+"_"+classifiers[i][1]);
            meansFileComplete.writeString(","+classifiers[i][0]+"_"+classifiers[i][1]);
            trTsFileComplete.writeString(","+classifiers[i][0]+"_"+classifiers[i][1]);
        }
        trTsFile.writeString("\n");
        meansFile.writeString("\n");
        meansFileComplete.writeString("\n");
        trTsFileComplete.writeString("\n");
        for(int i=0;i<problems.size();i++){
            String name=problems.get(i);
            boolean present=true;
            //Train test
            if(trainTest[i]>0){ //Captured fold 0, lets use it
                String line=name+","+trainTest[i];
                trTsFile.writeString(name+","+trainTest[i]);
                for(int j=0;j<classifiers.length;j++){
                    HashMap<String,Double> trTest=trainTestResults.get(j);
                    if(trTest.containsKey(name)){
                        Double x=trTest.get(name);
                        trTsFile.writeString(","+x);
                        line+=","+x;
                    }
                    else{
                        trTsFile.writeString(",");
                        present=false;
                    }
                }
                trTsFile.writeString("\n");
                if(present)
                    trTsFileComplete.writeLine(line);
            }
            //Averages
            if(!missing.contains(name)){
                String line=name+","+means[i];
                meansFile.writeString(name+","+means[i]);
                for(int j=0;j<classifiers.length;j++){
                    HashMap<String,Double> av=averageResults.get(j);
                    if(av.containsKey(name)){
                        Double x=av.get(name);
                        meansFile.writeString(","+x);
                        line+=","+x;
                    }
                    else{
                        meansFile.writeString(",");
                        present=false;
                    }
                }
                meansFile.writeString("\n");
                if(present)
                    meansFileComplete.writeLine(line);
            }
        }
    }
}

/**
 * Builds the argument array for singleClassifiervsReferenceResults, inserting the
 * mandatory oldFormat flag ("false") at index 3 so that the optional comparison
 * classifier specs start at index 4 as the callee expects.
 */
static String[] makeQuickStatsArgs(String primary, boolean calcAcc, int folds, String...others) throws Exception{
    String[] input;
    if(others==null)
        input=new String[4];
    else
        input=new String[4+others.length];
    input[0]=primary;
    input[1]=calcAcc+"";
    input[2]=folds+"";
    input[3]="false";
    if(others!=null)
        for(int i=0;i<others.length;i++)
            input[i+4]=others[i];
    return input;
}

/**
 * Convenience wrapper around singleClassifiervsReferenceResults.
 * NOTE(review): this builds only 3 mandatory args (no oldFormat flag at index 3), so the
 * callee will either throw "Wrong input args" (others==null) or misread others[0] as the
 * oldFormat boolean. makeQuickStatsArgs above appears to be the corrected replacement —
 * confirm whether this method is dead/legacy before using it.
 */
public static void quickStats(String primary, boolean calcAcc, int folds, String...others) throws Exception{
    String[] input;
    if(others==null)
        input=new String[3];
    else
        input=new String[3+others.length];
    input[0]=primary;
    input[1]=calcAcc+"";
    input[2]=folds+"";
    if(others!=null)
        for(int i=0;i<others.length;i++)
            input[i+3]=others[i];
    singleClassifiervsReferenceResults(input);
}

// One-off ad-hoc comparison of two RotF result sets against the reference RotF (hard-coded paths).
public static void oneOffCollate() throws Exception{
    String[] classifierList={"RotF_Josh","RotF_Markus"};//sktime1","sktime2"};//,"RotF","DTWCV"};{"PythonTSF","PythonTSFComposite",
    String readPath="E://Temp/Python/";
    //Where to put them, directory name, number of folds
    MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation("E:/Temp/RotFDebug/","rotf_debug",1);
    m.setIgnoreMissingResults(true);
    m.setBuildMatlabDiagrams(true);
    m.setDebugPrinting(true);
    m.setUseAllStatistics();
    m.setDatasets(DatasetLists.ReducedUCI);
    m.setTestResultsOnly(true);
    m.readInEstimators(classifierList,readPath);
    classifierList=new String[]{"RotF"};
    readPath="E://Temp/";
    m.readInEstimators(classifierList,readPath);
    m.runComparison();
}

// Ad-hoc RotF debugging run over the combined reduced-UCI + 112 TSC problem lists.
public static void rotFDebug() throws Exception{
    String[] classifierList={"RotFMarkus","RotF"};//sktime1","sktime2"};//,"RotF","DTWCV"};{"PythonTSF","PythonTSFComposite",
    String readPath="Z://RotFDebug/";
    MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation("Z:/RotFDebug/","markus_no_norm",30);
    m.setIgnoreMissingResults(true);
    m.setBuildMatlabDiagrams(true);
    m.setDebugPrinting(true);
    m.setUseAllStatistics();
    String[] allProbs=new String[DatasetLists.ReducedUCI.length+DatasetLists.tscProblems112.length];
    System.arraycopy(DatasetLists.ReducedUCI,0,allProbs,0,DatasetLists.ReducedUCI.length);
    System.arraycopy(DatasetLists.tscProblems112,0,allProbs,DatasetLists.ReducedUCI.length,DatasetLists.tscProblems112.length);
    m.setDatasets(allProbs);
    m.setTestResultsOnly(true);
    m.readInEstimators(classifierList,readPath);
    // classifierList=new String[]{"RandF","RotF"};//sktime1","sktime2"};//,"RotF","DTWCV"};{"PythonTSF","PythonTSFComposite",
    // readPath="E://Results/RotFDebug/UCINorm/";
    // m.readInClassifiers(classifierList,readPath);
    m.runComparison();
}

// Bakeoff-redux comparison: HIVE-COTE components vs FlatCote over the 78-problem list.
public static void bakeOffRedux() throws Exception{
    String[] classifierList={"EE","BOSS","TSF","RISE","STC"}; //"DTWCV","STC","EE","BOSS","TSF","RISE","HIVE-COTE"};//sktime1","sktime2"};//,"RotF","DTWCV"};{"PythonTSF","PythonTSFComposite",
    String readPath="Z:/ReferenceResults/Hive Cote Components REDUX/";
    //Where to put them, directory name, number of folds
    MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath,"bakeoff_redux",30);
    m.setIgnoreMissingResults(true);
    m.setBuildMatlabDiagrams(true);
    m.setDebugPrinting(true);
    m.setUseAllStatistics();
    String[] allProbs=DatasetLists.tscProblems78;
    m.setDatasets(allProbs);
    m.setTestResultsOnly(true);
    m.readInEstimators(classifierList,readPath);
    classifierList=new String[]{"FlatCote"};//sktime1","sktime2"};//,"RotF","DTWCV"};{"PythonTSF","PythonTSFComposite",
    readPath="E:/Results Working Area/Hybrids/";
    m.readInEstimators(classifierList,readPath);
    m.runComparison();
}

// Distance-based classifier comparison (EE/PF/FastEE) over the 112 TSC problems.
// NOTE(review): setTestResultsOnly is called twice — the second call with true overrides
// the testOnly parameter, which looks unintended.
public static void compareDistanceBased(int folds, boolean testOnly) throws Exception {
    String readPath="E:/Results Working Area/DistanceBased/";
    MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath, "distance_compare_"+folds,folds);
    m.setIgnoreMissingResults(true);
    m.setBuildMatlabDiagrams(true);
    m.setDebugPrinting(true);
    m.setUseAllStatistics();
    m.setTestResultsOnly(testOnly);
    String[] allProbs=DatasetLists.tscProblems112;
    m.setDatasets(allProbs);
    m.setTestResultsOnly(true);
    String[] benchmark={"EE","PF","FastEE"};
    m.readInEstimators(benchmark,readPath);
    m.runComparison();
}

// Dictionary-based classifier comparison, then per-classifier quick stats vs the
// reference results, then a full collation. Mutates the static bakeOffPath/hiveCotePath/
// reduxPath, problems and readProblemNamesFromDir fields as a side effect.
public static void compareDictionary(int folds, boolean testOnly) throws Exception {
    MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation("E:/Results Working Area/DictionaryBased/", "dictionary_compare_with_csBOSS",folds);
    m.setIgnoreMissingResults(true);
    m.setBuildMatlabDiagrams(true);
    m.setDebugPrinting(false);
    m.setUseAllStatistics();
    // String[] allProbs=DatasetLists.newProblems27;
    String[] allProbs=DatasetLists.tscProblems112;
    m.setDatasets(allProbs);
    m.setTestResultsOnly(testOnly);
    String readPath="E:/Results Working Area/DictionaryBased/";
    String[] classifierList={"BOSS","S-BOSS","WEASEL","cBOSS","cS-BOSS"};//,};//,"
    m.readInEstimators(classifierList,readPath);
    m.runComparison();
    boolean recalAcc=false;
    //These for quick stats
    bakeOffPath=bakeOffPathBeast;
    hiveCotePath=hiveCotePathBeast;
    reduxPath= reduxPathBeast;
    for(String cls:classifierList) {
        // String comparisons=null;
        String[] comparisons = {"HIVE-COTE,BOSS", "HIVE-COTE,HIVE-COTE"};
        String[] args = makeQuickStatsArgs(readPath + cls, recalAcc, folds, comparisons);
        singleClassifiervsReferenceResults(args);
    }
    problems=new ArrayList<>();
    readProblemNamesFromDir=false;
    for(String str:allProbs)
        problems.add(str);
    for(String cls:classifierList) {
        String parameters = "0";
        String[] str = {readPath, "Z:\\ArchiveData\\Univariate_arff\\", folds + "", "false", cls, parameters};
        //Change this to read an array
        collate(str);
    }
}

// HIVE-COTE variant comparison over the 85-problem list (30 folds, hard-coded).
public static void compareHiveCoteVariants() throws Exception {
    int folds=30;
    String readPath="E:/Results Working Area/HC Variants/";
    MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath, "hc_compare_"+folds,folds);
    m.setIgnoreMissingResults(true);
    m.setBuildMatlabDiagrams(true);
    m.setDebugPrinting(true);
    m.setUseAllStatistics();
    String[] allProbs=DatasetLists.tscProblems85;
    m.setDatasets(allProbs);
    m.setTestResultsOnly(true);
    String[] newClassifiers={"HC-Alpha4","HC-Alpha1","HC-Catch22TSF"};
    m.readInEstimators(newClassifiers,readPath);
    // readPath="E:/Results Working Area/Hybrids/";
    // String[] c2={"TSCHIEF","HiveCote"};
    // m.readInClassifiers(c2,readPath);
    m.runComparison();
}

// STC shapelet-count variant comparison (k100..k1000) vs the reference STC, with
// follow-up quick stats and collation. Mutates the same static fields as compareDictionary.
public static void compareShapeletVariants(int folds) throws Exception {
    String readPath="E:/Results Working Area/STC Variants/";
    MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath, "shapelet_compare_"+folds,folds);
    m.setIgnoreMissingResults(true);
    m.setBuildMatlabDiagrams(true);
    m.setDebugPrinting(true);
    m.setUseAllStatistics();
    String[] allProbs=DatasetLists.tscProblems112;
    m.setDatasets(allProbs);
    m.setTestResultsOnly(true);
    //"k200","k300","k400","k600","k700","k800","k900",
    String[] classifierList={"k100","k500","k1000","hr1000k100"};//,"BOSS","SpatialBOSS","cBOSS"};//,"cBOSS",
    m.readInEstimators(classifierList,readPath);
    m.readInEstimators(new String[]{"STC"},"Z:\\ReferenceResults\\Hive Cote Components REDUX\\");
    m.runComparison();
    boolean recalAcc=false;
    //These for quick stats
    bakeOffPath=bakeOffPathBeast;
    hiveCotePath=hiveCotePathBeast;
    reduxPath= reduxPathBeast;
    for(String cls:classifierList) {
        // String comparisons=null;
        String[] comparisons = {"HIVE-COTE,ST", "HIVE-COTE,HIVE-COTE"};
        String[] args = makeQuickStatsArgs(readPath + cls, recalAcc, folds, comparisons);
        singleClassifiervsReferenceResults(args);
    }
    problems=new ArrayList<>();
    readProblemNamesFromDir=false;
    for(String str:allProbs)
        problems.add(str);
    for(String cls:classifierList) {
        String parameters = "0";
        String[] str = {readPath, "Z:\\ArchiveData\\Univariate_arff\\", folds + "", "false", cls, parameters};
        //Change this to read an array
        collate(str);
    }
}

// Shapelet transform implementation comparison (Bakeoff vs DAWAK vs tree), same
// quick-stats + collation pattern as compareShapeletVariants.
public static void compareShapelets(int folds) throws Exception {
    String readPath="E:/Results Working Area/STC Variants/";
    MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath, "shapelet_compare_"+folds,folds);
    m.setIgnoreMissingResults(true);
    m.setBuildMatlabDiagrams(true);
    m.setDebugPrinting(true);
    m.setUseAllStatistics();
    String[] allProbs=DatasetLists.tscProblems112;
    m.setDatasets(allProbs);
    m.setTestResultsOnly(true);
    String[] classifierList={"STC-Bakeoff","STC-DAWAK","ShapeletTreeClassifier"};//,"BOSS","SpatialBOSS","cBOSS"};//,"cBOSS",
    m.readInEstimators(classifierList,readPath);
    m.runComparison();
    boolean recalAcc=false;
    //These for quick stats
    bakeOffPath=bakeOffPathBeast;
    hiveCotePath=hiveCotePathBeast;
    reduxPath= reduxPathBeast;
    for(String cls:classifierList) {
        // String comparisons=null;
        String[] comparisons = {"HIVE-COTE,ST", "HIVE-COTE,HIVE-COTE"};
        String[] args = makeQuickStatsArgs(readPath + cls, recalAcc, folds, comparisons);
        singleClassifiervsReferenceResults(args);
    }
    problems=new ArrayList<>();
    readProblemNamesFromDir=false;
    for(String str:allProbs)
        problems.add(str);
    for(String cls:classifierList) {
        String parameters = "0";
        String[] str = {readPath, "Z:\\ArchiveData\\Univariate_arff\\", folds + "", "false", cls, parameters};
        //Change this to read an array
        collate(str);
    }
}

// State-of-the-art comparison: TS-CHIEF vs HC-Catch22TSF over the 112 TSC problems.
public static void compareTopDogs() throws Exception {
    int folds=30;
    String readPath="E:/Results Working Area/Hybrids/";
    MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath, "top_dog_compare",folds);
    m.setIgnoreMissingResults(true);
    m.setBuildMatlabDiagrams(true);
    m.setDebugPrinting(true);
    m.setUseAllStatistics();
    String[] allProbs=DatasetLists.tscProblems112;
    m.setDatasets(allProbs);
    m.setTestResultsOnly(true);
    String[] newClassifiers={"TSCHIEF"};
    m.readInEstimators(newClassifiers,readPath);
    // readPath="Z:/Results Working Area/DeepLearning/";
    // newClassifiers=new String[] {"resnet","InceptionTime"};
    // m.readInClassifiers(newClassifiers,readPath);
    readPath="E:/Results Working Area/HC Variants/";
    newClassifiers=new String[] {"HC-Catch22TSF"};
    m.readInEstimators(newClassifiers,readPath);
    m.runComparison();
}

// CIF/catch22 paper, section 5.1: mini-bakeoff of ten reference classifiers.
public static void C22IFPaperSection5_1MiniBakeoff() throws Exception {
    int folds = 30;
    String readPath = "E:/Results Working Area/CIFPaper/";
    MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath+"Summary Results/", "miniBakeoff5_1_folds"+folds,folds);
    m.setIgnoreMissingResults(true);
    m.setBuildMatlabDiagrams(true);
    m.setDebugPrinting(true);
    m.setUseAllStatistics();
    String[] allProbs=DatasetLists.tscProblems112;
    m.setDatasets(allProbs);
    m.setTestResultsOnly(true);
    String[] c={"catch22","DTWCV","HIVE-COTE","S-BOSS","PF","STC","TSF","TS-CHIEF","InceptionTime","WEASEL"};//,"HC-Latest","HC-CIF"};
    m.readInEstimators(c,readPath+"Classifier Results/");
    m.runComparison();
}
public static void C22IFPaperSection5_2IntervalBased() throws Exception { int folds = 30; String readPath = "E:/Results Working Area/CIFPaper/"; MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath+"Summary Results/", "intervalbased5_2_folds"+folds,folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(true); m.setDebugPrinting(true); m.setUseAllStatistics(); String[] allProbs=DatasetLists.tscProblems112; m.setDatasets(allProbs); m.setTestResultsOnly(true); String[] c={"CIF","hybrid","TSF","catch22"};//,"HC-Latest","HC-CIF"}; m.readInEstimators(c,readPath+"Classifier Results/"); m.runComparison(); } public static void C22IFPaperSection5_2HCComponents() throws Exception { int folds = 30; String readPath = "E:/Results Working Area/"; MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath+"C22IFPaper/", "HC-components5_2_folds"+folds,folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(true); m.setDebugPrinting(true); m.setUseAllStatistics(); String[] allProbs=DatasetLists.tscProblems112; m.setDatasets(allProbs); m.setTestResultsOnly(true); String[] c={"CIF"};//,"HC-Latest","HC-CIF"}; m.readInEstimators(c,readPath+"IntervalBased/"); String[] c2={"BOSS","RISE","TSF","EE","STC","HIVE-COTE"}; m.readInEstimators(c2,readPath+"HC Variants/"); m.runComparison(); } public static void C22IFPaperSection5_2NewComponents() throws Exception { int folds = 30; String readPath = "E:/Results Working Area/CIFPaper/"; MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath+"Summary Results/", "new-components5_2_folds"+folds,folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(true); m.setDebugPrinting(true); m.setUseAllStatistics(); String[] allProbs=DatasetLists.tscProblems112; m.setDatasets(allProbs); m.setTestResultsOnly(true); String[] c={"S-BOSS","STC","PF","WEASEL","CIF"}; m.readInEstimators(c,readPath+"Classifier Results/"); m.runComparison(); } public static void CIFPaperSection5_3SOTA() throws 
Exception { int folds = 30; String readPath = "E:/Results Working Area/CIFPaper/"; MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath+"Summary Results/", "Reboot_SOTA5_3_folds"+folds,folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(true); m.setDebugPrinting(true); m.setUseAllStatistics(); String[] allProbs=DatasetLists.tscProblems112; m.setDatasets(allProbs); m.setTestResultsOnly(true); String[] c={"HIVE-COTE","HC-CIF","TS-CHIEF","InceptionTime"};//,"HC-Latest","HC-CIF"}; m.readInEstimators(c,readPath+"Classifier Results/"); m.runComparison(); } public static void HC_SOTA() throws Exception { int folds = 30; String readPath = "E:/Results Working Area/HC Variants/"; MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath, "HC_tuning_folds"+folds,folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(true); m.setDebugPrinting(true); m.setUseAllStatistics(); String[] allProbs=DatasetLists.tscProblems112; m.setDatasets(allProbs); m.setTestResultsOnly(true); String[] c={"HIVE-COTE","TunedHIVE-COTE"};//,"HC-Latest","HC-CIF"}; m.readInEstimators(c,readPath); m.runComparison(); } public static void C22IFPaperSectionSuppMaterialAll() throws Exception { int folds = 30; String readPath = "E:/Results Working Area/CIFPaper/"; MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath+"Summary Results/", "AllSuppMaterial_folds"+folds,folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(true); m.setDebugPrinting(true); m.setUseAllStatistics(); String[] allProbs=DatasetLists.tscProblems112; m.setDatasets(allProbs); m.setTestResultsOnly(true); String[] c={"catch22","DTWCV","CIF","HIVE-COTE","S-BOSS","PF","STC","TSF","TS-CHIEF","InceptionTime","WEASEL","HC-CIF"};//,"HC-Latest","HC-CIF"}; m.readInEstimators(c,readPath+"Classifier Results/"); m.runComparison(); } public static void tempCompare() throws Exception { int folds = 30; String readPath = "E:/Results Working Area/FrequencyBased/"; 
MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath, "RISE_TRAIN"+folds,folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(true); m.setDebugPrinting(true); m.setUseAllStatistics(); String[] allProbs=DatasetLists.tscProblems112; m.setDatasets(allProbs); m.setTestResultsOnly(false); String[] allClassifiersWithTestResults={"RISE-CVTRAIN","RISE-OOB"}; m.readInEstimators(allClassifiersWithTestResults,readPath); m.runComparison(); } public static void tsfCompare() throws Exception { int folds = 30; String readPath = "E:/Results Working Area/TSF Test/"; MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath, "TSFResults_folds"+folds,folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(true); m.setDebugPrinting(true); m.setUseAllStatistics(); String[] allProbs=DatasetLists.tscProblems112; m.setDatasets(allProbs); m.setTestResultsOnly(false); String[] allClassifiersWithTestResults={"TSFBagging","TSFCV_Full","TSFOOB_Full","TunedTSF"}; m.readInEstimators(allClassifiersWithTestResults,readPath); m.runComparison(); } public static void TDEvsDictionary() throws Exception { int folds = 30; String readPath = "E:/Results Working Area/"; MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath+"TDEPaper/", "DictionaryComparison",folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(true); m.setDebugPrinting(true); m.setUseAllStatistics(); String[] allProbs=DatasetLists.tscProblems112; allProbs=tscProblems107; m.setDatasets(allProbs); // m.setTestResultsOnly(true); String[] ts={"BOSS","cBOSS","WEASEL","S-BOSS","TDE","cS-BOSS"}; m.readInEstimators(ts,readPath+"DictionaryBased/"); m.runComparison(); } public static void HC_TDE_vsSOTA () throws Exception { int folds = 30; String readPath = "E:/Results Working Area/"; MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath+"TempResults/", "SOTA",folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(true); 
m.setDebugPrinting(true); m.setUseAllStatistics(); String[] allProbs=DatasetLists.tscProblems112; m.setDatasets(allProbs); m.setTestResultsOnly(true); // String[] allClassifiersWithTestResults={"HC-TDE2","HIVE-COTE2","HC-WEASEL2"}; // m.readInClassifiers(allClassifiersWithTestResults,readPath+"TDEPaper/"); String[] str2={"HIVE-COTE"};// m.readInEstimators(str2,readPath+"TDEPaper/"); String[] ts={"TS-CHIEF"}; m.readInEstimators(ts,readPath+"Hybrids/"); String[] incep={"InceptionTime","ROCKET"}; m.readInEstimators(incep,readPath+"DeepLearning/"); m.runComparison(); } public static void HC_Variants() throws Exception { int folds = 30; String readPath = "E:/Results Working Area/TDEPaper/"; MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath, "HC_Variants",folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(true); m.setDebugPrinting(true); m.setUseAllStatistics(); String[] allProbs=DatasetLists.tscProblems112; m.setDatasets(allProbs); m.setTestResultsOnly(true); // String[] allClassifiersWithTestResults={"HC-TDE2","HIVE-COTE2","HC-WEASEL2"}; // m.readInClassifiers(allClassifiersWithTestResults,readPath+"TDEPaper/"); String[] str2={"HC-TDE","HIVE-COTE", "HC-WEASEL","HC-S-BOSS"}; m.readInEstimators(str2,readPath); str2=new String[]{"InceptionTime","ROCKET"}; m.readInEstimators(str2,"E:/Results Working Area/DeepLearning/"); str2=new String[]{"TS-CHIEF"}; m.readInEstimators(str2,"E:/Results Working Area/Hybrids/"); m.runComparison(); } public static void TDEContractCompare() throws Exception { int folds = 30; String readPath = "E:/Results Working Area/"; MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath+"TDEPaper/", "TDEContract_Folds"+folds,folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(true); m.setDebugPrinting(true); m.setUseAllStatistics(); String[] allProbs=DatasetLists.tscProblems112; m.setDatasets(allProbs); m.setTestResultsOnly(true); // String[] 
allClassifiersWithTestResults={"HC-TDE2","HIVE-COTE2","HC-WEASEL2"}; // m.readInClassifiers(allClassifiersWithTestResults,readPath+"TDEPaper/"); String[] str2={"TDE","TDE-1H"};// m.readInEstimators(str2,readPath+"TDEPaper/"); m.runComparison(); } public static void componentTest() throws Exception { int folds = 30; String readPath = "E:/Results Working Area/"; MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath+"HCv2Paper/", "Component_TEST"+folds,folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(true); // m.setDebugPrinting(true); m.setUseAllStatistics(); String[] allProbs=tscProblems107; m.setDatasets(allProbs); m.setTestResultsOnly(false); String[] g1={"BOSS","STC","RISE","TSF","BcS-BOSS"}; m.readInEstimators(g1,readPath+"cS-BOSSPaper/"); String[] g2={"CIF","PF","RISEV2"}; m.readInEstimators(g2,readPath+"HCv2Paper/"); m.runComparison(); } public static void bestCompare() throws Exception { int folds = 30; String readPath = "E:/Results Working Area/"; MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath+"HCv2Paper/", "Hybrid_"+folds,folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(true); // m.setDebugPrinting(true); m.setUseAllStatistics(); String[] allProbs=DatasetLists.tscProblems112; m.setDatasets(allProbs); m.setTestResultsOnly(true); String[] allClassifiersWithTestResults={"HC-TED2","HIVE-COTE2"}; m.readInEstimators(allClassifiersWithTestResults,readPath+"cS-BOSSPaper/"); String[] ts={"TSCHIEF"}; m.readInEstimators(ts,readPath+"Hybrids/"); String[] incep={"InceptionTime"}; m.readInEstimators(incep,readPath+"DeepLearning/"); String[] hc={"HIVE-COTEV2","HC-V2NoRise"}; m.readInEstimators(hc,readPath+"HCv2Paper/"); m.runComparison(); } //Missing //<editor-fold defaultstate="collapsed" desc="tsc Problems 2018, no missing values"> public static String[] tscProblems107={ //Train Size, Test Size, Series Length, Nos Classes "ACSF1", "Adiac", // 390,391,176,37 "ArrowHead", // 36,175,251,3 "Beef", // 
30,30,470,5 "BeetleFly", // 20,20,512,2 "BirdChicken", // 20,20,512,2 "BME", "Car", // 60,60,577,4 "CBF", // 30,900,128,3 "Chinatown", "ChlorineConcentration", // 467,3840,166,3 "CinCECGTorso", // 40,1380,1639,4 "Coffee", // 28,28,286,2 "Computers", // 250,250,720,2 "CricketX", // 390,390,300,12 "CricketY", // 390,390,300,12 "CricketZ", // 390,390,300,12 "Crop", "DiatomSizeReduction", // 16,306,345,4 "DistalPhalanxOutlineAgeGroup", // 400,139,80,3 "DistalPhalanxOutlineCorrect", // 600,276,80,2 "DistalPhalanxTW", // 400,139,80,6 "Earthquakes", // 322,139,512,2 "ECG200", //100, 100, 96 "ECG5000", //4500, 500,140 "ECGFiveDays", // 23,861,136,2 "EOGHorizontalSignal", "EOGVerticalSignal", "EthanolLevel", "FaceAll", // 560,1690,131,14 "FaceFour", // 24,88,350,4 "FacesUCR", // 200,2050,131,14 "FiftyWords", // 450,455,270,50 "Fish", // 175,175,463,7 "FreezerRegularTrain", "FreezerSmallTrain", "GunPoint", // 50,150,150,2 "GunPointAgeSpan", "GunPointMaleVersusFemale", "GunPointOldVersusYoung", "Ham", //105,109,431 "Haptics", // 155,308,1092,5 "Herring", // 64,64,512,2 "HouseTwenty", "InlineSkate", // 100,550,1882,7 "InsectEPGRegularTrain", "InsectEPGSmallTrain", "InsectWingbeatSound",//1980,220,256 "ItalyPowerDemand", // 67,1029,24,2 "LargeKitchenAppliances", // 375,375,720,3 "Lightning2", // 60,61,637,2 "Lightning7", // 70,73,319,7 "Mallat", // 55,2345,1024,8 "Meat",//60,60,448 "MedicalImages", // 381,760,99,10 "MiddlePhalanxOutlineAgeGroup", // 400,154,80,3 "MiddlePhalanxOutlineCorrect", // 600,291,80,2 "MiddlePhalanxTW", // 399,154,80,6 "MixedShapesRegularTrain", "MixedShapesSmallTrain", "MoteStrain", // 20,1252,84,2 "OliveOil", // 30,30,570,4 "OSULeaf", // 200,242,427,6 "PhalangesOutlinesCorrect", // 1800,858,80,2 "Phoneme",//1896,214, 1024 "PigAirwayPressure", "PigArtPressure", "PigCVP", "Plane", // 105,105,144,7 "PowerCons", "ProximalPhalanxOutlineAgeGroup", // 400,205,80,3 "ProximalPhalanxOutlineCorrect", // 600,291,80,2 "ProximalPhalanxTW", // 400,205,80,6 
"RefrigerationDevices", // 375,375,720,3 "Rock", "ScreenType", // 375,375,720,3 "SemgHandGenderCh2", "SemgHandMovementCh2", "SemgHandSubjectCh2", "ShapeletSim", // 20,180,500,2 "ShapesAll", // 600,600,512,60 "SmallKitchenAppliances", // 375,375,720,3 "SmoothSubspace", "SonyAIBORobotSurface1", // 20,601,70,2 "SonyAIBORobotSurface2", // 27,953,65,2 "StarLightCurves", // 1000,8236,1024,3 "Strawberry",//370,613,235 "SwedishLeaf", // 500,625,128,15 "Symbols", // 25,995,398,6 "SyntheticControl", // 300,300,60,6 "ToeSegmentation1", // 40,228,277,2 "ToeSegmentation2", // 36,130,343,2 "Trace", // 100,100,275,4 "TwoLeadECG", // 23,1139,82,2 "TwoPatterns", // 1000,4000,128,4 "UMD", "UWaveGestureLibraryAll", // 896,3582,945,8 "UWaveGestureLibraryX", // 896,3582,315,8 "UWaveGestureLibraryY", // 896,3582,315,8 "UWaveGestureLibraryZ", // 896,3582,315,8 "Wafer", // 1000,6164,152,2 "Wine",//54 57 234 "WordSynonyms", // 267,638,270,25 "Worms", //77, 181,900,5 "WormsTwoClass",//77, 181,900,5 "Yoga" // 300,3000,426,2 }; //</editor-fold> public static void memoryCompare() throws Exception { int folds = 30; String readPath = "E:/Results Working Area/TDEPaper/MemoryCompare/"; MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath, "MemoryComparison",folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(false); m.setDebugPrinting(true); m.setUseAllStatistics(); String[] allProbs=DatasetLists.tscProblems112; m.setDatasets(allProbs); m.setTestResultsOnly(true); // String[] allClassifiersWithTestResults={"HC-TDE2","HIVE-COTE2","HC-WEASEL2"}; // m.readInClassifiers(allClassifiersWithTestResults,readPath+"TDEPaper/"); String[] str2={"BOSS","S-BOSS", "WEASEL"}; m.readInEstimators(str2,readPath); str2=new String[]{"cBOSS","cS-BOSS","TDE"}; m.readInEstimators(str2,"Z:/Results Working Area/DictionaryBased/"); m.runComparison(); } public static void HC_Components() throws Exception { int folds = 30; String readPath = "E:/Results Working Area/HIVE-COTE 1.0/"; String[] 
ts={"TSF","RISE","cBOSS","STC","HC 1.0"}; MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath, "HC-Analysis",folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(true); m.setDebugPrinting(true); m.setUseAllStatistics(); String[] allProbs=DatasetLists.tscProblems112; m.setDatasets(allProbs); m.setTestResultsOnly(true); m.readInEstimators(ts,readPath); m.runComparison(); } public static void HC_vsSOTA () throws Exception { int folds = 30; String readPath = "E:/Results Working Area/"; MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath+"HIVE-COTE 1.0/", "HC-vs-SOTA_folds"+folds,folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(true); m.setDebugPrinting(true); m.setUseAllStatistics(); String[] allProbs=DatasetLists.tscProblems112; m.setDatasets(allProbs); m.setTestResultsOnly(true); // String[] allClassifiersWithTestResults={"HC-TDE2","HIVE-COTE2","HC-WEASEL2"}; // m.readInClassifiers(allClassifiersWithTestResults,readPath+"TDEPaper/"); String[] str2={"HC 1.0"};// m.readInEstimators(str2,readPath+"HIVE-COTE 1.0/"); String[] ts={"TS-CHIEF"}; m.readInEstimators(ts,readPath+"Hybrids/"); String[] incep={"InceptionTime","ROCKET"}; m.readInEstimators(incep,readPath+"DeepLearning/"); m.runComparison(); } public static void HC_All () throws Exception { int folds = 30; String readPath = "E:/Results Working Area/"; MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath+"HIVE-COTE 1.0/", "HC-all_folds"+folds,folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(true); m.setDebugPrinting(true); m.setUseAllStatistics(); String[] allProbs=DatasetLists.tscProblems112; m.setDatasets(allProbs); m.setTestResultsOnly(true); // String[] allClassifiersWithTestResults={"HC-TDE2","HIVE-COTE2","HC-WEASEL2"}; // m.readInClassifiers(allClassifiersWithTestResults,readPath+"TDEPaper/"); String[] str2={"TSF","RISE","cBOSS","STC","HC 1.0"};// m.readInEstimators(str2,readPath+"HIVE-COTE 1.0/"); String[] 
ts={"TS-CHIEF"}; m.readInEstimators(ts,readPath+"Hybrids/"); String[] incep={"InceptionTime","ROCKET"}; m.readInEstimators(incep,readPath+"DeepLearning/"); m.runComparison(); } public static void temp() throws Exception { int folds = 1; String readPath = "E:/Results Working Area/HIVE-COTE 1.0/"; String[] ts={"HIVE-COTE","TSCHIEF"}; MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath, "temp",folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(true); m.setDebugPrinting(true); m.setUseAllStatistics(); String[] allProbs=DatasetLists.tscProblems112; m.setDatasets(allProbs); m.setTestResultsOnly(true); m.readInEstimators(ts,readPath); m.runComparison(); } public static void singleProblem(String classifier) throws Exception { int folds = 30; String readPath = "Z:/ReferenceResults/"; String[] ts={classifier}; MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath, classifier+"/SummaryEvaluation",folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(true); m.setDebugPrinting(true); m.setUseAllStatistics(); String[] allProbs=DatasetLists.tscProblems112; m.setDatasets(allProbs); m.setTestResultsOnly(true); m.readInEstimators(ts,readPath); m.runComparison(); } public static void makeMegaCD() throws Exception { int folds = 30; String readPath = "Z:/ReferenceResults/"; String[] ts={"BOSS","Catch22","cBOSS","HIVE-COTE v1.0","InceptionTime","ProximityForest","ResNet", "RISE", "ROCKET","S-BOSS","STC","TS-CHIEF","TSF","WEASEL"}; MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath, "MegaComparison",folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(true); m.setDebugPrinting(true); m.setUseAllStatistics(); String[] allProbs=DatasetLists.tscProblems112; m.setDatasets(allProbs); m.setTestResultsOnly(true); m.readInEstimators(ts,readPath); m.runComparison(); } public static void main(String[] args) throws Exception { //TDE vs BOSS, cBOSS, cS-BOSS, WEASEL and S-BOSS // TDEvsDictionary(); 
//HC-TDE vs HIVE-COTE, TS-CHIEF and InceptionTime // HC_Components(); // HC_All(); // HC_vsSOTA(); //HC-TDE vs HC-S-BOSS and HC-WEASEL // memoryCompare(); // HC_Variants(); // tempSummary(); // temp(); // makeMegaCD(); // System.exit(0); String type=""; String[] datasets=DatasetLists.fixedLengthMultivariate; type="QuickStats"; type="SingleClassifiers"; // type="MultipleClassifiers"; if (args.length == 0) {//Local run: manually configure int folds=30; String readPath="E:\\Results Working Area\\Multivariate\\CompleteClassifiers\\"; // String readPath="Z:/Results and Code for Papers/CIF-KDD2020/Classifier Results/"; // String[] classifierList={"BOSS","cBOSS","S-BOSS","WEASEL","cS-BOSS","TDE"}; String[] classifierList={"CBOSS_I","TSF_I","RISE_I","STC_I","HIVE-COTE_I"}; // String readPath="E:/Results Working Area/DeepLearning/"; // singleProblem(classifierList[0]); switch(type){ case "QuickStats": //OR DIRECTLY IF YOU WANT //quickStats("C:/Users/ajb/Dropbox/results david/ShapeletForest",false,1,"HIVE-COTE,ST","HIVE-COTE,BOSS","HIVE-COTE,HIVE-COTE"); // quickStats("C:/Users/ajb/Dropbox/results david/CNN100hours",false,1); //PYTHON VERSIONS // quickStats("E:/Results/sktimeResults/TSF",false,30,"HIVE-COTE,TSF"); // quickStats("E:/Results/sktimeResults/BOSS",false,30,"HIVE-COTE,BOSS"); // quickStats("E:/Results/sktimeResults/RISE",false,30,"HIVE-COTE,RISE"); boolean recalAcc=false; //These for quick stats bakeOffPath=bakeOffPathBeast; hiveCotePath=hiveCotePathBeast; reduxPath= reduxPathBeast; for(String cls:classifierList){ // String comparisons=null; String[] comparisons={"HIVE-COTE,TSF","HIVE-COTE,RISE","HIVE-COTE,BOSS","HIVE-COTE,ST","HIVE-COTE,HIVE-COTE"}; args=makeQuickStatsArgs(readPath+cls,recalAcc,folds,comparisons); singleClassifiervsReferenceResults(args); } break; case "SingleClassifiers": problems=new ArrayList<>(); readProblemNamesFromDir=false; for(String str:datasets) problems.add(str); for(String cls:classifierList){ System.out.println(" Processing "+cls); 
String parameters="0"; String[] str={readPath, "Z:\\ArchiveData\\Univariate_arff\\",folds+"","false",cls,parameters}; //Change this to read an array collate(str); } break; case "MultipleClassifiers": //Where to put them, directory name, number of folds String[] name=readPath.split("/"); MultipleEstimatorEvaluation m=new MultipleEstimatorEvaluation(readPath, name[name.length-1]+"_"+folds+"_Resamples", folds); m.setIgnoreMissingResults(true); m.setBuildMatlabDiagrams(true); m.setDebugPrinting(true); m.setUseAllStatistics(); m.setDatasets(datasets); // m.setDatasets(reduxComplete); m.setTestResultsOnly(true); m.readInEstimators(classifierList,readPath); m.runComparison(); break; } } else{ //Cluster run bakeOffPath=bakeOffPathCluster; hiveCotePath=hiveCotePathCluster; System.out.println("Cluster Job Args:"); for(String s:args) System.out.println(s); switch(type){ case "QuickStats": singleClassifiervsReferenceResults(args); break; case "SingleClassifiers": singleClassifiersFullStats(args); break; case "MultipleClassifier": multipleClassifierFullStats(args); default: System.out.println("Unknown type = "+type); } } } }
90,682
43.517919
214
java
tsml-java
tsml-java-master/src/main/java/experiments/TransformExperiments.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package experiments; import experiments.data.DatasetLoading; import java.io.File; import java.math.BigDecimal; import java.math.BigInteger; import java.math.MathContext; import java.util.concurrent.TimeUnit; import java.util.logging.Level; import java.util.logging.Logger; import tsml.filters.shapelet_filters.ShapeletFilter; import tsml.transformers.Transformer; import tsml.transformers.shapelet_tools.ShapeletTransformTimingUtilities; import static tsml.transformers.shapelet_tools.ShapeletTransformTimingUtilities.nanoToOp; import tsml.transformers.shapelet_tools.search_functions.ShapeletSearch; import tsml.transformers.shapelet_tools.search_functions.ShapeletSearchFactory; import tsml.transformers.shapelet_tools.search_functions.ShapeletSearchOptions; import weka.core.Instances; import weka.core.converters.ArffSaver; /** * * @author Aaron Bostrom - Travis Test */ public class TransformExperiments { private final static Logger LOGGER = Logger.getLogger(TransformExperiments.class.getName()); public static boolean debug = false; public static void main(String[] args) throws Exception { System.out.println("Tony Dev Test"); if (args.length > 0) { ExperimentalArguments expSettings = new ExperimentalArguments(args); 
SetupTransformExperiment(expSettings); }else{ String[] settings=new String[7]; //Location of data set //settings[0]="-dp=E:/Data/TSCProblems2018/";//Where to get data settings[0]="-dp=D:/Research TSC/Data/TSCProblems2018/"; //settings[1]="-rp=E:/Results/";//Where to write results settings[1]="-rp=D:/Research TSC/Results/"; settings[2]="-gtf=false"; //Whether to generate train files or not settings[3]="-cn=ShapeletTransform"; //Classifier name // for(String str:DataSets.tscProblems78){ settings[4]="-dn=SonyAIBORobotSurface2"; //Problem file settings[5]="-f=2";//Fold number (fold number 1 is stored as testFold0.csv, its a cluster thing) settings[6]= "-ctrh=1"; System.out.println("Manually set args:"); for (String str : settings) System.out.println("\t"+str); ExperimentalArguments expSettings = new ExperimentalArguments(settings); SetupTransformExperiment(expSettings); // } } } public static void SetupTransformExperiment(ExperimentalArguments expSettings) throws Exception { if (debug) LOGGER.setLevel(Level.FINEST); else LOGGER.setLevel(Level.INFO); LOGGER.log(Level.FINE, expSettings.toString()); long hrs = TimeUnit.HOURS.convert(expSettings.contractTrainTimeNanos, TimeUnit.NANOSECONDS); //Build/make the directory to write the train and/or testFold files to String partialWriteLocation = expSettings.resultsWriteLocation + expSettings.estimatorName + hrs + "/"; String transformWriteLocation = partialWriteLocation + "Transforms/" + expSettings.datasetName + "/"; String additionalWriteLocation = partialWriteLocation + /*expSettings.classifierName*/ "Shapelets" + "/" + expSettings.datasetName + "/"; System.out.println(transformWriteLocation); File f = new File(transformWriteLocation); if (!f.exists()) f.mkdirs(); if (experiments.CollateResults.validateSingleFoldFile(transformWriteLocation) && experiments.CollateResults.validateSingleFoldFile(additionalWriteLocation)) { LOGGER.log(Level.INFO, expSettings.toShortString() + " already exists at "+additionalWriteLocation+", 
exiting."); LOGGER.log(Level.INFO, expSettings.toShortString() + " already exists at "+transformWriteLocation+", exiting."); } else{ Transformer transformer = TransformLists.setTransform(expSettings); Instances[] data = DatasetLoading.sampleDataset(expSettings.dataReadLocation, expSettings.datasetName, expSettings.foldId); runExperiment(expSettings, data[0], data[1], transformer, transformWriteLocation, additionalWriteLocation); LOGGER.log(Level.INFO, "Experiment finished {0}", expSettings.toShortString()); } } public static void runExperiment(ExperimentalArguments expSettings, Instances train, Instances test, Transformer transformer, String fullWriteLocation, String additionalDataFilePath) throws Exception{ //this is hacky, but will do. Instances[] transforms = setContractDataAndProcess(expSettings, train, test, transformer); //Filter.useFilter is wekas weird way Instances transformed_train = transforms[0]; Instances transformed_test = transforms[1]; ArffSaver saver = new ArffSaver(); String transformed_train_output = fullWriteLocation + expSettings.datasetName +"_TRAIN.arff"; String transformed_test_output = fullWriteLocation + expSettings.datasetName +"_TEST.arff"; saver.setInstances(transformed_train); saver.setFile(new File(transformed_train_output)); saver.writeBatch(); saver.setInstances(transformed_test); saver.setFile(new File(transformed_test_output)); saver.writeBatch(); writeAdditionalTransformData(expSettings, transformer, additionalDataFilePath); } private static Instances[] setContractDataAndProcess(ExperimentalArguments expSettings, Instances train, Instances test, Transformer transformer){ Instances[] out = new Instances[2]; switch(expSettings.estimatorName){ case"ST": case "ShapeletTransform": /*TODO: Can tidy it up big time. Or move some of this else where.*/ ShapeletFilter st = (ShapeletFilter)transformer; //do contracting. 
int m = train.numAttributes()-1; int n = train.numInstances(); ShapeletSearch.SearchType searchType = ShapeletSearch.SearchType.FULL; //kShapelets int numShapeletsInTransform=st.getNumberOfShapelets(); long numShapeletsToSearchFor = 0; if(expSettings.contractTrainTimeNanos > 0){ long time = expSettings.contractTrainTimeNanos; //time in nanoseconds for the number of hours we want to run for. //proportion of operations we can perform in time frame. BigInteger opCountTarget = new BigInteger(Long.toString(time / nanoToOp)); BigInteger opCount = ShapeletTransformTimingUtilities.calculateOps(n, m, 1, 1); BigDecimal oct = new BigDecimal(opCountTarget); BigDecimal oc = new BigDecimal(opCount); BigDecimal prop = oct.divide(oc, MathContext.DECIMAL64); //proportion of shapelets vs. total no. shapelets. numShapeletsToSearchFor = ShapeletTransformTimingUtilities.calculateNumberOfShapelets(n,m,3,m); //no point in searching more than full? if(prop.doubleValue() < 1.0){ numShapeletsToSearchFor *= prop.doubleValue(); System.out.println(numShapeletsToSearchFor); //make sure the k shapelets is less than the amount we're looking at. numShapeletsInTransform = numShapeletsToSearchFor > numShapeletsInTransform ? 
numShapeletsInTransform : (int) numShapeletsToSearchFor; searchType = ShapeletSearch.SearchType.IMPROVED_RANDOM; } } ShapeletSearchOptions sops = new ShapeletSearchOptions.Builder() .setSearchType(searchType) .setMin(3).setMax(m) .setSeed(expSettings.foldId) .setNumShapeletsToEvaluate(numShapeletsToSearchFor) .build(); st.setSearchFunction(new ShapeletSearchFactory(sops).getShapeletSearch()); st.setNumberOfShapelets(numShapeletsInTransform); out[0] = st.process(train); out[1] = st.process(test); break; default: System.out.println("UNKNOWN CLASSIFIER "+transformer); System.exit(0); // throw new Exception("Unknown classifier "+classifier); } return out; } private static void writeAdditionalTransformData(ExperimentalArguments expSettings, Transformer transformer, String additionalDataFilePath) { switch(expSettings.estimatorName){ case"ST": case "ShapeletTransform": ShapeletFilter st = (ShapeletFilter) transformer; st.writeAdditionalData(additionalDataFilePath, expSettings.foldId); break; default: System.out.println("UNKNOWN CLASSIFIER "+transformer); System.exit(0); // throw new Exception("Unknown classifier "+classifier); } } }
10,430
45.775785
204
java
tsml-java
tsml-java-master/src/main/java/experiments/TransformLists.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package experiments;

import tsml.transformers.*;

/**
 * Maps the transform names used on the experiment command line onto concrete
 * {@link Transformer} instances.
 *
 * @author Aaron Bostrom and Tony Bagnall
 */
public class TransformLists {

    //All implemented time series related SimpleBatchFilters in tsml
    //<editor-fold defaultstate="collapsed" desc="All time series related SimpleBatchFilters">
    // NOTE(review): "PowerSpectrum" is constructed by setClassicTransform below but was
    // missing from this list; added so the list matches the switch.
    public static String[] allFilters = {
        "ACF", "ACF_PACF", "ARMA", "BagOfPatterns", "BinaryTransform", "Clipping",
        "Cosine", "Derivative", "Differences", "Fast_FFT", "FFT", "Hilbert", "MatrixProfile",
        "NormalizeCase", "PAA", "PACF", "PowerCepstrum", "PowerSpectrum", "RankOrder",
        "RunLength", "SAX", "Sine", "SummaryStats", "ShapeletTransform"};
    //</editor-fold>

    //multivariate SimpleBatchFilters in tsml
    //<editor-fold defaultstate="collapsed" desc="Filters that transform univariate into multivariate">
    public static String[] multivariateFilters = {"Spectrogram", "MFCC"};
    //</editor-fold>

    /**
     * Builds the transformer named by the experiment settings.
     *
     * @param exp experiment settings; only {@code estimatorName} and {@code foldId} are read
     * @return a freshly constructed Transformer
     */
    public static Transformer setTransform(ExperimentalArguments exp) {
        return setClassicTransform(exp.estimatorName, exp.foldId);
    }

    //TODO: Fix for new Transformers.
    /**
     * Constructs the Transformer corresponding to {@code transformName}.
     * Prints a message and terminates the JVM for unknown names (legacy behaviour,
     * preserved for callers that rely on it).
     *
     * @param transformName name of the transform, e.g. "ST", "ACF", "SAX"
     * @param foldId        resample id; currently unused by every case here — kept in the
     *                      signature for backward compatibility (per-transform seeding may
     *                      be added later)
     * @return the newly constructed Transformer (never null; unknown names exit instead)
     */
    public static Transformer setClassicTransform(String transformName, int foldId) {
        Transformer transformer = null;
        switch (transformName) {
            case "ShapeletTransform":
            case "ST":
                transformer = new ShapeletTransform();
                break;
            case "ACF":
                transformer = new ACF();
                break;
            case "ACF_PACF":
                transformer = new ACF_PACF();
                break;
            case "ARMA":
                transformer = new ARMA();
                break;
            case "BagOfPatterns":
                transformer = new BagOfPatterns();
                break;
            case "BinaryTransform":
                transformer = new BinaryTransform();
                break;
            case "Clipping":
                transformer = new Clipping();
                break;
            case "Cosine":
                transformer = new Cosine();
                break;
            case "Derivative":
                transformer = new Derivative();
                break;
            case "Differences":
                transformer = new Differences();
                break;
            case "Fast_FFT":
                transformer = new Fast_FFT();
                break;
            case "FFT":
                transformer = new FFT();
                break;
            case "Hilbert":
                transformer = new Hilbert();
                break;
            case "MatrixProfile":
                transformer = new MatrixProfile();
                break;
            case "MFCC":
                transformer = new MFCC();
                break;
            case "NormalizeCase":
                // Name kept for historical reasons; the implementing class is RowNormalizer.
                transformer = new RowNormalizer();
                break;
            case "PAA":
                transformer = new PAA();
                break;
            case "PACF":
                transformer = new PACF();
                break;
            case "PowerCepstrum":
                transformer = new PowerCepstrum();
                break;
            case "PowerSpectrum":
                transformer = new PowerSpectrum();
                break;
            case "RankOrder":
                transformer = new RankOrder();
                break;
            case "RunLength":
                transformer = new RunLength();
                break;
            case "SAX":
                transformer = new SAX();
                break;
            case "Sine":
                transformer = new Sine();
                break;
            case "Spectrogram":
                transformer = new Spectrogram();
                break;
            case "SummaryStats":
                transformer = new SummaryStats();
                break;
            default:
                System.out.println("UNKNOWN TRANSFORM "+transformName);
                System.exit(0);
        }
        return transformer;
    }

    public static void main(String[] args) throws Exception {
        System.out.println(setClassicTransform("ST", 0));
        System.out.println(setClassicTransform("ShapeletTransform", 0));
    }
}
4,978
32.870748
103
java
tsml-java
tsml-java-master/src/main/java/experiments/data/DataProcessing.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ /* Multivariate data can be stored in Wekas "multi instance" format https://weka.wikispaces.com/Multi-instance+classification for TSC, the basic univariate syntax is */ package experiments.data; import experiments.CollateResults; import fileIO.InFile; import fileIO.OutFile; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; import java.net.MalformedURLException; import java.net.URL; import java.nio.file.Files; import java.nio.file.Paths; import java.nio.file.StandardCopyOption; import java.text.DecimalFormat; import java.util.Enumeration; import java.util.zip.ZipEntry; import java.util.zip.ZipOutputStream; import tsml.classifiers.multivariate.NN_ED_I; import utilities.ClassifierTools; import utilities.InstanceTools; import utilities.multivariate_tools.MultivariateInstanceTools; import weka.core.Attribute; import weka.core.Instance; import weka.core.Instances; /** * Sorting out the new archive, some general utility functions * @author ajb */ public class DataProcessing { public static void collateShapeletParameters(String readPath,String classifier,String[] problems) throws Exception { //ONE FOLD ONLY //1. 
List of full transforms vs random int count=0; int full=0; int withinContract=0; File of=new File(readPath+classifier+"/ParameterSummary/"); of.mkdirs(); OutFile timings = new OutFile(readPath+classifier+"/ParameterSummary/BuildTime"+classifier+".csv"); OutFile numShapelets = new OutFile(readPath+classifier+"/ParameterSummary/NumShapelets"+classifier+".csv"); OutFile combo = new OutFile(readPath+classifier+"/ParameterSummary/combo"+classifier+".csv"); OutFile timeRegression = new OutFile(readPath+classifier+"/ParameterSummary/singleShapeletTrainTime.arff"); timeRegression.writeLine("@Relation ShapeletTrainTimeRegression"); timeRegression.writeLine("@Attribute dataSet String"); timeRegression.writeLine("@Attribute log(n) real"); timeRegression.writeLine("@Attribute log(m) real"); timeRegression.writeLine("@Attribute log(s) real"); timeRegression.writeLine("@data"); //FORMAT by column: SearchType (full/random), transformContract (secs), transformActual (secs), proportion DecimalFormat df= new DecimalFormat("##.##"); double meanProportion=0; double meanOutForFull=0; timings.writeLine("problem,SearchType(Full/Random), transformContract(secs),transformActual(secs),proportionTimeUsed,classifierTime"); numShapelets.writeLine("problem,SearchType(Full/Random),numShapeletsInProblem,ProportionToEvaluate," + "numShapeletsInTransform,NumberShapeletsEvaluated,NumberShapeletsEarlyAbandoned"); combo.writeString("problem,SearchType(Full/Random), transformContract(secs),transformActual(secs),proportionTimeUsed,classifierTime"); combo.writeLine(",numShapeletsInProblem,ProportionToEvaluate,NumToEvaluate," + "NumInTransform,NumberEvaluated,NumberEarlyAbandoned,TotalNumber,TimePerShapelet,FullTimeEstimate(hrs),withinContract"); for (String problem : problems) { // Instances data=DatasetLoading.loadData(""+problem+"/"+problem+"_TRAIN.arff"); for(int i=0;i<30;i++) { File f = new File(readPath + classifier + "/Predictions/" + problem + "/testFold"+i+".csv"); if (f.exists()) { 
timings.writeString(problem); numShapelets.writeString(problem); combo.writeString(problem); count++; InFile inf = new InFile(readPath + classifier + "/Predictions/" + problem + "/testFold"+i+".csv"); String str = inf.readLine(); str = inf.readLine(); String[] split = str.split(","); System.out.println(problem + " Full/Random = " + split[22]); timings.writeString("," + split[11]); //Full/Random combo.writeString("," + split[11]); //Full/Random double contract = Double.parseDouble(split[5]);//Contracted time contract /= 1000000000.0; timings.writeString("," + df.format(contract));//Contract time combo.writeString("," + df.format(contract)); double actual = Double.parseDouble(split[1]); //Actual transform time actual /= 1000000000.0; timings.writeString("," + df.format(actual)); //Actual time combo.writeString("," + df.format(actual)); if (contract > 0) { timings.writeString("," + df.format(actual / contract));//Proportion combo.writeString("," + df.format(actual / contract)); } else { timings.writeString("," + 1.0);//Proportion combo.writeString("," + 1.0); } String str2 = inf.readLine(); String[] split2 = str2.split(","); double totalTime = Double.parseDouble(split[1]);//Total Time totalTime /= 1000000000.0; timings.writeLine("," + df.format(totalTime - actual));//Classifier time combo.writeString("," + df.format(totalTime - actual)); if (split[11].equals("FULL")) { full++; if (actual < contract) withinContract++; else meanOutForFull += actual / contract; } else meanProportion += actual / contract; numShapelets.writeString("," + split[11]); //Full/Random numShapelets.writeString("," + split[7]);//Shapelets in problem int totalShapelets = Integer.parseInt(split[7]); numShapelets.writeString("," + split[9]);//Proportion to evaluate numShapelets.writeString("," + split[33]);//Shapelets in transform numShapelets.writeString("," + split[35]);//Shapelets fully evaluated numShapelets.writeLine("," + split[37]);//Shapelets abandoned combo.writeString("," + 
split[7]);//Shapelets in problem combo.writeString("," + split[9]);//Proportion to evaluate int toEvaluate = (int) (Double.parseDouble(split[7]) * Double.parseDouble(split[9])); combo.writeString("," + toEvaluate);//Number to evaluate for full combo.writeString("," + split[33]);//Shapelets in transform combo.writeString("," + split[35]);//Shapelets evaluated combo.writeString("," + split[37]);//Shapelets abandoned double totalEvals = Double.parseDouble(split[35]) + Double.parseDouble(split[37]); System.out.println("Total evals"); combo.writeString("," + (long) totalEvals);//number actually evaluated double timePerS = actual / totalEvals; combo.writeString("," + timePerS);//Time per shapelet (secs) double hrsToFull = timePerS * totalShapelets / (60 * 60); combo.writeString("," + hrsToFull);//Time for full if (timePerS * totalShapelets < contract || split[11].equals("FULL")) combo.writeLine(",YES"); else combo.writeLine(",NO"); timeRegression.writeLine(timePerS+""); } } } timings.closeFile(); numShapelets.closeFile(); System.out.println(count+" problems present"); System.out.println(" Mean proportion actual/contract for random ="+meanProportion/(count-full)); System.out.println(" number of full transforms = "+full+" number full actually within contract ="+withinContract); System.out.println(" Mean proportion fill given out of contract "+meanOutForFull/(full-withinContract)); } public static void makeZips(String[] directories, String dest,String ... 
source) { File inf=new File(dest); inf.mkdirs(); for(String str: directories){ // create byte buffer byte[] buffer = new byte[1024]; FileOutputStream fos; ZipOutputStream zos =null; try { fos = new FileOutputStream(dest+str+".zip"); zos= new ZipOutputStream(fos); for(String src:source){ File dir = new File(src+str); File[] files = dir.listFiles(); for (int i = 0; i < files.length; i++) { System.out.println("Adding file: " + files[i].getName()); if(files[i].isFile()){ FileInputStream fis = new FileInputStream(files[i]); // begin writing a new ZIP entry, positions the stream to the start of the entry data zos.putNextEntry(new ZipEntry(files[i].getName())); int length; while ((length = fis.read(buffer)) > 0) { zos.write(buffer, 0, length); } zos.closeEntry(); // close the InputStream fis.close(); } } } zos.close(); } catch (FileNotFoundException ex) { System.out.println("ERROR OPENING THE ZIP on "+dest+str+".zip"); } catch (IOException ex) { System.out.println("ERROR CLOSING THE ZIP on "+dest+str+".zip"+" Exception ="+ex); } } } public static void concatenateShapelets(){ String path="E:\\Data\\ShapeletTransforms\\"; String[] timeLength={"1"}; String st = "ShapelelTransform"; String hybrid="Hybrid"; String combo="Combo"; for(String s:timeLength){ File f= new File(path+combo+s); f.mkdirs(); for(String str:DatasetLists.tscProblems2018){ //Check they are present File tr1,tr2,te1,te2; tr1=new File(path+st+s+"\\Transforms\\"+str+"\\"+str+"_TRAIN.arff"); te1=new File(path+st+s+"\\Transforms\\"+str+"\\"+str+"_TEST.arff"); tr2=new File(path+st+s+"\\Transforms\\"+str+"\\"+str+"_TRAIN.arff"); te2=new File(path+st+s+"\\Transforms\\"+str+"\\"+str+"_TEST.arff"); Instances train1,train2; Instances test1,test2; } //Load the data //Check class labels are alligned //Merge the instances //Write to new files } } public static void makeAllZipFiles(){ String[] paths={"Z:\\ArchiveData\\Univariate_arff\\","Z:\\ArchiveData\\Univariate_ts\\"}; String dest="E:\\ArchiveData\\Zips_Univariate\\"; 
String[] probs={"Adiac"};//DatasetLists.mtscProblems2018 makeZips(probs,dest,paths); // makeZips(DatasetLists.mtscProblems2018,path,dest); } public static void checkZipFiles(String[] fileNames,String path, String dest){ for(String str:fileNames){ try { URL zip=new URL(path+str+".zip"); long s=Files.copy(zip.openStream(),Paths.get(dest+str+".zip"),StandardCopyOption.REPLACE_EXISTING); System.out.println("Connected and opened "+path+str+".zip size = "+s); } catch (MalformedURLException ex) { System.out.println("UNABLE TO CONNECT TO ZIP FILE "+path+str+".zip"); System.exit(0); } catch (IOException ex) { System.out.println("UNABLE TO OPEN AND COPY ZIP FILE "+path+str+".zip"); System.exit(0); } } } public static void checkAllZipFiles(){ String path="http://www.timeseriesclassification.com/Downloads/"; String dest="C:\\temp\\Zips\\"; File f=new File(dest); f.mkdirs(); // checkZipFiles(DatasetLists.tscProblems2018,path,dest); checkZipFiles(DatasetLists.mtscProblems2018,path,dest); } public static void makeConcatenatedFiles(){ String path="Z:\\Data\\Multivariate TSC Problems\\"; String dest="Z:\\Data\\ConcatenatedMTSC\\"; OutFile out=new OutFile(path+"SummaryData.csv"); out.writeLine("problem,numTrainCases,numTestCases,numDimensions,seriesLength,numClasses"); String[] probs={"BasicMotions"}; for(String prob:DatasetLists.mtscProblems2018){ File t1=new File(dest+prob+"\\"+prob+"_TRAIN.arff"); File t2=new File(dest+prob+"\\"+prob+"_TRAIN.arff"); if(!(t1.exists()||t2.exists())){ Instances train =DatasetLoading.loadDataNullable(path+prob+"\\"+prob+"_TRAIN"); Instances test =DatasetLoading.loadDataNullable(path+prob+"\\"+prob+"_TEST"); System.out.println("PROBLEM "+prob); System.out.println("Num train instances ="+train.numInstances()); System.out.println("Num test instances ="+test.numInstances()); System.out.println("num train attributes (should be 2!)="+train.numAttributes()); System.out.println("num classes="+train.numClasses()); Instance temp=train.instance(0); Instances x= 
temp.relationalValue(0); System.out.println("train number of dimensions "+x.numInstances()); System.out.println("train number of attributes per dimension "+x.numAttributes()); temp=test.instance(0); x= temp.relationalValue(0); System.out.println("test number of dimensions "+x.numInstances()); System.out.println("test number of attributes per dimension "+x.numAttributes()); out.writeLine(prob+","+train.numInstances()+","+test.numInstances()+","+x.numInstances()+","+x.numAttributes()+","+train.numClasses()); int numAtts=x.numInstances()*x.numAttributes(); System.out.println(" Total number of attributes ="+numAtts); //Build a new train test file of concatenated attributes File f= new File(dest+prob); f.mkdirs(); OutFile uniTrain=new OutFile(dest+prob+"\\"+prob+"_TRAIN.arff"); OutFile uniTest=new OutFile(dest+prob+"\\"+prob+"_TEST.arff");; String header="@relation "+prob+"\n"; for(int i=0;i<numAtts;i++){ header+="@attribute att"+i+" numeric \n"; } header+="@attribute "+train.classAttribute().name()+ " {"; for(int i=0;i<train.numClasses()-1;i++) header+=i+","; header+=train.numClasses()-1+"}\n"; header+="@data \n"; uniTrain.writeString(header); uniTest.writeString(header); for(int i=0;i<train.numInstances();i++){ temp=train.instance(i); x= temp.relationalValue(0); for(Instance y:x){//Each dimension for(int j=0;j<y.numAttributes();j++) uniTrain.writeString(y.value(j)+","); } uniTrain.writeString((int)temp.classValue()+"\n"); } for(int i=0;i<test.numInstances();i++){ temp=test.instance(i); x= temp.relationalValue(0); for(Instance y:x){//Each dimension for(int j=0;j<y.numAttributes();j++) uniTest.writeString(y.value(j)+","); } uniTest.writeString((int)temp.classValue()+"\n"); } // System.out.println(" Object type ="+x); train = DatasetLoading.loadDataNullable(dest+prob+"\\"+prob+"_TRAIN"); System.out.println("Can load univariate "+dest+prob+"\\"+prob+"_TRAIN"); test = DatasetLoading.loadDataNullable(dest+prob+"\\"+prob+"_TEST"); System.out.println("Can load univariate 
"+dest+prob+"\\"+prob+"_TEST"); } else System.out.println("Already done "+prob); } } static enum MV_Classifiers {SHAPELETI, SHAPELETD, SHAPELET_INDEP, ED_I, ED_D, DTW_I, DTW_D, DTW_A} public static boolean isMultivariateClassifier(String classifier){ for (MV_Classifiers mvClassifier: MV_Classifiers.values()){ if (mvClassifier.name().toLowerCase().equals(classifier.toLowerCase())) { return true; } } return false; } //TODO CHECK TO SEE IF FILES ALREADY MADE public static Instances[] convertToUnivariate(String path, String dest, String prob){ if (!CollateResults.validateSingleFoldFile(dest+prob+"_UNI"+"/"+prob+"_UNI_TRAIN") || !CollateResults.validateSingleFoldFile(dest+prob+"_UNI"+"/"+prob+"_UNI_TEST")){ Instances train =DatasetLoading.loadDataNullable(path+prob+"/"+prob+"_TRAIN"); Instances test =DatasetLoading.loadDataNullable(path+prob+"/"+prob+"_TEST"); Instance temp=test.instance(0); Instances x= temp.relationalValue(0); int numAtts=x.numInstances()*x.numAttributes(); File f= new File(dest+prob+"_UNI"); f.mkdirs(); OutFile uniTrain=new OutFile(dest+prob+"_UNI"+"/"+prob+"_UNI_TRAIN.arff"); OutFile uniTest=new OutFile(dest+prob+"_UNI"+"/"+prob+"_UNI_TEST.arff"); String header="@relation "+prob+"\n"; for(int i=0;i<numAtts;i++){ header+="@attribute att"+i+" numeric \n"; } header+="@attribute "+train.classAttribute().name()+ " {"; for(int i=0;i<train.numClasses()-1;i++) header+=train.classAttribute().value(i)+","; header+=train.classAttribute().value(train.numClasses()-1)+"}\n"; header+="@data \n"; uniTrain.writeString(header); uniTest.writeString(header); for(int i=0;i<train.numInstances();i++){ temp=train.instance(i); x= temp.relationalValue(0); for(Instance y:x){//Each dimension for(int j=0;j<y.numAttributes();j++) uniTrain.writeString(y.value(j)+","); } uniTrain.writeString(temp.classAttribute().value((int)temp.classValue())+"\n"); } for(int i=0;i<test.numInstances();i++){ temp=test.instance(i); x= temp.relationalValue(0); for(Instance y:x){//Each dimension 
for(int j=0;j<y.numAttributes();j++) uniTest.writeString(y.value(j)+","); } if (temp.classIsMissing()){ uniTest.writeString("?\n"); } else { uniTest.writeString(temp.classAttribute().value((int)temp.classValue())+"\n"); } } } // System.out.println(" Object type ="+x); Instances train = DatasetLoading.loadDataNullable(dest+prob+"_UNI"+"/"+prob+"_UNI_TRAIN"); System.out.println("Can load univariate "+dest+prob+"_UNI"+"/"+prob+"_UNI_TRAIN"); Instances test = DatasetLoading.loadDataNullable(dest+prob+"_UNI"+"/"+prob+"_UNI_TEST"); System.out.println("Can load univariate "+dest+prob+"_UNI"+"/"+prob+"_UNI_TEST"); Instances[] i = new Instances[2]; i[0] = train; i[1] = test; return i; } //TODO CHECK TO SEE IF FILES ALREADY MADE public static Instances convertToUnivariateTrain(String path, String dest, String prob){ if (!CollateResults.validateSingleFoldFile(dest+prob+"_UNI"+"/"+prob+"_UNI_TRAIN")){ Instances train =DatasetLoading.loadDataNullable(path+prob+"/"+prob+"_TRAIN"); Instance temp=train.instance(0); Instances x= temp.relationalValue(0); int numAtts=x.numInstances()*x.numAttributes(); File f= new File(dest+prob+"_UNI"); f.mkdirs(); OutFile uniTrain=new OutFile(dest+prob+"_UNI"+"/"+prob+"_UNI_TRAIN.arff"); String header="@relation "+prob+"\n"; for(int i=0;i<numAtts;i++){ header+="@attribute att"+i+" numeric \n"; } header+="@attribute "+train.classAttribute().name()+ " {"; for(int i=0;i<train.numClasses()-1;i++) header+=train.classAttribute().value(i)+","; header+=train.classAttribute().value(train.numClasses()-1)+"}\n"; header+="@data \n"; uniTrain.writeString(header); for(int i=0;i<train.numInstances();i++){ temp=train.instance(i); x= temp.relationalValue(0); for(Instance y:x){//Each dimension for(int j=0;j<y.numAttributes();j++) uniTrain.writeString(y.value(j)+","); } uniTrain.writeString(temp.classAttribute().value((int)temp.classValue())+"\n"); } } // System.out.println(" Object type ="+x); Instances train = 
DatasetLoading.loadDataNullable(dest+prob+"_UNI"+"/"+prob+"_UNI_TRAIN"); System.out.println("Can load univariate "+dest+prob+"_UNI"+"/"+prob+"_UNI_TRAIN"); return train; } public static void checkConcatenatedFiles(){ String dest="Z:\\Data\\ConcatenatedMTSC\\"; for(String prob:DatasetLists.mtscProblems2018){ // System.out.println(" Object type ="+x); try{ Instances train = DatasetLoading.loadDataNullable(dest+prob+"\\"+prob+"_TRAIN"); System.out.println("Can load univariate "+dest+prob+"\\"+prob+"_TRAIN"); }catch(Exception e){ System.out.println("UNABLE TO LOAD :"+prob+" TRAIN FILE: EXCEPTION "+e); } try{ Instances test = DatasetLoading.loadDataNullable(dest+prob+"\\"+prob+"_TEST"); System.out.println("Can load univariate "+dest+prob+"\\"+prob+"_TEST"); }catch(Exception e){ System.out.println("UNABLE TO LOAD :"+prob+" TEST FILE: EXCEPTION "+e); } } } public static void formatPhilData(){ Instances multi=DatasetLoading.loadDataNullable("C:\\Users\\ajb\\Dropbox\\Data\\Multivariate TSC Problems\\FinalMulti"); Instances trans=MultivariateInstanceTools.transposeRelationalData(multi); // double[][] rawData= // Instances temp=DatasetLoading.loadDataNullable("C:\\Users\\ajb\\Dropbox\\Data\\Multivariate TSC Problems\\FinalUni"); // System.out.println(" Uni: num cases "+temp.numInstances()+" num atts ="+temp.numAttributes()); // Instances mtsc=MultivariateInstanceTools.convertUnivariateToMultivariate(temp,30); OutFile out=new OutFile("C:\\Users\\ajb\\Dropbox\\Data\\Multivariate TSC Problems\\RacketSports.arff"); out.writeString(trans.toString()); Instances test=DatasetLoading.loadDataNullable("C:\\Users\\ajb\\Dropbox\\Data\\Multivariate TSC Problems\\RacketSports.arff"); System.out.println("New data = "+test); Instances[] split=InstanceTools.resampleInstances(test, 0, 0.5); OutFile train=new OutFile("C:\\Users\\ajb\\Dropbox\\Data\\Multivariate TSC Problems\\RacketSports\\RacketSports_TRAIN.arff"); train.writeString(split[0].toString()); OutFile testF=new 
OutFile("C:\\Users\\ajb\\Dropbox\\Data\\Multivariate TSC Problems\\RacketSports\\RacketSports_TEST.arff"); testF.writeString(split[1].toString()); } public static void splitData(String path,String prob){ Instances all=DatasetLoading.loadDataNullable(path+prob+"\\"+prob); Instances[] split=InstanceTools.resampleInstances(all, 0, 0.5); OutFile out=new OutFile(path+prob+"\\"+prob+"_TRAIN.arff"); out.writeLine(split[0].toString()); out=new OutFile(path+prob+"\\"+prob+"_TEST.arff"); out.writeLine(split[1].toString()); } public static void formatDuckDuckGeese(){ String path="Z:\\Data\\MultivariateTSCProblems\\DuckDuckGeese\\"; Instances data=DatasetLoading.loadDataNullable(path+"DuckDuckGeese"); Instance temp=data.instance(0); Instances x= temp.relationalValue(0); System.out.println("train number of dimensions "+x.numInstances()); System.out.println("train number of attributes per dimension "+x.numAttributes()); Instances[] split= MultivariateInstanceTools.resampleMultivariateInstances(data, 0, 0.6); System.out.println("Train size ="+split[0].numInstances()); System.out.println("Test size ="+split[1].numInstances()); OutFile out=new OutFile(path+"DuckDuckGeese_TRAIN.arff"); out.writeString(split[0]+""); out=new OutFile(path+"DuckDuckGeese_TEST.arff"); out.writeString(split[1]+""); } public static void formatCricket(){ String path="Z:\\Data\\Multivariate Working Area\\Cricket\\"; Instances[] data=new Instances[6]; data[0]=DatasetLoading.loadDataNullable(path+"CricketXLeft.arff"); data[1]=DatasetLoading.loadDataNullable(path+"CricketYLeft.arff"); data[2]=DatasetLoading.loadDataNullable(path+"CricketZLeft.arff"); data[3]=DatasetLoading.loadDataNullable(path+"CricketXRight.arff"); data[4]=DatasetLoading.loadDataNullable(path+"CricketYRight.arff"); data[5]=DatasetLoading.loadDataNullable(path+"CricketZRight.arff"); Instances all=MultivariateInstanceTools.mergeToMultivariateInstances(data); OutFile out=new OutFile(path+"Cricket.arff"); System.out.println("Cricket number of 
instances ="+all.numInstances()); Instance temp=all.instance(0); Instances x= temp.relationalValue(0); System.out.println(" number of dimensions "+x.numInstances()); System.out.println(" number of attributes per dimension "+x.numAttributes()); out.writeString(all+""); Instances[] split= MultivariateInstanceTools.resampleMultivariateInstances(all, 0, 0.6); System.out.println("Train size ="+split[0].numInstances()); System.out.println("Test size ="+split[1].numInstances()); out=new OutFile(path+"Cricket_TRAIN.arff"); out.writeString(split[0]+""); out=new OutFile(path+"Cricket_TEST.arff"); out.writeString(split[1]+""); } public static void makeSingleDimensionFiles(String path, String[] probs,boolean overwrite){ for(String prob: probs){ System.out.println("Processing "+prob); if(prob.equals("InsectWingbeat")||prob.equals("FaceDetection")|| prob.equals("DuckDuckGeese")) continue; File f= new File(path+prob+"\\"+prob+"Dimension"+(1)+"_TRAIN.arff"); if(f.exists()&&!overwrite) continue; Instances train =DatasetLoading.loadDataNullable(path+prob+"\\"+prob+"_TRAIN"); Instances test =DatasetLoading.loadDataNullable(path+prob+"\\"+prob+"_TEST"); System.out.println("PROBLEM "+prob); System.out.println("Num train instances ="+train.numInstances()); System.out.println("Num test instances ="+test.numInstances()); System.out.println("num attributes (should be 2!)="+train.numAttributes()); System.out.println("num classes="+train.numClasses()); Instance temp=train.instance(0); Instances x= temp.relationalValue(0); System.out.println(" number of dimensions "+x.numInstances()); System.out.println(" number of attributes per dimension "+x.numAttributes()); Instances[] splitTest=MultivariateInstanceTools.splitMultivariateInstances(test); Instances[] splitTrain=MultivariateInstanceTools.splitMultivariateInstances(train); System.out.println(" Num split files ="+splitTest.length); for(int i=0;i<splitTrain.length;i++){ System.out.println("Number of test instances = 
"+splitTest[i].numInstances()); OutFile outTrain=new OutFile(path+prob+"\\"+prob+"Dimension"+(i+1)+"_TRAIN.arff"); outTrain.writeLine(splitTrain[i].toString()+""); OutFile outTest=new OutFile(path+prob+"\\"+prob+"Dimension"+(i+1)+"_TEST.arff"); outTest.writeLine(splitTest[i].toString()+""); } // System.out.println(" Object type ="+x); } } public static void checkSpeechMarks(){ String path="Z:\\Data\\MultivariateTSCProblems\\"; OutFile out=new OutFile("Z:\\Data\\MultivariateTSCProblems\\SummaryData.csv"); out.writeLine("problem,numTrainCases,numTestCases,numDimensions,seriesLength,numClasses"); for(String prob: DatasetLists.mtscProblems2018){ InFile[] split = new InFile[2]; split[0] =new InFile(path+prob+"\\"+prob+"_TRAIN.arff"); split[1] =new InFile(path+prob+"\\"+prob+"_TEST.arff"); //Ignore header for(InFile f:split){ String line=f.readLine(); while(!line.startsWith("@data")) line=f.readLine(); line=f.readLine(); while(line!=null && !line.contains("\"")) line=f.readLine(); if(line!=null){ System.out.println("Problem "+prob+" contains speech marks "+line); } } } } public static void removeSpeechMarks(){ String[] problems={"ERing","JapaneseVowels","PenDigits","SpokenArabicDigits"}; String path="Z:\\Data\\MultivariateTSCProblems\\"; String path2="Z:\\Data\\Temp\\"; for(String prob: problems){ InFile[] split = new InFile[2]; split[0] =new InFile(path+prob+"\\"+prob+"_TRAIN.arff"); split[1] =new InFile(path+prob+"\\"+prob+"_TEST.arff"); OutFile[] split2 = new OutFile[2]; File file= new File(path2+prob); file.mkdirs(); split2[0] =new OutFile(path2+prob+"\\"+prob+"_TRAIN.arff"); split2[1] =new OutFile(path2+prob+"\\"+prob+"_TEST.arff"); //Ignore header for(int i=0;i<split.length;i++){ String line=split[i].readLine(); while(!line.startsWith("@data")){ split2[i].writeLine(line); line=split[i].readLine(); } split2[i].writeLine(line); line=split[i].readLine(); while(line!=null){ String replaceString=line.replace("\"","'"); split2[i].writeLine(replaceString); 
line=split[i].readLine(); } if(line!=null){ System.out.println("SHOULD NOT GET HERE!"); } } } } public static void summariseMultivariateData(){ String path="Z:\\Data\\MultivariateTSCProblems\\"; OutFile out=new OutFile("Z:\\Data\\MultivariateTSCProblems\\SummaryData.csv"); out.writeLine("problem,numTrainCases,numTestCases,numDimensions,seriesLength,numClasses"); for(int i=0;i<DatasetLists.mtscProblems2018.length;i++){ String prob=DatasetLists.mtscProblems2018[i]; System.out.println("PROBLEM "+prob); Instances train =DatasetLoading.loadDataNullable(path+prob+"\\"+prob+"_TRAIN"); Instances test =DatasetLoading.loadDataNullable(path+prob+"\\"+prob+"_TEST"); System.out.println("Num train instances ="+train.numInstances()); System.out.println("Num test instances ="+test.numInstances()); System.out.println("num attributes (should be 2!)="+train.numAttributes()); System.out.println("num classes="+train.numClasses()); Instance temp=train.instance(0); Instances x= temp.relationalValue(0); System.out.println(" number of dimensions "+x.numInstances()); System.out.println(" number of attributes per dimension "+x.numAttributes()); out.writeLine(prob+","+train.numInstances()+","+test.numInstances()+","+x.numInstances()+","+x.numAttributes()+","+train.numClasses()); // System.out.println(" Object type ="+x); } } public static void summariseUnivariateData(String path){ // String path="Z:\\Data\\TSCProblems2018\\"; OutFile out=new OutFile(path+"SummaryData.csv"); out.writeLine("problem,numTrainCases,numTestCases,seriesLength,numClasses"); for(int i=0;i<DatasetLists.tscProblems2018.length;i++){ String prob=DatasetLists.tscProblems2018[i]; try{ System.out.println("PROBLEM "+prob); Instances train =DatasetLoading.loadDataNullable(path+prob+"\\"+prob+"_TRAIN"); Instances test =DatasetLoading.loadDataNullable(path+prob+"\\"+prob+"_TEST"); System.out.println("Num train instances ="+train.numInstances()); System.out.println("Num test instances ="+test.numInstances()); 
System.out.println("num attributes ="+(train.numAttributes()-1)); System.out.println("num classes="+train.numClasses()); out.writeLine(prob+","+train.numInstances()+","+test.numInstances()+","+(train.numAttributes()-1)+","+train.numClasses()); }catch(Exception e){ System.out.println("ERROR loading file "+prob); } // System.out.println(" Object type ="+x); } } public static void testSimpleClassifier() throws Exception{ String path="Z:\\Data\\MultivariateTSCProblems\\"; for(int i=15;i<DatasetLists.mtscProblems2018.length;i++){ String prob=DatasetLists.mtscProblems2018[i]; System.out.println("PROBLEM "+prob); Instances train =DatasetLoading.loadDataNullable(path+prob+"\\"+prob+"_TRAIN"); Instances test =DatasetLoading.loadDataNullable(path+prob+"\\"+prob+"_TEST"); System.out.println("Num train instances ="+train.numInstances()); System.out.println("Num test instances ="+test.numInstances()); System.out.println("num attributes (should be 2!)="+train.numAttributes()); System.out.println("num classes="+train.numClasses()); Instance temp=train.instance(0); Instances x= temp.relationalValue(0); System.out.println(" number of dimensions "+x.numInstances()); System.out.println(" number of attributes per dimension "+x.numAttributes()); NN_ED_I nb = new NN_ED_I(); nb.buildClassifier(train); double a=ClassifierTools.accuracy(test, nb); System.out.println("Problem ="+prob+" 1-NN ED accuracy ="+a); } } //1. Format into a standard flat ARFF, then make into a multivariate problem. 
BCI II data set ia public static void formatSelfRegulationSCP1() throws Exception { String path="C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 2\\Data Set 1a\\"; InFile class1=new InFile(path+"Traindata_0.txt"); InFile class2=new InFile(path+"Traindata_1.txt"); OutFile arff=new OutFile(path+"SelfRegulationSCPUni_TRAIN.arff"); int numC1=135; int numC2=133; int d=6; int m=896; arff.writeLine("@relation SelfRegulationSCP1"); for(int i=1;i<=d*m;i++) arff.writeLine("@attribute att"+i+" real"); arff.writeLine("@attribute cortical {negativity,positivity}"); arff.writeLine("@data"); for(int i=0;i<numC1;i++){ String line=class1.readLine(); String[] split=line.split("\\s+"); for(int j=1;j<=d*m;j++) arff.writeString(split[j]+","); arff.writeLine("negativity"); } for(int i=0;i<numC2;i++){ String line=class2.readLine(); String[] split=line.split("\\s+"); for(int j=1;j<=d*m;j++) arff.writeString(split[j]+","); arff.writeLine("positivity"); } arff.closeFile(); Instances temp=DatasetLoading.loadDataNullable(path+"SelfRegulationSCP1Uni_TRAIN.arff"); Instances multi=MultivariateInstanceTools.convertUnivariateToMultivariate(temp,896); System.out.println("Num instances "+multi.numInstances()); System.out.println("Num atts "+multi.numAttributes()); arff=new OutFile(path+"SelfRegulationSCP1_TRAIN.arff"); arff.writeLine(multi.toString()); int testSize=293; InFile test=new InFile(path+"TestData.txt"); arff=new OutFile(path+"SelfRegulationSCP1Uni_TEST.arff"); arff.writeLine("@relation SelfRegulationSCP1"); for(int i=1;i<=d*m;i++) arff.writeLine("@attribute att"+i+" real"); arff.writeLine("@attribute cortical {negativity,positivity}"); arff.writeLine("@data"); for(int i=0;i<testSize;i++){ String line=test.readLine(); String[] split=line.split("\\s+"); for(int j=1;j<=d*m;j++) arff.writeString(split[j]+","); if(split[0].equals("0.00")) arff.writeLine("negativity"); else arff.writeLine("positivity"); } temp=DatasetLoading.loadDataNullable(path+"SelfRegulationSCPUni_TEST.arff"); 
multi=MultivariateInstanceTools.convertUnivariateToMultivariate(temp,896); System.out.println("Num instances "+multi.numInstances()); System.out.println("Num atts "+multi.numAttributes()); arff=new OutFile(path+"SelfRegulationSCP1_TEST.arff"); arff.writeLine(multi.toString()); } //1. Format into a standard flat ARFF, then make into a multivariate problem. BCI II data set ib public static void formatSelfRegulationSCP2() throws Exception { String path="C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 2\\Data Set 1b\\"; InFile class1=new InFile(path+"Traindata_0.txt"); InFile class2=new InFile(path+"Traindata_1.txt"); OutFile arff=new OutFile(path+"SelfRegulationSCP2Uni_TRAIN.arff"); int numC1=100; int numC2=100; int d=7; int m=1152; arff.writeLine("@relation SelfRegulationSCP2"); for(int i=1;i<=d*m;i++) arff.writeLine("@attribute att"+i+" real"); arff.writeLine("@attribute cortical {negativity,positivity}"); arff.writeLine("@data"); for(int i=0;i<numC1;i++){ String line=class1.readLine(); String[] split=line.split("\\s+"); for(int j=1;j<=d*m;j++) arff.writeString(split[j]+","); arff.writeLine("negativity"); } for(int i=0;i<numC2;i++){ String line=class2.readLine(); String[] split=line.split("\\s+"); for(int j=1;j<=d*m;j++) arff.writeString(split[j]+","); arff.writeLine("positivity"); } arff.closeFile(); Instances temp=DatasetLoading.loadDataNullable(path+"SelfRegulationSCP2Uni_TRAIN.arff"); Instances multi=MultivariateInstanceTools.convertUnivariateToMultivariate(temp,m); System.out.println("Num instances "+multi.numInstances()); System.out.println("Num atts "+multi.numAttributes()); arff=new OutFile(path+"SelfRegulationSCP2_TRAIN.arff"); arff.writeLine(multi.toString()); int testSize=180; InFile test=new InFile(path+"TestData.txt"); arff=new OutFile(path+"SelfRegulationSCP2Uni_TEST.arff"); arff.writeLine("@relation SelfRegulationSCP2"); for(int i=1;i<=d*m;i++) arff.writeLine("@attribute att"+i+" real"); arff.writeLine("@attribute cortical {negativity,positivity}"); 
arff.writeLine("@data"); for(int i=0;i<testSize;i++){ String line=test.readLine(); String[] split=line.split("\\s+"); for(int j=1;j<=d*m;j++) arff.writeString(split[j]+","); if(split[0].equals("0.00")) arff.writeLine("negativity"); else arff.writeLine("positivity"); } temp=DatasetLoading.loadDataNullable(path+"SelfRegulationSCP2Uni_TEST.arff"); multi=MultivariateInstanceTools.convertUnivariateToMultivariate(temp,m); System.out.println("Num instances "+multi.numInstances()); System.out.println("Num atts "+multi.numAttributes()); arff=new OutFile(path+"SelfRegulationSCP2_TEST.arff"); arff.writeLine(multi.toString()); } //1. Format into a standard flat ARFF, then make into a multivariate problem. BCI II data set IV public static void formatFingerMovements() throws Exception { String path="C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 2\\Data Set IV\\"; InFile train=new InFile(path+"sp1s_aa_train.txt"); OutFile arffTrain=new OutFile(path+"FingerMovementsUni_TRAIN.arff"); int d=28; int m=50; int trainSize=316; int testSize=100; arffTrain.writeLine("@relation FingerMovements"); for(int i=1;i<=d*m;i++) arffTrain.writeLine("@attribute att"+i+" real"); arffTrain.writeLine("@attribute hand {left,right}"); arffTrain.writeLine("@data"); for(int i=0;i<trainSize;i++){ String line=train.readLine(); String[] split=line.split("\\s+"); for(int j=1;j<=d*m;j++) arffTrain.writeString(split[j]+","); if(split[0].equals("0.00")) arffTrain.writeLine("left"); else arffTrain.writeLine("right"); } Instances temp=DatasetLoading.loadDataNullable(path+"FingerMovementsUni_TRAIN.arff"); Instances multi=MultivariateInstanceTools.convertUnivariateToMultivariate(temp,m); System.out.println("Num instances "+multi.numInstances()); System.out.println("Num atts "+multi.numAttributes()); arffTrain=new OutFile(path+"FingerMovements_TRAIN.arff"); arffTrain.writeLine(multi.toString()); InFile test=new InFile(path+"sp1s_aa_test.txt"); OutFile arffTest=new OutFile(path+"FingerMovementsUni_TEST.arff"); 
arffTest.writeLine("@relation FingerMovements"); for(int i=1;i<=d*m;i++) arffTest.writeLine("@attribute att"+i+" real"); arffTest.writeLine("@attribute hand {left,right}"); arffTest.writeLine("@data"); for(int i=0;i<testSize;i++){ String line=test.readLine(); String[] split=line.split("\\s+"); for(int j=1;j<=d*m;j++) arffTest.writeString(split[j]+","); if(split[0].equals("0.00")) arffTest.writeLine("left"); else arffTest.writeLine("right"); } temp=DatasetLoading.loadDataNullable(path+"FingerMovementsUni_TEST.arff"); multi=MultivariateInstanceTools.convertUnivariateToMultivariate(temp,m); System.out.println("Num instances "+multi.numInstances()); System.out.println("Num atts "+multi.numAttributes()); arffTrain=new OutFile(path+"FingerMovements_TEST.arff"); arffTrain.writeLine(multi.toString()); } public static void formatCharacterTrajectories() throws Exception { //#classes= 20, d=3, length=109-205, train 6600, test 2200 InFile train = new InFile(""); InFile test = new InFile(""); OutFile trainarff = new OutFile(""); OutFile testarff = new OutFile(""); String line=train.readLine(); while(line!=null){ // String[] split } } //BCI 3 Dataset 1 public static void formatMotorImagery(){ //Each channel is on a different line in the text file. 
//Labels in a separate text file int m=3000; int d=64; int trainSize=278; int testSize=100; InFile trainCSV=new InFile("C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 3\\Data Set 1\\Competition_train_cnt.csv"); InFile testCSV=new InFile("C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 3\\Data Set 1\\Competition_test_cnt.csv"); InFile trainLabels=new InFile("C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 3\\Data Set 1\\Competition_train_lab.txt"); InFile testLabels=new InFile("C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 3\\Data Set 1\\Test Set Labels.txt"); String arffP="C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 3\\Data Set 1\\MotorImageryUni_TRAIN.arff"; String arffP2="C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 3\\Data Set 1\\MotorImageryUni_TEST.arff"; OutFile arffTrain=new OutFile(arffP); arffTrain.writeLine("@relation MotorImagery"); for(int i=1;i<=d*m;i++) arffTrain.writeLine("@attribute att"+i+" real"); arffTrain.writeLine("@attribute motion{finger,tongue}"); arffTrain.writeLine("@data"); for(int i=0;i<trainSize;i++){ for(int j=0;j<d;j++) arffTrain.writeString(trainCSV.readLine()+","); int label=trainLabels.readInt(); if(label==-1) arffTrain.writeLine("finger"); else arffTrain.writeLine("tongue"); } arffTrain.closeFile(); Instances tr=DatasetLoading.loadDataNullable(arffP); System.out.println("Num instances ="+tr.numInstances()+" num atts ="+tr.numAttributes()); Instances multi=MultivariateInstanceTools.convertUnivariateToMultivariate(tr,m); System.out.println("Num instances "+multi.numInstances()); System.out.println("Num atts "+multi.numAttributes()); arffTrain=new OutFile("C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 3\\Data Set 1\\MotorImagery_TRAIN.arff"); arffTrain.writeLine(multi.toString()); OutFile arffTest=new OutFile(arffP2); arffTest.writeLine("@relation MotorImagery"); for(int i=1;i<=d*m;i++) arffTest.writeLine("@attribute att"+i+" real"); arffTest.writeLine("@attribute motion{finger,tongue}"); arffTest.writeLine("@data"); for(int 
i=0;i<testSize;i++){ for(int j=0;j<d;j++) arffTest.writeString(testCSV.readLine()+","); int label=testLabels.readInt(); if(label==-1) arffTest.writeLine("finger"); else arffTest.writeLine("tongue"); } arffTest.closeFile(); Instances te=DatasetLoading.loadDataNullable(arffP2); System.out.println("Num instances ="+te.numInstances()+" num atts ="+te.numAttributes()); multi=MultivariateInstanceTools.convertUnivariateToMultivariate(te,m); System.out.println("Num instances "+multi.numInstances()); System.out.println("Num atts "+multi.numAttributes()); arffTest=new OutFile("C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 3\\Data Set 1\\MotorImagery_TEST.arff"); arffTest.writeLine(multi.toString()); System.out.println("TEST Num instances ="+te.numInstances()+" num atts ="+te.numAttributes()); } public static void formatERing(){ InFile inf= new InFile("C:\\temp\\ERingTest.csv"); InFile labs= new InFile("C:\\temp\\ERingTestLabels.csv"); OutFile outf=new OutFile("C:\\temp\\ERing_TEST.arff"); int[] labels=new int[270]; int dims=4; for(int i=0;i<270;i++){ labels[i]=labs.readInt(); outf.writeString("'"); for(int j=0;j<dims-1;j++){ String temp=inf.readLine(); System.out.println(temp); outf.writeString(temp+" \\n "); } String temp=inf.readLine(); outf.writeLine(temp+"',"+labels[i]); } inf= new InFile("C:\\temp\\ERingTrain.csv"); labs= new InFile("C:\\temp\\ERingTrainLabels.csv"); outf=new OutFile("C:\\temp\\ERing_TRAIN.arff"); labels=new int[30]; for(int i=0;i<30;i++){ labels[i]=labs.readInt(); outf.writeString("'"); for(int j=0;j<dims-1;j++){ String temp=inf.readLine(); System.out.println(temp); outf.writeLine(temp); } String temp=inf.readLine(); outf.writeLine(temp+"',"+labels[i]); } } public static void makeTSFormatFilesForUnivariate(String[] problems, String path, String outPath){ boolean overwrite = true; for(int i=0;i<problems.length;i++){ //Check if they already exist String prob=problems[i]; System.out.println("PROBLEM "+prob); Instances[] split= new Instances[2]; split[0] 
=DatasetLoading.loadDataNullable(path+prob+"\\"+prob+"_TRAIN"); split[1] =DatasetLoading.loadDataNullable(path+prob+"\\"+prob+"_TEST"); boolean univariate =true; for(int j=0;j<split[0].numAttributes()&& univariate; j++) if(split[0].attribute(j).isRelationValued()) univariate=false; if(!univariate){ System.out.println("Problem "+prob+" is multivariate, call makeTSFormatFilesForMultivariate instead"); continue; } OutFile[] tsFormat= new OutFile[2]; File dir=new File(outPath+prob); if(!dir.isDirectory()){ dir.mkdirs(); } else if(!overwrite) { File f1=new File(outPath+prob+"\\"+prob+"_TEST.ts"); File f2=new File(outPath+prob+"\\"+prob+"_TRAIN.ts"); if(f1.exists() || f2.exists()){ System.out.println("Problem "+prob+ " already formatted, skipping"); continue; } } tsFormat[0]=new OutFile(outPath+prob+"\\"+prob+"_TRAIN.ts"); tsFormat[1]=new OutFile(outPath+prob+"\\"+prob+"_TEST.ts"); File f= new File(path+prob+"\\"+prob+".txt"); if(!f.exists()) System.out.println("ERROR cannot locate header "+path+prob+"\\"+prob+".txt"); InFile comment= new InFile(path+prob+"\\"+prob+".txt"); //Insert comment first String line=comment.readLine(); while(line!=null){ if(tsFormat[0]!=null) tsFormat[0].writeLine("#"+line); if(tsFormat[1]!=null) tsFormat[1].writeLine("#"+line); line=comment.readLine(); } boolean padded=false; //Assume padded if last value is missing for(int j=0;j<split.length && !padded;j++){ for(int k=0;k< split[j].numInstances()&& !padded; k++){ if(split[j].instance(k).isMissing(split[j].numAttributes()-2)){ padded=true; System.out.println("Problem "+prob+" is padded for instance "+k+ " for split "+j); System.out.println("Value = "+split[j].instance(k).value(split[j].numAttributes()-2)+" Previous value is "+split[j].instance(k).value(split[j].numAttributes()-3)); } } } boolean missing =false; if(!padded){ //Search the whole series for(int j=0;j<split.length && !missing;j++){ for(int k=0;k< split[j].numInstances() && !missing; k++){ if(split[j].instance(k).hasMissingValue()) 
missing=true; } } }else{//, only search upto the last value present for(int j=0;j<split.length && !missing;j++){ for(int k=0;k< split[j].numInstances() && !missing; k++){ //Find last value not missing int end=split[j].numAttributes()-2; while( end>=0 && split[j].instance(k).isMissing(end)) end--; for(int m=0;m<=end;m++){ if(split[j].instance(k).isMissing(m)){ missing=true; System.out.println("Case "+k+" missing value pos ="+m+" end = "+end); continue; } } } } } System.out.println("Missing = "+missing); for(int j=0;j<tsFormat.length;j++){ tsFormat[j].writeLine("@problemName "+prob); tsFormat[j].writeLine("@timeStamps false"); tsFormat[j].writeLine("@missing "+missing); tsFormat[j].writeLine("@univariate "+univariate); tsFormat[j].writeLine("@equalLength "+!padded); if(!padded) tsFormat[j].writeLine("@seriesLength "+(split[j].instance(0).numAttributes()-1)); tsFormat[j].writeString("@classLabel true"); Attribute cv= split[j].classAttribute(); // Print the values of "position" Enumeration attValues = cv.enumerateValues(); while (attValues.hasMoreElements()) { String string = (String)attValues.nextElement(); tsFormat[j].writeString(" "+string); } tsFormat[j].writeLine("\n@data"); System.out.println("Finished meta data"); for(Instance ins:split[j]){ double[] data=ins.toDoubleArray(); //Find end if padded int end=data.length-2; if(padded){ while( end>=0 && Double.isNaN(data[end])) end--; } // System.out.println("End of series = "+end); if(Double.isNaN(data[0])) tsFormat[j].writeString("?"); else tsFormat[j].writeString(""+data[0]); for(int k=1;k<=end;k++){ if(Double.isNaN(data[k])) tsFormat[j].writeString(",?"); else tsFormat[j].writeString(","+data[k]); } int classV=(int)data[data.length-1]; String clsString=cv.value(classV); tsFormat[j].writeLine(":"+clsString); } } } } public static void makeTSFormatFilesForResamples(String[] problems, String path, String outPath){ boolean overwrite = true; for(int i=0;i<problems.length;i++){ //Check if they already exist String 
prob=problems[i]; System.out.println("PROBLEM "+prob); for(int fold=0;fold<30;fold++){ Instances[] split= new Instances[2]; split[0] =DatasetLoading.loadDataNullable(path+prob+"\\"+prob+fold+"_TRAIN"); split[1] =DatasetLoading.loadDataNullable(path+prob+"\\"+prob+fold+"_TEST"); boolean univariate =true; for(int j=0;j<split[0].numAttributes()&& univariate; j++) if(split[0].attribute(j).isRelationValued()) univariate=false; if(!univariate){ System.out.println("Problem "+prob+" is multivariate, call makeTSFormatFilesForMultivariate intead"); continue; } OutFile[] tsFormat= new OutFile[2]; File dir=new File(outPath+prob); if(!dir.isDirectory()){ dir.mkdirs(); } else if(!overwrite) { File f1=new File(outPath+prob+"\\"+prob+fold+"_TEST.ts"); File f2=new File(outPath+prob+"\\"+prob+fold+"_TRAIN.ts"); if(f1.exists() || f2.exists()){ System.out.println("Problem "+prob+ " already formatted, skipping"); continue; } } tsFormat[0]=new OutFile(outPath+prob+"\\"+prob+fold+"_TRAIN.ts"); tsFormat[1]=new OutFile(outPath+prob+"\\"+prob+fold+"_TEST.ts"); File f= new File(path+prob+"\\"+prob+".txt"); if(!f.exists()) System.out.println("ERROR cannot locate header "+path+prob+"\\"+prob+".txt"); InFile comment= new InFile(path+prob+"\\"+prob+".txt"); //Insert comment first String line=comment.readLine(); while(line!=null){ if(tsFormat[0]!=null) tsFormat[0].writeLine("#"+line); if(tsFormat[1]!=null) tsFormat[1].writeLine("#"+line); line=comment.readLine(); } boolean padded=false; //Assume padded if last value is missing for(int j=0;j<split.length && !padded;j++){ for(int k=0;k< split[j].numInstances()&& !padded; k++){ if(split[j].instance(k).isMissing(split[j].numAttributes()-2)){ padded=true; System.out.println("Problem "+prob+" is padded for instance "+k+ " for split "+j); System.out.println("Value = "+split[j].instance(k).value(split[j].numAttributes()-2)+" Previous value is "+split[j].instance(k).value(split[j].numAttributes()-3)); } } } boolean missing =false; if(!padded){ //Search 
the whole series for(int j=0;j<split.length && !missing;j++){ for(int k=0;k< split[j].numInstances() && !missing; k++){ if(split[j].instance(k).hasMissingValue()) missing=true; } } }else{//, only search upto the last value present for(int j=0;j<split.length && !missing;j++){ for(int k=0;k< split[j].numInstances() && !missing; k++){ //Find last value not missing int end=split[j].numAttributes()-2; while( end>=0 && split[j].instance(k).isMissing(end)) end--; for(int m=0;m<=end;m++){ if(split[j].instance(k).isMissing(m)){ missing=true; System.out.println("Case "+k+" missing value pos ="+m+" end = "+end); continue; } } } } } for(int j=0;j<tsFormat.length;j++){ tsFormat[j].writeLine("@problemName "+prob); tsFormat[j].writeLine("@timeStamps false"); tsFormat[j].writeLine("@missing "+missing); tsFormat[j].writeLine("@univariate "+univariate); tsFormat[j].writeLine("@equalLength "+!padded); if(!padded) tsFormat[j].writeLine("@seriesLength "+(split[j].instance(0).numAttributes()-1)); tsFormat[j].writeString("@classLabel true"); Attribute cv= split[j].classAttribute(); // Print the values of "position" Enumeration attValues = cv.enumerateValues(); while (attValues.hasMoreElements()) { String string = (String)attValues.nextElement(); tsFormat[j].writeString(" "+string); } tsFormat[j].writeLine("\n@data"); for(Instance ins:split[j]){ double[] data=ins.toDoubleArray(); //Find end if padded int end=data.length-2; if(padded){ while( end>=0 && Double.isNaN(data[end])) end--; } // System.out.println("End of series = "+end); if(Double.isNaN(data[0])) tsFormat[j].writeString("?"); else tsFormat[j].writeString(""+data[0]); for(int k=1;k<=end;k++){ if(Double.isNaN(data[k])) tsFormat[j].writeString(",?"); else tsFormat[j].writeString(","+data[k]); } int classV=(int)data[data.length-1]; String clsString=cv.value(classV); tsFormat[j].writeLine(":"+clsString); } } } } } public static void makeTSFormatFilesForMultivariate(String[] problems, String path, String outPath, boolean overwrite){ 
for(int i=0;i<problems.length;i++){ //Check if they already exist System.out.println("Problem = "+problems[i]); String prob=problems[i]; File dir=new File(outPath+prob); if(!dir.isDirectory()){ dir.mkdirs(); } else if(!overwrite) { File f1=new File(outPath+prob+"\\"+prob+"_TEST.ts"); File f2=new File(outPath+prob+"\\"+prob+"_TRAIN.ts"); if(f1.exists() || f2.exists()){ System.out.println("Problem "+prob+ " already formatted, skipping"); continue; } } Instances[] split= new Instances[2]; split[0] =DatasetLoading.loadDataNullable(path+prob+"\\"+prob+"_TRAIN"); split[1] =DatasetLoading.loadDataNullable(path+prob+"\\"+prob+"_TEST"); int dimensions=split[0].instance(0).relationalValue(0).numInstances(); boolean univariate =false; for(int j=0;j<split[0].numAttributes()-1&& !univariate; j++) if(!split[0].attribute(j).isRelationValued()) univariate=true; if(univariate){ System.out.println("Problem "+prob+" is univariate, call makeTSFormatFilesForMultivariate intead. Skipping this one"); continue; } System.out.println("PROBLEM "+prob+" has dimension "+dimensions); OutFile[] tsFormat= new OutFile[2]; tsFormat[0]=new OutFile(outPath+prob+"\\"+prob+"_TRAIN.ts"); tsFormat[1]=new OutFile(outPath+prob+"\\"+prob+"_TEST.ts"); File f= new File(path+prob+"\\"+prob+".txt"); if(!f.exists()) System.out.println("ERROR cannot locate header "+path+prob+"\\"+prob+".txt"); InFile comment= new InFile(path+prob+"\\"+prob+".txt"); //Insert comment first String line=comment.readLine(); while(line!=null){ if(tsFormat[0]!=null) tsFormat[0].writeLine("#"+line); if(tsFormat[1]!=null) tsFormat[1].writeLine("#"+line); line=comment.readLine(); } //Find if padded with missing values boolean padded=false; //Assume padded if last value is missing for(int j=0;j<split.length && !padded;j++){ for(int k=0;k< split[j].numInstances()&& !padded; k++){ Instances d=split[j].instance(k).relationalValue(0); for(Instance ins:d){ if(ins.isMissing(d.numAttributes()-1)){ padded=true; System.out.println("Problem "+prob+" 
is padded for instance "+k+ " for split "+j); System.out.println("Split: "+j+" Case: "+k+" Value = "+ins.value(d.numAttributes()-1)); System.out.println("INSTANCE = "+ins); break; } } } } int seriesLength=0; boolean missing =false; if(!padded){ //Search the whole series for(int j=0;j<split.length && !missing;j++){ for(int k=0;k< split[j].numInstances() && !missing; k++){ Instances d=split[j].instance(k).relationalValue(0); if(InstanceTools.hasMissing(d)) missing=true; } } seriesLength=split[0].instance(0).relationalValue(0).numAttributes(); }else{//, only search upto the last value present for(int j=0;j<split.length && !missing;j++){ for(int k=0;k< split[j].numInstances() && !missing; k++){ Instances d=split[j].instance(k).relationalValue(0); for(int m=0;m<d.numInstances()&&!missing;m++){ int end=d.numAttributes()-1; while( end>=0 && d.instance(m).isMissing(end)) end--; for(int n=0;n<=end;n++){ if(d.instance(m).isMissing(n)){ missing=true; System.out.println("Case "+k+" has missing value in dimension ="+m+" position = "+n+ "series length = "+end); continue; } } } } } } System.out.println("Missing = "+missing); for(int j=0;j<tsFormat.length;j++){ tsFormat[j].writeLine("@problemName "+prob); tsFormat[j].writeLine("@timeStamps false"); tsFormat[j].writeLine("@missing "+missing); tsFormat[j].writeLine("@univariate "+univariate); tsFormat[j].writeLine("@dimensions "+dimensions); tsFormat[j].writeLine("@equalLength "+!padded); if(!padded) tsFormat[j].writeLine("@seriesLength "+seriesLength); tsFormat[j].writeString("@classLabel true"); Attribute cv= split[j].classAttribute(); // Print the values of "position" Enumeration attValues = cv.enumerateValues(); while (attValues.hasMoreElements()) { String string = (String)attValues.nextElement(); tsFormat[j].writeString(" "+string); } tsFormat[j].writeLine("\n@data"); for(Instance ins:split[j]){ Instances dim=ins.relationalValue(0); //Find end if padded for(Instance d:dim){ int end=dim.numAttributes()-1; double[] 
data=d.toDoubleArray(); if(padded){ while( end>=0 && Double.isNaN(data[end])) end--; } if(Double.isNaN(data[0])) tsFormat[j].writeString("?"); else tsFormat[j].writeString(""+data[0]); for(int k=1;k<=end;k++){ if(Double.isNaN(data[k])) tsFormat[j].writeString(",?"); else tsFormat[j].writeString(","+data[k]); } tsFormat[j].writeString(":"); } int classV=(int)ins.classValue(); String clsString=cv.value(classV); tsFormat[j].writeLine(clsString); } } } } //<editor-fold defaultstate="collapsed" desc="Multivariate TSC datasets 2018 release"> public static String[] mtscProblems2018={ "InsectWingbeat",//15 // "KickVsPunch", Poorly formatted and very small train size "JapaneseVowels", "Libras", "LSST", "MotorImagery", "NATOPS",//20 "PenDigits", "PEMS-SF", "PhonemeSpectra", "RacketSports", "SelfRegulationSCP1",//25 "SelfRegulationSCP2", "SpokenArabicDigits", "StandWalkJump", "UWaveGestureLibrary" }; //</editor-fold> public static void main(String[] args) throws Exception { generateAllResamplesInARFF(); System.exit(0); String[] paths={"E:\\ArchiveData\\Multivariate_arff\\","E:\\ArchiveData\\Multivariate_ts\\"}; String dest="E:\\ArchiveData\\Zips_Multivariate\\"; String[] probs={"AsphaltObstaclesCoordinates","AsphaltPavementTypeCoordinates","AsphaltRegularityCoordinates"}; // makeZips(probs,dest,paths); paths=new String[]{"E:\\ArchiveData\\Univariate_arff\\","E:\\ArchiveData\\Univariate_ts\\"}; dest="E:\\ArchiveData\\Zips_Univariate\\"; probs=DatasetLists.tscProblems2018; // makeZips(probs,dest,paths); makeTSFormatFilesForResamples(probs,paths[0],paths[1]); System.exit(0); // makeTSFormatFilesForUnivariate(probs,path,outPath); // path="E:/ArchiveData/Multivariate_arff/"; // outPath="E:/ArchiveData/Multivariate_ts/"; // makeTSFormatFilesForMultivariate(probs,path,outPath,true); System.exit(0); // checkAllZipFiles(); // summariseUnivariateData("E:\\ShapeletPaper\\Version 2\\Transforms\\ShapeletTransform1\\Transforms\\"); summariseUnivariateData("E:\\ShapeletPaper\\Version 
2\\Transforms\\ShapeletTransform10\\Transforms\\"); // summariseUnivariateData("Z:\\Data\\TSCProblems2018\\"); makeAllZipFiles(); //formatERing(); // String path="C:\\temp\\"; // Instances test = DatasetLoading.loadDataNullable(path+"ERing_TEST"); // Instances train = DatasetLoading.loadDataNullable(path+"ERing_TRAIN"); // Instances train2 = DatasetLoading.loadDataNullable(path+"ERing_TRAIN2"); //insertNewLineSpaces(); //checkSpeechMarks(); //removeSpeechMarks(); //System.out.println("Summarise data "); testSimpleClassifier(); // summariseData(); // formatDuckDuckGeese(); // formatCricket(); summariseMultivariateData(); debugFormat(); makeConcatenatedFiles(); /* String prob="UWaveGestureLibrary"; String dest="Z:\\Data\\UnivariateMTSC\\"; String path="Z:\\Data\\Multivariate TSC Problems\\"; Instances test = DatasetLoading.loadDataNullable(path+prob+"\\"+prob+"_TEST"); Instances train = DatasetLoading.loadDataNullable(path+prob+"\\"+prob+"_TRAIN"); Instances test2 = DatasetLoading.loadDataNullable(dest+prob+"\\"+prob+"_TEST"); Instances train2 = DatasetLoading.loadDataNullable(dest+prob+"\\"+prob+"_TRAIN"); */ // summariseData(); // System.exit(0); // debugFormat(); // makeUnivariateFiles(); // String prob="UWaveGestureLibrary"; // String dest="Z:\\Data\\UnivariateMTSC\\"; // String path="Z:\\Data\\Multivariate TSC Problems\\"; // Instances test = DatasetLoading.loadDataNullable(path+prob+"\\"+prob+"_TEST"); // Instances train = DatasetLoading.loadDataNullable(path+prob+"\\"+prob+"_TRAIN"); // // Instances test2 = DatasetLoading.loadDataNullable(dest+prob+"\\"+prob+"_TEST"); // Instances train2 = DatasetLoading.loadDataNullable(dest+prob+"\\"+prob+"_TRAIN"); // // // // checkUnivariateFiles(); //// formatMotorImagery(); // // formatFingerMovements(); // //formatSelfRegulationSCP1(); // // formatSelfRegulationSCP2(); // // formatPhilData(); // // splitData("\\\\cmptscsvr.cmp.uea.ac.uk\\ueatsc\\Data\\Multivariate Working Area\\Michael_Unfinalised\\","Phoneme"); // // 
summariseData(); // // //gettingStarted(); // // mergeEpilepsy(); //Instances[] data = convertToUnivariate("C:/UEAMachineLearning/Datasets/Kaggle/PLAsTiCCAstronomicalClassification/", "C:/UEAMachineLearning/Datasets/Kaggle/PLAsTiCCAstronomicalClassification/", "LSSTTrain"); //System.out.println(data[0]); //System.out.println(data[1]); } public static void debugFormat(){ // ECGActivities Instances train,test; train=DatasetLoading.loadDataNullable("Z:\\Data\\MultivariateTSCProblems\\ECGActivities\\ECGActivities_TRAIN"); test=DatasetLoading.loadDataNullable("Z:\\Data\\MultivariateTSCProblems\\ECGActivities\\ECGActivities_TEST"); // Instances[] split=InstanceTools.resampleTrainAndTestInstances(train, test, 1); Instances[] split=MultivariateInstanceTools.resampleMultivariateTrainAndTestInstances(train, test, 1); System.out.println("IS it relational ? "+split[0].checkForAttributeType(Attribute.RELATIONAL)); System.out.println("IS it relational ? "+split[0].checkForAttributeType(Attribute.RELATIONAL)); System.out.println("Fold 1 TRAIN num instances "+split[0].numInstances()+" Num atts ="+(split[0].numAttributes()-1)); // System.out.println(split[0]+""); System.out.println("Fold 1 TRAIN instance 1 num dimensions "+split[0].instance(0).relationalValue(0).numInstances()+" series length "+split[0].instance(0).relationalValue(0).numAttributes()); for(Instance ins:split[0]) System.out.println("Fold TRAIN instance num dimensions "+ins.relationalValue(0).numInstances()+" series length "+ins.relationalValue(0).numAttributes()); } public static void mergeEpilepsy(){ Instances x,y,z; Instances all; String sourcePath="C:\\Users\\ajb\\Dropbox\\TSC Problems\\EpilepsyX\\"; String destPath="C:\\Users\\ajb\\Dropbox\\Multivariate TSC Problems\\HAR\\Epilepsy\\"; x=DatasetLoading.loadDataNullable(sourcePath+"EpilepsyX_ALL"); y=DatasetLoading.loadDataNullable(sourcePath+"EpilepsyY_ALL"); z=DatasetLoading.loadDataNullable(sourcePath+"EpilepsyZ_ALL"); //Delete the use ID, will reinsert manually 
after x.deleteAttributeAt(0); y.deleteAttributeAt(0); z.deleteAttributeAt(0); all=utilities.multivariate_tools.MultivariateInstanceTools.mergeToMultivariateInstances(new Instances[]{x,y,z}); // OutFile out=new OutFile(destPath+"EpilepsyNew.arff"); // out.writeString(all.toString()); //Create train test splits so participant 1,2,3 in train and 4,5,6 in test int trainSize=149; int testSize=126; Instances train= new Instances(all,0); Instances test= new Instances(all); for(int i=0;i<trainSize;i++){ Instance t= test.remove(0); train.add(t); } OutFile tr=new OutFile(destPath+"Epilepsy_TRAIN.arff"); OutFile te=new OutFile(destPath+"Epilepsy_TEST.arff"); tr.writeString(train.toString()); te.writeString(test.toString()); } /**A getting started with relational attributes in Weka. Once you have the basics * there are a range of tools for manipulating them in * package utilities.multivariate_tools * * See https://weka.wikispaces.com/Multi-instance+classification * for more * */ public static void gettingStarted(){ //Load a multivariate data set String path="\\\\cmptscsvr.cmp.uea.ac.uk\\ueatsc\\Data\\Multivariate\\univariateConcatExample"; Instances train = DatasetLoading.loadDataNullable(path); System.out.println(" univariate data = "+train); path="\\\\cmptscsvr.cmp.uea.ac.uk\\ueatsc\\Data\\Multivariate\\multivariateConcatExample"; train = DatasetLoading.loadDataNullable(path); System.out.println(" multivariate data = "+train); //Recover the first instance Instance first=train.instance(0); //Split into separate dimensions Instances split=first.relationalValue(0); System.out.println(" A single multivariate case split into 3 instances with no class values= "+split); for(Instance ins:split) System.out.println("Dimension of first case =" +ins); //Extract as arrays double[][] d = new double[split.numInstances()][]; for(int i=0;i<split.numInstances();i++) d[i]=split.instance(i).toDoubleArray(); } public static void generateAllResamplesInARFF(){ // String path = 
"C:\\Users\\ajb\\Dropbox\\Working docs\\Data Resample Debug\\Data\\"; String path = "Z:\\ArchiveData\\"; String[] datasets=DatasetLists.tscProblems2018; // datasets=new String[]{"Chinatown"}; for(String problem:datasets){ System.out.println("Generating folds for "+problem); Instances[] data = { null, null }; File trainFile = new File(path + problem + "/" + problem + "_TRAIN.arff"); File testFile = new File(path + problem + "/" + problem + "_TEST.arff"); boolean predefinedFold0Exists = (trainFile.exists() && testFile.exists()); if (predefinedFold0Exists) { Instances train = DatasetLoading.loadDataNullable(trainFile); Instances test = DatasetLoading.loadDataNullable(testFile); for(int fold =0;fold<=29;fold++){ data[0] = new Instances(train); //making absolutely sure no funny business happening data[1] = new Instances(test); if (train.checkForAttributeType(Attribute.RELATIONAL)) data = MultivariateInstanceTools.resampleMultivariateTrainAndTestInstances(data[0], data[1], fold); else data = InstanceTools.resampleTrainAndTestInstances(data[0], data[1], fold); System.out.println(" instance 0 in fold "+fold+" train "+data[0].instance(0).toString()); //toString produces 'printing-friendly' 6 sig figures for doubles, using proper arffsaver now instead DatasetLoading.saveDataset(data[0], path + problem + "/" + problem + fold+"_TRAIN.arff"); DatasetLoading.saveDataset(data[1], path + problem + "/" + problem + fold+"_TEST.arff"); // //Save folds. // OutFile trainF=new OutFile(path + problem + "/" + problem + fold+"_TRAIN.arff"); // trainF.writeLine(data[0].toString()); // OutFile testF=new OutFile(path + problem + "/" + problem + fold+"_TEST.arff"); // testF.writeLine(data[1].toString()); } }else{ System.out.println("File does not exist on "+path); } } } }
81,835
46.304046
216
java
tsml-java
tsml-java-master/src/main/java/experiments/data/DatasetLists.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package experiments.data; import fileIO.InFile; import fileIO.OutFile; import tsml.transformers.SummaryStats; import utilities.ClassifierTools; import weka.classifiers.Classifier; import weka.classifiers.lazy.IBk; import weka.core.Attribute; import weka.core.Instance; import weka.core.Instances; import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.text.DecimalFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; import java.util.TreeSet; import java.util.zip.ZipEntry; import java.util.zip.ZipOutputStream; /** * Class containing lists of data sets in the UCR and UEA archive. 
* @author ajb */ public class DatasetLists { //Slow problems for contract testing String[] slowProblems={ "StarLightCurves", "FaceAll", "UWaveGestureLibraryAll", "UWaveGestureLibraryX", "UWaveGestureLibraryZ", "UWaveGestureLibraryY", "HandOutlines", "ShapesAll", "PhalangesOutlinesCorrect", "Crop", "ElectricDevices", "FordA", "FordB", "NonInvasiveFetalECGThorax1", "NonInvasiveFetalECGThorax2" }; //TSC data sets added since 2018 release //<editor-fold defaultstate="collapsed" desc=" new univariate tsc Problems"> public static String[] newForHC2Paper= { "AllGestureWiimoteX", "AllGestureWiimoteY", "AllGestureWiimoteZ", "AsphaltObstacles", "AsphaltPavementType", "AsphaltRegularity", "Colposcopy", "ElectricDeviceDetection", "GestureMidAirD1", "GestureMidAirD2", "GestureMidAirD3", "GesturePebbleZ1", "GesturePebbleZ2", "MITBIH-Heartbeat", "PickupGestureWiimoteZ", "PLAID", "AconityMINIPrinterLarge", "AconityMINIPrinterSmall", "PyrometerLaserScanLarge", "PyrometerLaserScanSmall", "ShakeGestureWiimoteZ", "SharePriceIncrease" }; //</editor-fold> //<editor-fold defaultstate="collapsed" desc=" new multivariate tsc Problems"> public static String[] newMultivariate= { "AsphaltObstaclesCoordinates", "AsphaltPavementTypeCoordinates", "AsphaltRegularityCoordinates", "EyesOpenShut", "CounterMovementJump", "Tiselac", }; //</editor-fold> /* public static String clusterPath="/gpfs/home/ajb/"; public static String dropboxPath="C:/Users/ajb/Dropbox/"; public static String beastPath="//cmptscsvr.cmp.uea.ac.uk/ueatsc/Data/"; public static String path=clusterPath; public static String problemPath=path+"/TSCProblems/"; public static String resultsPath=path+"Results/"; public static String uciPath=path+"UCIContinuous"; */ //Multivariate TSC data sets //<editor-fold defaultstate="collapsed" desc="Multivariate TSC datasets 2018 release"> public static String[] mtscProblems2018={ "ArticularyWordRecognition", //Index 0 "AsphaltObstaclesCoordinates", "AsphaltPavementTypeCoordinates", 
"AsphaltRegularityCoordinates", "AtrialFibrillation", "BasicMotions", "CharacterTrajectories", "Cricket", "DuckDuckGeese", "EigenWorms", "Epilepsy", "EthanolConcentration", "ERing", "FaceDetection", "FingerMovements", "HandMovementDirection", "Handwriting", "Heartbeat", "InsectWingbeat", // "KickVsPunch", Poorly formatted and very small train size "JapaneseVowels", "Libras", "LSST", "MotorImagery", "NATOPS", "PenDigits", "PEMS-SF", "PhonemeSpectra", "RacketSports", "SelfRegulationSCP1", "SelfRegulationSCP2", "SpokenArabicDigits", "StandWalkJump", "UWaveGestureLibrary" }; //</editor-fold> //TSC data sets for relaunch in 2018 //<editor-fold defaultstate="collapsed" desc="tsc Problems 2018 "> public static String[] tscProblems2018={ //Train Size, Test Size, Series Length, Nos Classes "ACSF1", "Adiac", // 390,391,176,37 "AllGestureWiimoteX", "AllGestureWiimoteY", "AllGestureWiimoteZ", "ArrowHead", // 36,175,251,3 "Beef", // 30,30,470,5 "BeetleFly", // 20,20,512,2 "BirdChicken", // 20,20,512,2 "BME", "Car", // 60,60,577,4 "CBF", // 30,900,128,3 "Chinatown", "ChlorineConcentration", // 467,3840,166,3 "CinCECGTorso", // 40,1380,1639,4 "Coffee", // 28,28,286,2 "Computers", // 250,250,720,2 "CricketX", // 390,390,300,12 "CricketY", // 390,390,300,12 "CricketZ", // 390,390,300,12 "Crop", "DiatomSizeReduction", // 16,306,345,4 "DistalPhalanxOutlineAgeGroup", // 400,139,80,3 "DistalPhalanxOutlineCorrect", // 600,276,80,2 "DistalPhalanxTW", // 400,139,80,6 "DodgerLoopDay", "DodgerLoopGame", "DodgerLoopWeekend", "Earthquakes", // 322,139,512,2 "ECG200", //100, 100, 96 "ECG5000", //4500, 500,140 "ECGFiveDays", // 23,861,136,2 "ElectricDevices", // 8926,7711,96,7 "EOGHorizontalSignal", "EOGVerticalSignal", "EthanolLevel", "FaceAll", // 560,1690,131,14 "FaceFour", // 24,88,350,4 "FacesUCR", // 200,2050,131,14 "FiftyWords", // 450,455,270,50 "Fish", // 175,175,463,7 "FordA", // 3601,1320,500,2 "FordB", // 3636,810,500,2 "FreezerRegularTrain", "FreezerSmallTrain", "Fungi", 
"GestureMidAirD1", "GestureMidAirD2", "GestureMidAirD3", "GesturePebbleZ1", "GesturePebbleZ2", "GunPoint", // 50,150,150,2 "GunPointAgeSpan", "GunPointMaleVersusFemale", "GunPointOldVersusYoung", "Ham", //105,109,431 "HandOutlines", // 1000,370,2709,2 "Haptics", // 155,308,1092,5 "Herring", // 64,64,512,2 "HouseTwenty", "InlineSkate", // 100,550,1882,7 "InsectEPGRegularTrain", "InsectEPGSmallTrain", "InsectWingbeatSound",//1980,220,256 "ItalyPowerDemand", // 67,1029,24,2 "LargeKitchenAppliances", // 375,375,720,3 "Lightning2", // 60,61,637,2 "Lightning7", // 70,73,319,7 "Mallat", // 55,2345,1024,8 "Meat",//60,60,448 "MedicalImages", // 381,760,99,10 "MelbournePedestrian", "MiddlePhalanxOutlineAgeGroup", // 400,154,80,3 "MiddlePhalanxOutlineCorrect", // 600,291,80,2 "MiddlePhalanxTW", // 399,154,80,6 "MixedShapesRegularTrain", "MixedShapesSmallTrain", "MoteStrain", // 20,1252,84,2 "NonInvasiveFetalECGThorax1", // 1800,1965,750,42 "NonInvasiveFetalECGThorax2", // 1800,1965,750,42 "OliveOil", // 30,30,570,4 "OSULeaf", // 200,242,427,6 "PhalangesOutlinesCorrect", // 1800,858,80,2 "Phoneme",//1896,214, 1024 "PickupGestureWiimoteZ", "PigAirwayPressure", "PigArtPressure", "PigCVP", "PLAID", "Plane", // 105,105,144,7 "PowerCons", "ProximalPhalanxOutlineAgeGroup", // 400,205,80,3 "ProximalPhalanxOutlineCorrect", // 600,291,80,2 "ProximalPhalanxTW", // 400,205,80,6 "RefrigerationDevices", // 375,375,720,3 "Rock", "ScreenType", // 375,375,720,3 "SemgHandGenderCh2", "SemgHandMovementCh2", "SemgHandSubjectCh2", "ShakeGestureWiimoteZ", "ShapeletSim", // 20,180,500,2 "ShapesAll", // 600,600,512,60 "SmallKitchenAppliances", // 375,375,720,3 "SmoothSubspace", "SonyAIBORobotSurface1", // 20,601,70,2 "SonyAIBORobotSurface2", // 27,953,65,2 "StarLightCurves", // 1000,8236,1024,3 "Strawberry",//370,613,235 "SwedishLeaf", // 500,625,128,15 "Symbols", // 25,995,398,6 "SyntheticControl", // 300,300,60,6 "ToeSegmentation1", // 40,228,277,2 "ToeSegmentation2", // 36,130,343,2 "Trace", // 
100,100,275,4 "TwoLeadECG", // 23,1139,82,2 "TwoPatterns", // 1000,4000,128,4 "UMD", "UWaveGestureLibraryAll", // 896,3582,945,8 "UWaveGestureLibraryX", // 896,3582,315,8 "UWaveGestureLibraryY", // 896,3582,315,8 "UWaveGestureLibraryZ", // 896,3582,315,8 "Wafer", // 1000,6164,152,2 "Wine",//54 57 234 "WordSynonyms", // 267,638,270,25 "Worms", //77, 181,900,5 "WormsTwoClass",//77, 181,900,5 "Yoga" // 300,3000,426,2 }; //</editor-fold> //<editor-fold defaultstate="collapsed" desc="Variable length univariate datasets"> public static String[] variableLengthUnivariate ={ "AsphaltObstacles", "AsphaltPavementType", "AsphaltRegularity", "AllGestureWiimoteX", "AllGestureWiimoteY", "AllGestureWiimoteZ", "GestureMidAirD1", "GestureMidAirD2", "GestureMidAirD3", "GesturePebbleZ1", "GesturePebbleZ2", "PickupGestureWiimoteZ", "PLAID", "ShakeGestureWiimoteZ" }; //</editor-fold> int[][] minMaxUnivariate={ {11,385,2,369}, {8,369,2,385}, {33,326,2,385}, {80,360,80,360}, {80,360,80,360}, {80,360,80,360}, {115,455,100,375}, {100,455,145,345}, {29,361,37,324}, {100,1344,134,1000}, {41,385,40,369}}; //<editor-fold defaultstate="collapsed" desc="Variable length multivariate datasets"> public static String[] variableLengthMultivariate ={ "AsphaltObstaclesCoordinates", "AsphaltPavementTypeCoordinates", "AsphaltRegularityCoordinates", "CharacterTrajectories", "InsectWingbeat", "JapaneseVowels", "SpokenArabicDigits" }; //</editor-fold> int[][] minMaxMultivariate={ {96,1543,66,2370}, {95,4200,66,2371}, {60,180,61,181}, {2,21,2,21}, {7,26,7,28}, {4,92,7,83} }; //<editor-fold defaultstate="collapsed" desc="Fixed length multivariate datasets"> public static String[] fixedLengthMultivariate = { "ArticularyWordRecognition", //Index 0 "AtrialFibrillation", "BasicMotions", "Cricket", "DuckDuckGeese", "EigenWorms", "Epilepsy", "EthanolConcentration", "ERing", "FaceDetection", "FingerMovements", "HandMovementDirection", "Handwriting", "Heartbeat", "Libras", "LSST", "MotorImagery", "NATOPS", 
"PenDigits", "PEMS-SF", "PhonemeSpectra", "RacketSports", "SelfRegulationSCP1", "SelfRegulationSCP2", "StandWalkJump", "UWaveGestureLibrary" }; //</editor-fold> //<editor-fold defaultstate="collapsed" desc="missing value univariate datasets"> public static String[] missingValueUnivariate ={ "AllGestureWiimoteX", "AllGestureWiimoteY", "AllGestureWiimoteZ", "DodgerLoopDay", "DodgerLoopGame", "DodgerLoopWeekend", "GestureMidAirD1", "GestureMidAirD2", "GestureMidAirD3", "GesturePebbleZ1", "GesturePebbleZ2", "MelbournePedestrian", "PickupGestureWiimoteZ", "PLAID", "ShakeGestureWiimoteZ" }; //</editor-fold> //TSC data sets for bakeoff redux //<editor-fold defaultstate="collapsed" desc="tsc Problems 2018 "> public static String[] equalLengthProblems={ //Train Size, Test Size, Series Length, Nos Classes "ACSF1", "Adiac", // 390,391,176,37 "ArrowHead", // 36,175,251,3 "Beef", // 30,30,470,5 "BeetleFly", // 20,20,512,2 "BirdChicken", // 20,20,512,2 "BME", "Car", // 60,60,577,4 "CBF", // 30,900,128,3 "Chinatown", "ChlorineConcentration", // 467,3840,166,3 "CinCECGTorso", // 40,1380,1639,4 "Coffee", // 28,28,286,2 "Computers", // 250,250,720,2 "CricketX", // 390,390,300,12 "CricketY", // 390,390,300,12 "CricketZ", // 390,390,300,12 "Crop", "DiatomSizeReduction", // 16,306,345,4 "DistalPhalanxOutlineAgeGroup", // 400,139,80,3 "DistalPhalanxOutlineCorrect", // 600,276,80,2 "DistalPhalanxTW", // 400,139,80,6 "Earthquakes", // 322,139,512,2 "ECG200", //100, 100, 96 "ECG5000", //4500, 500,140 "ECGFiveDays", // 23,861,136,2 "ElectricDevices", // 8926,7711,96,7 "EOGHorizontalSignal", "EOGVerticalSignal", "EthanolLevel", "FaceAll", // 560,1690,131,14 "FaceFour", // 24,88,350,4 "FacesUCR", // 200,2050,131,14 "FiftyWords", // 450,455,270,50 "Fish", // 175,175,463,7 "FordA", // 3601,1320,500,2 "FordB", // 3636,810,500,2 "FreezerRegularTrain", "FreezerSmallTrain", // "Fungi", removed because only one instance per class in train. 
This is a query problem "GunPoint", // 50,150,150,2 "GunPointAgeSpan", "GunPointMaleVersusFemale", "GunPointOldVersusYoung", "Ham", //105,109,431 "HandOutlines", // 1000,370,2709,2 "Haptics", // 155,308,1092,5 "Herring", // 64,64,512,2 "HouseTwenty", "InlineSkate", // 100,550,1882,7 "InsectEPGRegularTrain", "InsectEPGSmallTrain", "InsectWingbeatSound",//1980,220,256 "ItalyPowerDemand", // 67,1029,24,2 "LargeKitchenAppliances", // 375,375,720,3 "Lightning2", // 60,61,637,2 "Lightning7", // 70,73,319,7 "Mallat", // 55,2345,1024,8 "Meat",//60,60,448 "MedicalImages", // 381,760,99,10 "MiddlePhalanxOutlineAgeGroup", // 400,154,80,3 "MiddlePhalanxOutlineCorrect", // 600,291,80,2 "MiddlePhalanxTW", // 399,154,80,6 "MixedShapesRegularTrain", "MixedShapesSmallTrain", "MoteStrain", // 20,1252,84,2 "NonInvasiveFetalECGThorax1", // 1800,1965,750,42 "NonInvasiveFetalECGThorax2", // 1800,1965,750,42 "OliveOil", // 30,30,570,4 "OSULeaf", // 200,242,427,6 "PhalangesOutlinesCorrect", // 1800,858,80,2 "Phoneme",//1896,214, 1024 "PigAirwayPressure", "PigArtPressure", "PigCVP", "Plane", // 105,105,144,7 "PowerCons", "ProximalPhalanxOutlineAgeGroup", // 400,205,80,3 "ProximalPhalanxOutlineCorrect", // 600,291,80,2 "ProximalPhalanxTW", // 400,205,80,6 "RefrigerationDevices", // 375,375,720,3 "Rock", "ScreenType", // 375,375,720,3 "SemgHandGenderCh2", "SemgHandMovementCh2", "SemgHandSubjectCh2", "ShapeletSim", // 20,180,500,2 "ShapesAll", // 600,600,512,60 "SmallKitchenAppliances", // 375,375,720,3 "SmoothSubspace", "SonyAIBORobotSurface1", // 20,601,70,2 "SonyAIBORobotSurface2", // 27,953,65,2 "StarLightCurves", // 1000,8236,1024,3 "Strawberry",//370,613,235 "SwedishLeaf", // 500,625,128,15 "Symbols", // 25,995,398,6 "SyntheticControl", // 300,300,60,6 "ToeSegmentation1", // 40,228,277,2 "ToeSegmentation2", // 36,130,343,2 "Trace", // 100,100,275,4 "TwoLeadECG", // 23,1139,82,2 "TwoPatterns", // 1000,4000,128,4 "UMD", "UWaveGestureLibraryAll", // 896,3582,945,8 "UWaveGestureLibraryX", 
// 896,3582,315,8 "UWaveGestureLibraryY", // 896,3582,315,8 "UWaveGestureLibraryZ", // 896,3582,315,8 "Wafer", // 1000,6164,152,2 "Wine",//54 57 234 "WordSynonyms", // 267,638,270,25 "Worms", //77, 181,900,5 "WormsTwoClass",//77, 181,900,5 "Yoga" // 300,3000,426,2 }; //</editor-fold> //<editor-fold defaultstate="collapsed" desc="tsc Problems new in 2018 release"> public static String[] newFor2018Problems={ "ACSF1", "AllGestureWiimoteX", "AllGestureWiimoteY", "AllGestureWiimoteZ", "BME", "Chinatown", "Crop", "DodgerLoopDay", "DodgerLoopGame", "DodgerLoopWeekend", "EOGHorizontalSignal", "EOGVerticalSignal", "EthanolLevel", "FreezerRegularTrain", "FreezerSmallTrain", "Fungi", "GestureMidAirD1", "GestureMidAirD2", "GestureMidAirD3", "GesturePebbleZ1", "GesturePebbleZ2", "GunPointAgeSpan", "GunPointMaleVersusFemale", "GunPointOldVersusYoung", "HouseTwenty", "InsectEPGRegularTrain", "InsectEPGSmallTrain", "MelbournePedestrian", "MixedShapesRegularTrain", "MixedShapesSmallTrain", "PickupGestureWiimoteZ", "PigAirwayPressure", "PigArtPressure", "PigCVP", "PLAID", "PowerCons", "Rock", "SemgHandGenderCh2", "SemgHandMovementCh2", "SemgHandSubjectCh2", "ShakeGestureWiimoteZ", "SmoothSubspace", "UMD", }; //</editor-fold> //<editor-fold defaultstate="collapsed" desc="tsc Problems new in 2018 release no missing"> public static String[] newFor2018Problems_noMissingValues={ "ACSF1", "BME", "Chinatown", "Crop", "EOGHorizontalSignal", "EOGVerticalSignal", "EthanolLevel", "FreezerRegularTrain", "FreezerSmallTrain", "GunPointAgeSpan", "GunPointMaleVersusFemale", "GunPointOldVersusYoung", "HouseTwenty", "InsectEPGRegularTrain", "InsectEPGSmallTrain", "MixedShapesRegularTrain", "MixedShapesSmallTrain", "PigAirwayPressure", "PigArtPressure", "PigCVP", "PowerCons", "Rock", "SemgHandGenderCh2", "SemgHandMovementCh2", "SemgHandSubjectCh2", "SmoothSubspace", "UMD", }; //</editor-fold> //TSC data sets before relaunch in 2018 //<editor-fold defaultstate="collapsed" desc="tsc Problems prior to 
relaunch in 2018 "> public static String[] tscProblems2017={ "AALTDChallenge", "Acsf1", //Train Size, Test Size, Series Length, Nos Classes //Train Size, Test Size, Series Length, Nos Classes "Adiac", // 390,391,176,37 "ArrowHead", // 36,175,251,3 "Beef", // 30,30,470,5 "BeetleFly", // 20,20,512,2 "BirdChicken", // 20,20,512,2 "Car", // 60,60,577,4 "CBF", // 30,900,128,3 "ChlorineConcentration", // 467,3840,166,3 "CinCECGTorso", // 40,1380,1639,4 "Coffee", // 28,28,286,2 "Computers", // 250,250,720,2 "CricketX", // 390,390,300,12 "CricketY", // 390,390,300,12 "CricketZ", // 390,390,300,12 "DiatomSizeReduction", // 16,306,345,4 "DistalPhalanxOutlineCorrect", // 600,276,80,2 "DistalPhalanxOutlineAgeGroup", // 400,139,80,3 "DistalPhalanxTW", // 400,139,80,6 "Earthquakes", // 322,139,512,2 "ECG200", //100, 100, 96 "ECG5000", //4500, 500,140 "ECGFiveDays", // 23,861,136,2 "ElectricDevices", // 8926,7711,96,7 "FaceAll", // 560,1690,131,14 "FaceFour", // 24,88,350,4 "FacesUCR", // 200,2050,131,14 "FiftyWords", // 450,455,270,50 "Fish", // 175,175,463,7 "FordA", // 3601,1320,500,2 "FordB", // 3636,810,500,2 "GunPoint", // 50,150,150,2 "Ham", //105,109,431 "HandOutlines", // 1000,370,2709,2 "Haptics", // 155,308,1092,5 "Herring", // 64,64,512,2 "InlineSkate", // 100,550,1882,7 "InsectWingbeatSound",//1980,220,256 "ItalyPowerDemand", // 67,1029,24,2 "LargeKitchenAppliances", // 375,375,720,3 "Lightning2", // 60,61,637,2 "Lightning7", // 70,73,319,7 "Mallat", // 55,2345,1024,8 "Meat",//60,60,448 "MedicalImages", // 381,760,99,10 "MiddlePhalanxOutlineCorrect", // 600,291,80,2 "MiddlePhalanxOutlineAgeGroup", // 400,154,80,3 "MiddlePhalanxTW", // 399,154,80,6 "MNIST", "MoteStrain", // 20,1252,84,2 "NonInvasiveFetalECGThorax1", // 1800,1965,750,42 "NonInvasiveFetalECGThorax2", // 1800,1965,750,42 "OliveOil", // 30,30,570,4 "OSULeaf", // 200,242,427,6 "PhalangesOutlinesCorrect", // 1800,858,80,2 "Phoneme",//1896,214, 1024 "Plane", // 105,105,144,7 "Plaid", 
"ProximalPhalanxOutlineCorrect", // 600,291,80,2 "ProximalPhalanxOutlineAgeGroup", // 400,205,80,3 "ProximalPhalanxTW", // 400,205,80,6 "RefrigerationDevices", // 375,375,720,3 "ScreenType", // 375,375,720,3 "ShapeletSim", // 20,180,500,2 "ShapesAll", // 600,600,512,60 "SmallKitchenAppliances", // 375,375,720,3 "SonyAIBORobotSurface1", // 20,601,70,2 "SonyAIBORobotSurface2", // 27,953,65,2 "StarlightCurves", // 1000,8236,1024,3 "Strawberry",//370,613,235 "SwedishLeaf", // 500,625,128,15 "Symbols", // 25,995,398,6 "SyntheticControl", // 300,300,60,6 "ToeSegmentation1", // 40,228,277,2 "ToeSegmentation2", // 36,130,343,2 "Trace", // 100,100,275,4 "TwoLeadECG", // 23,1139,82,2 "TwoPatterns", // 1000,4000,128,4 "UWaveGestureLibraryX", // 896,3582,315,8 "UWaveGestureLibraryY", // 896,3582,315,8 "UWaveGestureLibraryZ", // 896,3582,315,8 "UWaveGestureLibraryAll", // 896,3582,945,8 "Wafer", // 1000,6164,152,2 "Wine",//54 57 234 "WordSynonyms", // 267,638,270,25 "Worms", //77, 181,900,5 "WormsTwoClass",//77, 181,900,5 "Yoga" // 300,3000,426,2 }; //</editor-fold> //Bakeoff data sets, expansded in 2018 //<editor-fold defaultstate="collapsed" desc="tscProblems85: The new 85 UCR datasets"> public static String[] tscProblems85={ //Train Size, Test Size, Series Length, Nos Classes //Train Size, Test Size, Series Length, Nos Classes "Adiac", // 390,391,176,37 "ArrowHead", // 36,175,251,3 "Beef", // 30,30,470,5 "BeetleFly", // 20,20,512,2 "BirdChicken", // 20,20,512,2 "Car", // 60,60,577,4 "CBF", // 30,900,128,3 "ChlorineConcentration", // 467,3840,166,3 "CinCECGTorso", // 40,1380,1639,4 "Coffee", // 28,28,286,2 "Computers", // 250,250,720,2 "CricketX", // 390,390,300,12 "CricketY", // 390,390,300,12 "CricketZ", // 390,390,300,12 "DiatomSizeReduction", // 16,306,345,4 "DistalPhalanxOutlineCorrect", // 600,276,80,2 "DistalPhalanxOutlineAgeGroup", // 400,139,80,3 "DistalPhalanxTW", // 400,139,80,6 "Earthquakes", // 322,139,512,2 "ECG200", //100, 100, 96 "ECG5000", //4500, 500,140 
"ECGFiveDays", // 23,861,136,2 "ElectricDevices", // 8926,7711,96,7 "FaceAll", // 560,1690,131,14 "FaceFour", // 24,88,350,4 "FacesUCR", // 200,2050,131,14 "FiftyWords", // 450,455,270,50 "Fish", // 175,175,463,7 "FordA", // 3601,1320,500,2 "FordB", // 3636,810,500,2 "GunPoint", // 50,150,150,2 "Ham", //105,109,431 "HandOutlines", // 1000,370,2709,2 "Haptics", // 155,308,1092,5 "Herring", // 64,64,512,2 "InlineSkate", // 100,550,1882,7 "InsectWingbeatSound",//1980,220,256 "ItalyPowerDemand", // 67,1029,24,2 "LargeKitchenAppliances", // 375,375,720,3 "Lightning2", // 60,61,637,2 "Lightning7", // 70,73,319,7 "Mallat", // 55,2345,1024,8 "Meat",//60,60,448 "MedicalImages", // 381,760,99,10 "MiddlePhalanxOutlineCorrect", // 600,291,80,2 "MiddlePhalanxOutlineAgeGroup", // 400,154,80,3 "MiddlePhalanxTW", // 399,154,80,6 "MoteStrain", // 20,1252,84,2 "NonInvasiveFetalECGThorax1", // 1800,1965,750,42 "NonInvasiveFetalECGThorax2", // 1800,1965,750,42 "OliveOil", // 30,30,570,4 "OSULeaf", // 200,242,427,6 "PhalangesOutlinesCorrect", // 1800,858,80,2 "Phoneme",//1896,214, 1024 "Plane", // 105,105,144,7 "ProximalPhalanxOutlineCorrect", // 600,291,80,2 "ProximalPhalanxOutlineAgeGroup", // 400,205,80,3 "ProximalPhalanxTW", // 400,205,80,6 "RefrigerationDevices", // 375,375,720,3 "ScreenType", // 375,375,720,3 "ShapeletSim", // 20,180,500,2 "ShapesAll", // 600,600,512,60 "SmallKitchenAppliances", // 375,375,720,3 "SonyAIBORobotSurface1", // 20,601,70,2 "SonyAIBORobotSurface2", // 27,953,65,2 "StarlightCurves", // 1000,8236,1024,3 "Strawberry",//370,613,235 "SwedishLeaf", // 500,625,128,15 "Symbols", // 25,995,398,6 "SyntheticControl", // 300,300,60,6 "ToeSegmentation1", // 40,228,277,2 "ToeSegmentation2", // 36,130,343,2 "Trace", // 100,100,275,4 "TwoLeadECG", // 23,1139,82,2 "TwoPatterns", // 1000,4000,128,4 "UWaveGestureLibraryX", // 896,3582,315,8 "UWaveGestureLibraryY", // 896,3582,315,8 "UWaveGestureLibraryZ", // 896,3582,315,8 "UWaveGestureLibraryAll", // 896,3582,945,8 
"Wafer", // 1000,6164,152,2 "Wine",//54 57 234 "WordSynonyms", // 267,638,270,25 "Worms", //77, 181,900,5 "WormsTwoClass",//77, 181,900,5 "Yoga" // 300,3000,426,2 }; //</editor-fold> //TSC data sets for relaunch in 2018 //<editor-fold defaultstate="collapsed" desc="tsc Problems 2018, no missing values"> public static String[] tscProblems112={ //Train Size, Test Size, Series Length, Nos Classes "ACSF1", "Adiac", // 390,391,176,37 "ArrowHead", // 36,175,251,3 "Beef", // 30,30,470,5 "BeetleFly", // 20,20,512,2 "BirdChicken", // 20,20,512,2 "BME", "Car", // 60,60,577,4 "CBF", // 30,900,128,3 "Chinatown", "ChlorineConcentration", // 467,3840,166,3 "CinCECGTorso", // 40,1380,1639,4 "Coffee", // 28,28,286,2 "Computers", // 250,250,720,2 "CricketX", // 390,390,300,12 "CricketY", // 390,390,300,12 "CricketZ", // 390,390,300,12 "Crop", "DiatomSizeReduction", // 16,306,345,4 "DistalPhalanxOutlineAgeGroup", // 400,139,80,3 "DistalPhalanxOutlineCorrect", // 600,276,80,2 "DistalPhalanxTW", // 400,139,80,6 "Earthquakes", // 322,139,512,2 "ECG200", //100, 100, 96 "ECG5000", //4500, 500,140 "ECGFiveDays", // 23,861,136,2 "ElectricDevices", // 8926,7711,96,7 "EOGHorizontalSignal", "EOGVerticalSignal", "EthanolLevel", "FaceAll", // 560,1690,131,14 "FaceFour", // 24,88,350,4 "FacesUCR", // 200,2050,131,14 "FiftyWords", // 450,455,270,50 "Fish", // 175,175,463,7 "FordA", // 3601,1320,500,2 "FordB", // 3636,810,500,2 "FreezerRegularTrain", "FreezerSmallTrain", "GunPoint", // 50,150,150,2 "GunPointAgeSpan", "GunPointMaleVersusFemale", "GunPointOldVersusYoung", "Ham", //105,109,431 "HandOutlines", // 1000,370,2709,2 "Haptics", // 155,308,1092,5 "Herring", // 64,64,512,2 "HouseTwenty", "InlineSkate", // 100,550,1882,7 "InsectEPGRegularTrain", "InsectEPGSmallTrain", "InsectWingbeatSound",//1980,220,256 "ItalyPowerDemand", // 67,1029,24,2 "LargeKitchenAppliances", // 375,375,720,3 "Lightning2", // 60,61,637,2 "Lightning7", // 70,73,319,7 "Mallat", // 55,2345,1024,8 "Meat",//60,60,448 
"MedicalImages", // 381,760,99,10 "MiddlePhalanxOutlineAgeGroup", // 400,154,80,3 "MiddlePhalanxOutlineCorrect", // 600,291,80,2 "MiddlePhalanxTW", // 399,154,80,6 "MixedShapesRegularTrain", "MixedShapesSmallTrain", "MoteStrain", // 20,1252,84,2 "NonInvasiveFetalECGThorax1", // 1800,1965,750,42 "NonInvasiveFetalECGThorax2", // 1800,1965,750,42 "OliveOil", // 30,30,570,4 "OSULeaf", // 200,242,427,6 "PhalangesOutlinesCorrect", // 1800,858,80,2 "Phoneme",//1896,214, 1024 "PigAirwayPressure", "PigArtPressure", "PigCVP", "Plane", // 105,105,144,7 "PowerCons", "ProximalPhalanxOutlineAgeGroup", // 400,205,80,3 "ProximalPhalanxOutlineCorrect", // 600,291,80,2 "ProximalPhalanxTW", // 400,205,80,6 "RefrigerationDevices", // 375,375,720,3 "Rock", "ScreenType", // 375,375,720,3 "SemgHandGenderCh2", "SemgHandMovementCh2", "SemgHandSubjectCh2", "ShapeletSim", // 20,180,500,2 "ShapesAll", // 600,600,512,60 "SmallKitchenAppliances", // 375,375,720,3 "SmoothSubspace", "SonyAIBORobotSurface1", // 20,601,70,2 "SonyAIBORobotSurface2", // 27,953,65,2 "StarLightCurves", // 1000,8236,1024,3 "Strawberry",//370,613,235 "SwedishLeaf", // 500,625,128,15 "Symbols", // 25,995,398,6 "SyntheticControl", // 300,300,60,6 "ToeSegmentation1", // 40,228,277,2 "ToeSegmentation2", // 36,130,343,2 "Trace", // 100,100,275,4 "TwoLeadECG", // 23,1139,82,2 "TwoPatterns", // 1000,4000,128,4 "UMD", "UWaveGestureLibraryAll", // 896,3582,945,8 "UWaveGestureLibraryX", // 896,3582,315,8 "UWaveGestureLibraryY", // 896,3582,315,8 "UWaveGestureLibraryZ", // 896,3582,315,8 "Wafer", // 1000,6164,152,2 "Wine",//54 57 234 "WordSynonyms", // 267,638,270,25 "Worms", //77, 181,900,5 "WormsTwoClass",//77, 181,900,5 "Yoga" // 300,3000,426,2 }; //</editor-fold> //New TSC data sets for relaunch in 2018 //<editor-fold defaultstate="collapsed" desc="tsc Problems new for 2018, no missing values"> public static String[] newProblems27={ //Train Size, Test Size, Series Length, Nos Classes "ACSF1", "BME", "Chinatown", "Crop", 
"EOGHorizontalSignal", "EOGVerticalSignal", "EthanolLevel", "FreezerRegularTrain", "FreezerSmallTrain", "GunPointAgeSpan", "GunPointMaleVersusFemale", "GunPointOldVersusYoung", "HouseTwenty", "InsectEPGRegularTrain", "InsectEPGSmallTrain", "MixedShapesRegularTrain", "MixedShapesSmallTrain", "PigAirwayPressure", "PigArtPressure", "PigCVP", "PowerCons", "Rock", "SemgHandGenderCh2", "SemgHandMovementCh2", "SemgHandSubjectCh2", "SmoothSubspace", "UMD", }; //</editor-fold> //Bakeoff data sets, expansded in 2018 //<editor-fold defaultstate="collapsed" desc="tscProblems78WithoutPigs:"> public static String[] tscProblems78={ //Train Size, Test Size, Series Length, Nos Classes //Train Size, Test Size, Series Length, Nos Classes "Adiac", // 390,391,176,37 "ArrowHead", // 36,175,251,3 "Beef", // 30,30,470,5 "BeetleFly", // 20,20,512,2 "BirdChicken", // 20,20,512,2 "Car", // 60,60,577,4 "CBF", // 30,900,128,3 "ChlorineConcentration", // 467,3840,166,3 "CinCECGTorso", // 40,1380,1639,4 "Coffee", // 28,28,286,2 "Computers", // 250,250,720,2 "CricketX", // 390,390,300,12 "CricketY", // 390,390,300,12 "CricketZ", // 390,390,300,12 "DiatomSizeReduction", // 16,306,345,4 "DistalPhalanxOutlineCorrect", // 600,276,80,2 "DistalPhalanxOutlineAgeGroup", // 400,139,80,3 "DistalPhalanxTW", // 400,139,80,6 "Earthquakes", // 322,139,512,2 "ECG200", //100, 100, 96 "ECG5000", //4500, 500,140 "ECGFiveDays", // 23,861,136,2 "FaceAll", // 560,1690,131,14 "FaceFour", // 24,88,350,4 "FacesUCR", // 200,2050,131,14 "FiftyWords", // 450,455,270,50 "Fish", // 175,175,463,7 "GunPoint", // 50,150,150,2 "Ham", //105,109,431 "Haptics", // 155,308,1092,5 "Herring", // 64,64,512,2 "InlineSkate", // 100,550,1882,7 "InsectWingbeatSound",//1980,220,256 "ItalyPowerDemand", // 67,1029,24,2 "LargeKitchenAppliances", // 375,375,720,3 "Lightning2", // 60,61,637,2 "Lightning7", // 70,73,319,7 "Mallat", // 55,2345,1024,8 "Meat",//60,60,448 "MedicalImages", // 381,760,99,10 "MiddlePhalanxOutlineCorrect", // 
600,291,80,2 "MiddlePhalanxOutlineAgeGroup", // 400,154,80,3 "MiddlePhalanxTW", // 399,154,80,6 "MoteStrain", // 20,1252,84,2 "OliveOil", // 30,30,570,4 "OSULeaf", // 200,242,427,6 "PhalangesOutlinesCorrect", // 1800,858,80,2 "Phoneme",//1896,214, 1024 "Plane", // 105,105,144,7 "ProximalPhalanxOutlineCorrect", // 600,291,80,2 "ProximalPhalanxOutlineAgeGroup", // 400,205,80,3 "ProximalPhalanxTW", // 400,205,80,6 "RefrigerationDevices", // 375,375,720,3 "ScreenType", // 375,375,720,3 "ShapeletSim", // 20,180,500,2 "ShapesAll", // 600,600,512,60 "SmallKitchenAppliances", // 375,375,720,3 "SonyAIBORobotSurface1", // 20,601,70,2 "SonyAIBORobotSurface2", // 27,953,65,2 "Strawberry",//370,613,235 "SwedishLeaf", // 500,625,128,15 "Symbols", // 25,995,398,6 "SyntheticControl", // 300,300,60,6 "ToeSegmentation1", // 40,228,277,2 "ToeSegmentation2", // 36,130,343,2 "Trace", // 100,100,275,4 "TwoLeadECG", // 23,1139,82,2 "TwoPatterns", // 1000,4000,128,4 "UWaveGestureLibraryX", // 896,3582,315,8 "UWaveGestureLibraryY", // 896,3582,315,8 "UWaveGestureLibraryZ", // 896,3582,315,8 "UWaveGestureLibraryAll", // 896,3582,945,8 "Wafer", // 1000,6164,152,2 "Wine",//54 57 234 "WordSynonyms", // 267,638,270,25 "Worms", //77, 181,900,5 "WormsTwoClass",//77, 181,900,5 "Yoga" // 300,3000,426,2 }; //</editor-fold> //<editor-fold defaultstate="collapsed" desc="five splits of the new 85 UCR datasets"> public static String[][] fiveSplits={ { "Adiac", // 390,391,176,37 "ArrowHead", // 36,175,251,3 "Beef", // 30,30,470,5 "BeetleFly", // 20,20,512,2 "BirdChicken", // 20,20,512,2 "Car", // 60,60,577,4 "CBF", // 30,900,128,3 "ChlorineConcentration", // 467,3840,166,3 "CinCECGTorso", // 40,1380,1639,4 "Coffee", // 28,28,286,2 "Computers", // 250,250,720,2 "CricketX", // 390,390,300,12 "CricketY", // 390,390,300,12 "CricketZ", // 390,390,300,12 "DiatomSizeReduction", // 16,306,345,4 "DistalPhalanxOutlineCorrect", // 600,276,80,2 "DistalPhalanxOutlineAgeGroup", // 400,139,80,3 "DistalPhalanxTW", // 
400,139,80,6 "Earthquakes" // 322,139,512,2 }, { "ECG200", //100, 100, 96 "ECG5000", //4500, 500,140 "ECGFiveDays", // 23,861,136,2 "FaceFour", // 24,88,350,4 "FacesUCR", // 200,2050,131,14 "FiftyWords", // 450,455,270,50 "Fish", // 175,175,463,7 "GunPoint", // 50,150,150,2 "Ham", //105,109,431 "Haptics", // 155,308,1092,5 "Herring", // 64,64,512,2 "ItalyPowerDemand", // 67,1029,24,2 "LargeKitchenAppliances", // 375,375,720,3 "Lightning2", // 60,61,637,2 "Lightning7", // 70,73,319,7 "Mallat", // 55,2345,1024,8 "Meat",//60,60,448 "MedicalImages", // 381,760,99,10 }, { "MiddlePhalanxOutlineCorrect", // 600,291,80,2 "MiddlePhalanxOutlineAgeGroup", // 400,154,80,3 "MiddlePhalanxTW", // 399,154,80,6 "MoteStrain", // 20,1252,84,2 "OliveOil", // 30,30,570,4 "OSULeaf", // 200,242,427,6 "Plane", // 105,105,144,7 "ProximalPhalanxOutlineCorrect", // 600,291,80,2 "ProximalPhalanxOutlineAgeGroup", // 400,205,80,3 "ProximalPhalanxTW", // 400,205,80,6 "RefrigerationDevices", // 375,375,720,3 "ScreenType", // 375,375,720,3 "ShapeletSim", // 20,180,500,2 "SmallKitchenAppliances", // 375,375,720,3 "SonyAIBORobotSurface1", // 20,601,70,2 "SonyAIBORobotSurface2", // 27,953,65,2 "Strawberry",//370,613,235 "SwedishLeaf", // 500,625,128,15 "Symbols", // 25,995,398,6 "SyntheticControl" // 300,300,60,6 }, { "ToeSegmentation1", // 40,228,277,2 "ToeSegmentation2", // 36,130,343,2 "Trace", // 100,100,275,4 "TwoLeadECG", // 23,1139,82,2 "TwoPatterns", // 1000,4000,128,4 "UWaveGestureLibraryX", // 896,3582,315,8 "UWaveGestureLibraryY", // 896,3582,315,8 "UWaveGestureLibraryZ", // 896,3582,315,8 "Wafer", // 1000,6164,152,2 "Wine",//54 57 234 "WordSynonyms", // 267,638,270,25 "Worms", //77, 181,900,5 "WormsTwoClass",//77, 181,900,5 "Yoga", // 300,3000,426,2 "InlineSkate", // 100,550,1882,7 "InsectWingbeatSound",//1980,220,256 "FaceAll", // 560,1690,131,14 "PhalangesOutlinesCorrect", // 1800,858,80,2 "Phoneme", //1896,214, 1024 "ShapesAll", // 600,600,512,60 }, { "ElectricDevices", // 
8926,7711,96,7 "FordA", // 3601,1320,500,2 "FordB", // 3636,810,500,2 "HandOutlines", // 1000,370,2709,2 "NonInvasiveFetalECGThorax1", // 1800,1965,750,42 "NonInvasiveFetalECGThorax2", // 1800,1965,750,42 "StarlightCurves", // 1000,8236,1024,3 "UWaveGestureLibraryAll", // 896,3582,945,8 } }; //</editor-fold> //UCR data sets //<editor-fold defaultstate="collapsed" desc="tscProblems46: 46 UCR Data sets"> public static String[] tscProblems46={ "Adiac", // 390,391,176,37 "Beef", // 30,30,470,5 "Car", // 60,60,577,4 "CBF", // 30,900,128,3 "ChlorineConcentration", // 467,3840,166,3 "CinCECGTorso", // 40,1380,1639,4 "Coffee", // 28,28,286,2 "CricketX", // 390,390,300,12 "CricketY", // 390,390,300,12 "CricketZ", // 390,390,300,12 "DiatomSizeReduction", // 16,306,345,4 "ECGFiveDays", // 23,861,136,2 "FaceAll", // 560,1690,131,14 "FaceFour", // 24,88,350,4 "FacesUCR", // 200,2050,131,14 "FiftyWords", // 450,455,270,50 "Fish", // 175,175,463,7 "GunPoint", // 50,150,150,2 "Haptics", // 155,308,1092,5 "InlineSkate", // 100,550,1882,7 "ItalyPowerDemand", // 67,1029,24,2 "Lightning2", // 60,61,637,2 "Lightning7", // 70,73,319,7 "Mallat", // 55,2345,1024,8 "MedicalImages", // 381,760,99,10 "MoteStrain", // 20,1252,84,2 "NonInvasiveFetalECGThorax1", // 1800,1965,750,42 "NonInvasiveFetalECGThorax2", // 1800,1965,750,42 "OliveOil", // 30,30,570,4 "OSULeaf", // 200,242,427,6 "Plane", // 105,105,144,7 "SonyAIBORobotSurface1", // 20,601,70,2 "SonyAIBORobotSurface2", // 27,953,65,2 "StarLightCurves", // 1000,8236,1024,3 "SwedishLeaf", // 500,625,128,15 "Symbols", // 25,995,398,6 "SyntheticControl", // 300,300,60,6 "Trace", // 100,100,275,4 "TwoLeadECG", // 23,1139,82,2 "TwoPatterns", // 1000,4000,128,4 "UWaveGestureLibraryX", // 896,3582,315,8 "UWaveGestureLibraryY", // 896,3582,315,8 "UWaveGestureLibraryZ", // 896,3582,315,8 "Wafer", // 1000,6164,152,2 "WordSynonyms", // 267,638,270,25 "Yoga" // 300,3000,426,2 }; //</editor-fold> //Small UCR data sets //<editor-fold 
defaultstate="collapsed" desc="tscProblemsSmall: Small UCR Data sets"> public static String[] tscProblemsSmall={ "Beef", // 30,30,470,5 "Car", // 60,60,577,4 "Coffee", // 28,28,286,2 "CricketX", // 390,390,300,12 "CricketY", // 390,390,300,12 "CricketZ", // 390,390,300,12 "DiatomSizeReduction", // 16,306,345,4 "fish", // 175,175,463,7 "GunPoint", // 50,150,150,2 "ItalyPowerDemand", // 67,1029,24,2 "MoteStrain", // 20,1252,84,2 "OliveOil", // 30,30,570,4 "Plane", // 105,105,144,7 "SonyAIBORobotSurface1", // 20,601,70,2 "SonyAIBORobotSurface2", // 27,953,65,2 "SyntheticControl", // 300,300,60,6 "Trace", // 100,100,275,4 "TwoLeadECG", // 23,1139,82,2 }; //</editor-fold> //<editor-fold defaultstate="collapsed" desc="spectral: Spectral data"> public static String[] spectral={ //Train Size, Test Size, Series Length, Nos Classes "Beef", // 30,30,470,5 "Coffee", // 28,28,286,2 "Ham", "Meat", "OliveOil", // 30,30,570,4 "Strawberry", "Wine", //To add: spirits }; //</editor-fold> //Small Files //<editor-fold defaultstate="collapsed" desc="smallTSCProblems:"> public static String[] smallTSCProblems={ "Beef","BeetleFly","BirdChicken","FaceFour","Plane","FacesUCR"}; /*//Train Size, Test Size, Series Length, Nos Classes "Adiac", // 390,391,176,37 "ArrowHead", // 36,175,251,3 "Beef", // 30,30,470,5 "BeetleFly", // 20,20,512,2 "BirdChicken", // 20,20,512,2 "Car", // 60,60,577,4 "CBF", // 30,900,128,3 "ChlorineConcentration", // 467,3840,166,3 "CinC_ECG_torso", // 40,1380,1639,4 "Computers", // 250,250,720,2 "Cricket_X", // 390,390,300,12 "Cricket_Y", // 390,390,300,12 "Cricket_Z", // 390,390,300,12 "DiatomSizeReduction", // 16,306,345,4 "DistalPhalanxOutlineAgeGroup", // 400,139,80,3 "DistalPhalanxTW", // 400,139,80,6 "Earthquakes", // 322,139,512,2 "ECGFiveDays", // 23,861,136,2 "ElectricDevices", // 8926,7711,96,7 "FaceAll", // 560,1690,131,14 "FacesUCR", // 200,2050,131,14 "fiftywords", // 450,455,270,50 "fish", // 175,175,463,7 "FordA", // 3601,1320,500,2 "FordB", // 
3636,810,500,2 "GunPoint", // 50,150,150,2 "Ham", "HandOutlines", // 1000,370,2709,2 "Haptics", // 155,308,1092,5 "Herring", // 64,64,512,2 "InlineSkate", // 100,550,1882,7 "ItalyPowerDemand", // 67,1029,24,2 "LargeKitchenAppliances", // 375,375,720,3 "Lightning2", // 60,61,637,2 "Lightning7", // 70,73,319,7 "MALLAT", // 55,2345,1024,8 // "Meat", "MiddlePhalanxOutlineAgeGroup", // 400,154,80,3 "MiddlePhalanxTW", // 399,154,80,6 "MoteStrain", // 20,1252,84,2 "NonInvasiveFatalECG_Thorax1", // 1800,1965,750,42 "NonInvasiveFatalECG_Thorax2", // 1800,1965,750,42 "OSULeaf", // 200,242,427,6 "PhalangesOutlinesCorrect", // 1800,858,80,2 "Plane", // 105,105,144,7 "ProximalPhalanxOutlineAgeGroup", // 400,205,80,3 "ProximalPhalanxTW", // 400,205,80,6 "RefrigerationDevices", // 375,375,720,3 "ScreenType", // 375,375,720,3 // "ShapeletSim", // 20,180,500,2 "ShapesAll", // 600,600,512,60 "SmallKitchenAppliances", // 375,375,720,3 "SonyAIBORobotSurfaceII", // 27,953,65,2 "StarLightCurves", // 1000,8236,1024,3 "Strawberry", "Symbols", // 25,995,398,6 "TwoLeadECG", // 23,1139,82,2 "TwoPatterns", // 1000,4000,128,4 "UWaveGestureLibrary_X", // 896,3582,315,8 "UWaveGestureLibrary_Y", // 896,3582,315,8 "UWaveGestureLibrary_Z", // 896,3582,315,8 "UWaveGestureLibraryAll", // 896,3582,945,8 "wafer", // 1000,6164,152,2 // "Wine", "WordSynonyms", // 267,638,270,25 "Worms", "WormsTwoClass", "yoga" // 300,3000,426,2 }; */ //</editor-fold> //Sets used in papers //<editor-fold defaultstate="collapsed" desc="rakthanmanon13fastshapelets"> /* Problem sets used in @article{rakthanmanon2013fast, title={Fast Shapelets: A Scalable Algorithm for Discovering Time Series Shapelets}, author={Rakthanmanon, T. and Keogh, E.}, journal={Proceedings of the 13th {SIAM} International Conference on Data Mining}, year={2013} } All included except Cricket. 
There are three cricket problems and they are not
 * aligned; the class values in the test set don't match. */
    // Problems used in the Fast Shapelets paper (names are the old UCR spellings,
    // e.g. "Lighting2"). Inline comments: train size, test size, length, classes.
    public static String[] fastShapeletProblems={
        "ItalyPowerDemand", // 67,1029,24,2
        "MoteStrain", // 20,1252,84,2
        "SonyAIBORobotSurfaceII", // 27,953,65,2
        "SonyAIBORobotSurface", // 20,601,70,2
        "Beef", // 30,30,470,5
        "GunPoint", // 50,150,150,2
        "TwoLeadECG", // 23,1139,82,2
        "Adiac", // 390,391,176,37
        "CBF", // 30,900,128,3
        "ChlorineConcentration", // 467,3840,166,3
        "Coffee", // 28,28,286,2
        "DiatomSizeReduction", // 16,306,345,4
        "ECGFiveDays", // 23,861,136,2
        "FaceFour", // 24,88,350,4
        "FacesUCR", // 200,2050,131,14
        "fish", // 175,175,463,7
        "Lighting2", // 60,61,637,2
        "Lighting7", // 70,73,319,7
        "FaceAll", // 560,1690,131,14
        "MALLAT", // 55,2345,1024,8
        "MedicalImages", // 381,760,99,10
        "OliveOil", // 30,30,570,4
        "OSULeaf", // 200,242,427,6
        "SwedishLeaf", // 500,625,128,15
        "Symbols", // 25,995,398,6
        "SyntheticControl", // 300,300,60,6
        "Trace", // 100,100,275,4
        "wafer", // 1000,6164,152,2
        "yoga",
        "FaceAll",
        "TwoPatterns",
        "CinC_ECG_torso" // 40,1380,1639,4
    };
    //</editor-fold>

    //<editor-fold defaultstate="collapsed" desc="marteau09stiffness: TWED">
    // Problems used in Marteau's TWED (time-warp edit distance) paper.
    public static String[] marteau09stiffness={
        "SyntheticControl", // 300,300,60,6
        "GunPoint", // 50,150,150,2
        "CBF", // 30,900,128,3
        "FaceAll", // 560,1690,131,14
        "OSULeaf", // 200,242,427,6
        "SwedishLeaf", // 500,625,128,15
        "fiftywords", // 450,455,270,50
        "Trace", // 100,100,275,4
        "TwoPatterns", // 1000,4000,128,4
        "wafer", // 1000,6164,152,2
        "FaceFour", // 24,88,350,4
        "Lightning2", // 60,61,637,2
        "Lightning7", // 70,73,319,7
        "ECG200", // 100,100,96,2
        "Adiac", // 390,391,176,37
        "yoga", // 300,3000,426,2
        "fish", // 175,175,463,7
        "Coffee", // 28,28,286,2
        "OliveOil", // 30,30,570,4
        "Beef" // 30,30,470,5
    };
    //</editor-fold>

    //<editor-fold defaultstate="collapsed" desc="stefan13movesplit: Move-Split-Merge">
    // Problems used in Stefan et al.'s Move-Split-Merge distance paper.
    public static String[] stefan13movesplit={
        "Coffee", // 28,28,286,2
        "CBF", // 30,900,128,3
        "ECG200", // 100,100,96,2
        "SyntheticControl", // 300,300,60,6
        "GunPoint", // 50,150,150,2
        "FaceFour", // 24,88,350,4
        "Lightning7", // 70,73,319,7
        "Trace", // 100,100,275,4
        "Adiac", // 390,391,176,37
        "Beef", // 30,30,470,5
        "Lightning2", // 60,61,637,2
        "OliveOil", // 30,30,570,4
        "OSULeaf", // 200,242,427,6
        "SwedishLeaf", // 500,625,128,15
        "fish", // 175,175,463,7
        "FaceAll", // 560,1690,131,14
        "fiftywords", // 450,455,270,50
        "TwoPatterns", // 1000,4000,128,4
        "wafer", // 1000,6164,152,2
        "yoga" // 300,3000,426,2
    };
    //</editor-fold>

    //<editor-fold defaultstate="collapsed" desc="lines14elasticensemble">
    // Problems used in Lines & Bagnall's Elastic Ensemble (DAMI 2014) paper
    // (old UCR directory names with underscores).
    public static String[] datasetsForDAMI2014_Lines={
        //Train Size, Test Size, Series Length, Nos Classes
        "Adiac", // 390,391,176,37
        "ArrowHead", // 36,175,251,3
        "Beef", // 30,30,470,5
        "BeetleFly", // 20,20,512,2
        "BirdChicken", // 20,20,512,2
        "Car", // 60,60,577,4
        "CBF", // 30,900,128,3
        "ChlorineConcentration", // 467,3840,166,3
        "CinC_ECG_torso", // 40,1380,1639,4
        "Coffee", // 28,28,286,2
        "Computers", // 250,250,720,2
        "Cricket_X", // 390,390,300,12
        "Cricket_Y", // 390,390,300,12
        "Cricket_Z", // 390,390,300,12
        "DiatomSizeReduction", // 16,306,345,4
        "DistalPhalanxOutlineCorrect", // 600,276,80,2
        "DistalPhalanxOutlineAgeGroup", // 400,139,80,3
        "DistalPhalanxTW", // 400,139,80,6
        "Earthquakes", // 322,139,512,2
        "ECGFiveDays", // 23,861,136,2
        "ElectricDevices", // 8926,7711,96,7
        "FaceAll", // 560,1690,131,14
        "FaceFour", // 24,88,350,4
        "FacesUCR", // 200,2050,131,14
        "fiftywords", // 450,455,270,50
        "fish", // 175,175,463,7
        "FordA", // 3601,1320,500,2
        "FordB", // 3636,810,500,2
        "GunPoint", // 50,150,150,2
        "HandOutlines", // 1000,370,2709,2
        "Haptics", // 155,308,1092,5
        "Herring", // 64,64,512,2
        "InlineSkate", // 100,550,1882,7
        "ItalyPowerDemand", // 67,1029,24,2
        "LargeKitchenAppliances", // 375,375,720,3
        "Lightning2", // 60,61,637,2
        "Lightning7", // 70,73,319,7
        "MALLAT", // 55,2345,1024,8
        "MedicalImages", // 381,760,99,10
        "MiddlePhalanxOutlineCorrect", // 600,291,80,2
        "MiddlePhalanxOutlineAgeGroup", // 400,154,80,3
        "MiddlePhalanxTW", // 399,154,80,6
        "MoteStrain", // 20,1252,84,2
        "NonInvasiveFetalECG_Thorax1", // 1800,1965,750,42
        "NonInvasiveFetalECG_Thorax2", // 1800,1965,750,42
        "OliveOil", // 30,30,570,4
        "OSULeaf", // 200,242,427,6
        "PhalangesOutlinesCorrect", // 1800,858,80,2
        "Plane", // 105,105,144,7
        "ProximalPhalanxOutlineCorrect", // 600,291,80,2
        "ProximalPhalanxOutlineAgeGroup", // 400,205,80,3
        "ProximalPhalanxTW", // 400,205,80,6
        "RefrigerationDevices", // 375,375,720,3
        "ScreenType", // 375,375,720,3
        "ShapeletSim", // 20,180,500,2
        "ShapesAll", // 600,600,512,60
        "SmallKitchenAppliances", // 375,375,720,3
        "SonyAIBORobotSurface", // 20,601,70,2
        "SonyAIBORobotSurfaceII", // 27,953,65,2
        "StarLightCurves", // 1000,8236,1024,3
        "SwedishLeaf", // 500,625,128,15
        "Symbols", // 25,995,398,6
        "SyntheticControl", // 300,300,60,6
        "ToeSegmentation1", // 40,228,277,2
        "ToeSegmentation2", // 36,130,343,2
        "Trace", // 100,100,275,4
        "TwoLeadECG", // 23,1139,82,2
        "TwoPatterns", // 1000,4000,128,4
        "UWaveGestureLibrary_X", // 896,3582,315,8
        "UWaveGestureLibrary_Y", // 896,3582,315,8
        "UWaveGestureLibrary_Z", // 896,3582,315,8
        "wafer", // 1000,6164,152,2
        "WordSynonyms", // 267,638,270,25
        "yoga" // 300,3000,426,2
    };
    //</editor-fold>

    // Test-set sizes, in order, for the 85-problem UCR archive.
    static int[] testSizes85={391,175,30,20,20,60,900,3840,1380,28,250,390,390,390,306,276,139,139,139,100,4500,861,7711,1690,88,2050,455,175,1320,810,150,105,370,308,64,550,1980,1029,375,61,73,2345,60,760,291,154,154,1252,1965,1965,30,242,858,1896,105,291,205,205,375,375,180,600,375,601,953,8236,370,625,995,300,228,130,100,1139,4000,3582,3582,3582,3582,6164,54,638,77,77,3000};

    //UCI Classification problems: NOTE THESE ARE -train NOT _TRAIN
    //<editor-fold defaultstate="collapsed" desc="UCI Classification problems">
    public static String[] uciFileNames={
        "abalone", "banana", "cancer", "clouds", "concentric", "diabetes",
        "ecoli", "german", "glass2", "glass6", "haberman", "heart",
        "ionosphere", "liver", "magic", "pendigitis", "phoneme", "ringnorm",
        "satimage",
"segment", "sonar", "thyroid", "twonorm", "vehicle", "vowel", "waveform", "wdbc", "wins", "yeast"};
    //</editor-fold>

    //Gavin data sets
    /* banana flare_solar splice transfusion breast_cancer synthetic vertebra image spambase tiianic */

    // 121 UCI problems with continuous attributes (Delgado et al. style naming).
    public static String[] UCIContinuousFileNames={"abalone","acute-inflammation","acute-nephritis","adult","annealing","arrhythmia","audiology-std","balance-scale","balloons","bank","blood","breast-cancer","breast-cancer-wisc","breast-cancer-wisc-diag","breast-cancer-wisc-prog","breast-tissue","car","cardiotocography-10clases","cardiotocography-3clases",
        "chess-krvk","chess-krvkp","congressional-voting","conn-bench-sonar-mines-rocks","conn-bench-vowel-deterding",
        "connect-4","contrac","credit-approval","cylinder-bands","dermatology","echocardiogram","ecoli","energy-y1","energy-y2","fertility","flags","glass","haberman-survival","hayes-roth","heart-cleveland","heart-hungarian","heart-switzerland","heart-va","hepatitis","hill-valley","horse-colic","ilpd-indian-liver","image-segmentation","ionosphere","iris","led-display","lenses","letter","libras","low-res-spect","lung-cancer","lymphography","magic","mammographic",
        "miniboone","molec-biol-promoter","molec-biol-splice","monks-1","monks-2","monks-3","mushroom","musk-1","musk-2","nursery","oocytes_merluccius_nucleus_4d","oocytes_merluccius_states_2f","oocytes_trisopterus_nucleus_2f","oocytes_trisopterus_states_5b","optical","ozone","page-blocks","parkinsons","pendigits","pima","pittsburg-bridges-MATERIAL","pittsburg-bridges-REL-L","pittsburg-bridges-SPAN","pittsburg-bridges-T-OR-D","pittsburg-bridges-TYPE","planning","plant-margin","plant-shape","plant-texture","post-operative","primary-tumor","ringnorm","seeds","semeion","soybean","spambase","spect","spectf","statlog-australian-credit","statlog-german-credit","statlog-heart","statlog-image","statlog-landsat","statlog-shuttle","statlog-vehicle","steel-plates","synthetic-control","teaching","thyroid","tic-tac-toe","titanic","trains","twonorm","vertebral-column-2clases","vertebral-column-3clases","wall-following","waveform","waveform-noise","wine","wine-quality-red","wine-quality-white","yeast","zoo"};

    // Same list with the four largest problems removed.
    public static String[] UCIContinuousWithoutBigFour={"abalone","acute-inflammation","acute-nephritis","annealing","arrhythmia","audiology-std","balance-scale","balloons","bank","blood","breast-cancer","breast-cancer-wisc","breast-cancer-wisc-diag","breast-cancer-wisc-prog","breast-tissue","car","cardiotocography-10clases","cardiotocography-3clases",
        "chess-krvkp","congressional-voting","conn-bench-sonar-mines-rocks","conn-bench-vowel-deterding",
        "connect-4","contrac","credit-approval","cylinder-bands","dermatology","echocardiogram","ecoli","energy-y1","energy-y2","fertility","flags","glass","haberman-survival","hayes-roth","heart-cleveland","heart-hungarian","heart-switzerland","heart-va","hepatitis","hill-valley","horse-colic","ilpd-indian-liver","image-segmentation","ionosphere","iris","led-display","lenses","letter","libras","low-res-spect","lung-cancer","lymphography","mammographic",
        "molec-biol-promoter","molec-biol-splice","monks-1","monks-2","monks-3","mushroom","musk-1","musk-2","nursery","oocytes_merluccius_nucleus_4d","oocytes_merluccius_states_2f","oocytes_trisopterus_nucleus_2f","oocytes_trisopterus_states_5b","optical","ozone","page-blocks","parkinsons","pendigits","pima","pittsburg-bridges-MATERIAL","pittsburg-bridges-REL-L","pittsburg-bridges-SPAN","pittsburg-bridges-T-OR-D","pittsburg-bridges-TYPE","planning","plant-margin","plant-shape","plant-texture","post-operative","primary-tumor","ringnorm","seeds","semeion","soybean","spambase","spect","spectf","statlog-australian-credit","statlog-german-credit","statlog-heart","statlog-image","statlog-landsat","statlog-shuttle","statlog-vehicle","steel-plates","synthetic-control","teaching","thyroid","tic-tac-toe","titanic","trains","twonorm","vertebral-column-2clases","vertebral-column-3clases","wall-following","waveform","waveform-noise","wine","wine-quality-red","wine-quality-white","yeast","zoo"};

    //Refactor when repo back
    public static String[] ReducedUCI={"bank","blood","breast-cancer-wisc-diag",
        "breast-tissue","cardiotocography-10clases",
        "conn-bench-sonar-mines-rocks","conn-bench-vowel-deterding",
        "ecoli","glass","hill-valley",
        "image-segmentation","ionosphere","iris","libras","magic",
        // "miniboone",
        "oocytes_merluccius_nucleus_4d","oocytes_trisopterus_states_5b",
        "optical","ozone","page-blocks","parkinsons","pendigits",
        "planning","post-operative","ringnorm","seeds","spambase",
        "statlog-landsat","statlog-shuttle","statlog-vehicle","steel-plates",
        "synthetic-control","twonorm","vertebral-column-3clases",
        "wall-following","waveform-noise","wine-quality-white","yeast"};

    // Two-class problems in the 2018 (128-problem) archive.
    public static String[] twoClassProblems2018={"BeetleFly","BirdChicken","Chinatown",
        "Coffee","Computers","DistalPhalanxOutlineCorrect","DodgerLoopGame",
        "DodgerLoopWeekend","Earthquakes","ECG200","ECGFiveDays","FordA","FordB",
        "FreezerRegularTrain","FreezerSmallTrain","GunPoint","GunPointAgeSpan",
"GunPointMaleVersusFemale","GunPointOldVersusYoung","Ham","HandOutlines", "Herring","HouseTwenty","ItalyPowerDemand","Lightning2","MiddlePhalanxOutlineCorrect", "MoteStrain","PhalangesOutlinesCorrect","PowerCons","ProximalPhalanxOutlineCorrect", "SemgHandGenderCh2","ShapeletSim","SonyAIBORobotSurface1","SonyAIBORobotSurface2", "Strawberry","ToeSegmentation1","ToeSegmentation2","TwoLeadECG","Wafer","Wine", "WormsTwoClass","Yoga"}; public static String[] notNormalised={"ArrowHead","Beef","BeetleFly","BirdChicken","Coffee","Computers","Cricket_X","Cricket_Y","Cricket_Z","DistalPhalanxOutlineAgeGroup","DistalPhalanxOutlineCorrect","DistalPhalanxTW","ECG200","Earthquakes","ElectricDevices","FordA","FordB","Ham","Herring","LargeKitchenAppliances","Meat","MiddlePhalanxOutlineAgeGroup","MiddlePhalanxOutlineCorrect","MiddlePhalanxTW","OliveOil","PhalangesOutlinesCorrect","Plane","ProximalPhalanxOutlineAgeGroup","ProximalPhalanxOutlineCorrect","ProximalPhalanxTW","RefrigerationDevices","ScreenType","ShapeletSim","ShapesAll","SmallKitchenAppliances","Strawberry","ToeSegmentation1","ToeSegmentation2","UWaveGestureLibraryAll","UWaveGestureLibrary_Z","Wine","Worms","WormsTwoClass","fish"}; public static void processUCRData(String problemPath){ System.out.println(" nos files ="+tscProblems46.length); String s; for(int str=39;str<43;str++){ s=tscProblems46[str]; InFile trainF= new InFile(problemPath+s+"/"+s+"_TRAIN"); InFile testF= new InFile(problemPath+s+"/"+s+"_TEST"); Instances train= DatasetLoading.loadDataNullable(problemPath+s+"/"+s+"_TRAIN"); Instances test= DatasetLoading.loadDataNullable(problemPath+s+"/"+s+"_TEST"); int trainSize=trainF.countLines(); int testSize=testF.countLines(); Attribute a=train.classAttribute(); String tt=a.value(0); int first=Integer.parseInt(tt); System.out.println(s+" First value ="+tt+" first ="+first); if(trainSize!=train.numInstances() || testSize!=test.numInstances()){ System.out.println(" ERROR MISMATCH SIZE 
TRAIN="+trainSize+","+train.numInstances()+" TEST ="+testSize+","+test.numInstances()); System.exit(0); } trainF= new InFile(problemPath+s+"/"+s+"_TRAIN"); testF= new InFile(problemPath+s+"/"+s+"_TEST"); File dir = new File(problemPath+s); if(!dir.exists()){ dir.mkdir(); } OutFile newTrain = new OutFile(problemPath+s+"/"+s+"_TRAIN.arff"); OutFile newTest = new OutFile(problemPath+s+"/"+s+"_TEST.arff"); Instances header = new Instances(train,0); newTrain.writeLine(header.toString()); newTest.writeLine(header.toString()); for(int i=0;i<trainSize;i++){ String line=trainF.readLine(); line=line.trim(); String[] split=line.split("/s+"); try{ // System.out.println(split[0]+"First ="+split[1]+" last ="+split[split.length-1]+" length = "+split.length+" nos atts "+train.numAttributes()); double c=Double.valueOf(split[0]); if((int)(c-1)!=(int)train.instance(i).classValue() && (int)(c)!=(int)train.instance(i).classValue()&&(int)(c+1)!=(int)train.instance(i).classValue()){ System.out.println(" ERROR MISMATCH IN CLASS "+s+" from instance "+i+" ucr ="+(int)c+" mine ="+(int)train.instance(i).classValue()); System.exit(0); } for(int j=1;j<train.numAttributes();j++){ double v=Double.valueOf(split[j]); newTrain.writeString(v+","); } if(first<=0) newTrain.writeString((int)train.instance(i).classValue()+"\n"); else newTrain.writeString((int)(train.instance(i).classValue()+1)+"\n"); }catch(Exception e){ System.out.println("Error problem "+s+" instance ="+i+" length ="+split.length+" val ="+split[0]); System.exit(0); } } for(int i=0;i<testSize;i++){ String line=testF.readLine(); line=line.trim(); String[] split=line.split("/s+"); try{ // System.out.println(split[0]+"First ="+split[1]+" last ="+split[split.length-1]+" length = "+split.length+" nos atts "+train.numAttributes()); double c=Double.valueOf(split[0]); if((int)(c-1)!=(int)test.instance(i).classValue() && (int)(c)!=(int)test.instance(i).classValue()&&(int)(c+1)!=(int)test.instance(i).classValue()){ System.out.println(" ERROR 
MISMATCH IN CLASS "+s+" from instance "+i+" ucr ="+(int)c+" mine ="+(int)test.instance(i).classValue()); System.exit(0); } for(int j=1;j<test.numAttributes();j++){ double v=Double.valueOf(split[j]); newTest.writeString(v+","); } if(first<=0) newTest.writeString((int)test.instance(i).classValue()+"\n"); else newTest.writeString((int)(test.instance(i).classValue()+1)+"\n"); }catch(Exception e){ System.out.println("Error problem "+s+" instance ="+i+" length ="+split.length+" val ="+split[0]); System.exit(0); } } } } public static void listNotNormalisedList(String[] fileNames,String problemPath) throws Exception{ TreeSet<String> notNormed=new TreeSet<>(); DecimalFormat df = new DecimalFormat("###.######"); for(String s:fileNames){ //Load test train Instances train=DatasetLoading.loadDataNullable(problemPath+s+"/"+s+"_TRAIN"); Instances test=DatasetLoading.loadDataNullable(problemPath+s+"/"+s+"_TEST"); //Find summary SummaryStats ss= new SummaryStats(); train=ss.transform(train); test=ss.transform(test); int i=1; for(Instance ins:train){ double stdev=ins.value(1)*ins.value(1); // stdev*=train.numAttributes()-1/(train.numAttributes()-2); if(Math.abs(ins.value(0))>0.01 || Math.abs(1-stdev)>0.01){ System.out.println(" Not normalised train series ="+s+" index "+i+" mean = "+df.format(ins.value(0))+" var ="+df.format(stdev)); notNormed.add(s); break; } } for(Instance ins:test){ double stdev=ins.value(1)*ins.value(1); // stdev*=train.numAttributes()-1/(train.numAttributes()-2); if(Math.abs(ins.value(0))>0.01 || Math.abs(1-stdev)>0.01){ System.out.println(" Not normalised test series ="+s+" index "+i+" mean = "+df.format(ins.value(0))+" var ="+df.format(stdev)); notNormed.add(s); break; } } } System.out.print("String[] notNormalised={"); for(String s:notNormed) System.out.print("\""+s+"\","); System.out.println("}"); System.out.println("TOTAL NOT NORMED ="+notNormed.size()); } public static void dataDescription(String[] fileNames,String problemPath){ //Produce summary 
descriptions //dropboxPath=uciPath; OutFile f=new OutFile(problemPath+"DataDimensions.csv"); MetaData[] all=new MetaData[fileNames.length]; TreeSet<String> nm=new TreeSet<>(); nm.addAll(Arrays.asList(notNormalised)); f.writeLine("Problem,TrainSize,TestSize,SeriesLength,NumClasses,Normalised,ClassCounts"); for(int i=0;i<fileNames.length;i++){ try{ Instances test=DatasetLoading.loadDataNullable(problemPath+fileNames[i]+"/"+fileNames[i]+"_TEST"); Instances train=DatasetLoading.loadDataNullable(problemPath+fileNames[i]+"/"+fileNames[i]+"_TRAIN"); Instances allData =new Instances(test); for(int j=0;j<train.numInstances();j++) allData.add(train.instance(j)); // allData.randomize(new Random()); // OutFile combo=new OutFile(problemPath+tscProblems85[i]+"/"+tscProblems85[i]+".arff"); // combo.writeString(allData.toString()); boolean normalised=true; if(nm.contains(fileNames[i])) normalised=false; int[] classCounts=new int[allData.numClasses()*2]; for(Instance ins: train) classCounts[(int)(ins.classValue())]++; for(Instance ins: test) classCounts[allData.numClasses()+(int)(ins.classValue())]++; all[i]=new MetaData(fileNames[i],train.numInstances(),test.numInstances(),test.numAttributes()-1,test.numClasses(),classCounts,normalised); f.writeLine(all[i].toString()); System.out.println(all[i].toString()); } catch(Exception e){ System.out.println(" ERRROR"+e); } } /* Arrays.sort(all); f=new OutFile(problemPath+"DataDimensionsBySeriesLength.csv"); for(MetaData m: all) f.writeLine(m.toString()); Arrays.sort(all, new MetaData.CompareByTrain()); f=new OutFile(problemPath+"DataDimensionsByTrainSize.csv"); for(MetaData m: all) f.writeLine(m.toString()); Arrays.sort(all, new MetaData.CompareByClasses()); f=new OutFile(problemPath+"DataDimensionsByNosClasses.csv"); for(MetaData m: all) f.writeLine(m.toString()); Arrays.sort(all, new MetaData.CompareByTotalSize()); f=new OutFile(problemPath+"DataDimensionsByTotalSize.csv"); for(MetaData m: all) f.writeLine(m.toString()); */ } public 
static void dataDescriptionDataNotSplit(String[] fileNames, String problemPath){
    // Produce summary descriptions for problems stored as a single unsplit file
    // (no _TRAIN/_TEST pair). Writes one row per problem to
    // <problemPath>/DataDimensions.csv: name, #cases, #attributes (excluding the
    // class attribute), #classes, then the proportion of cases in each class.
//dropboxPath=uciPath;
    OutFile f=new OutFile(problemPath+"DataDimensions.csv");
    f.writeLine("problem,numinstances,numAttributes,numClasses,classDistribution");
    try{
        for(int i=0;i<fileNames.length;i++){
            // NOTE(review): loadDataNullable returns null on failure, which would
            // NPE below and be swallowed by the blanket catch — confirm intended.
            Instances allData=DatasetLoading.loadDataNullable(problemPath+fileNames[i]+"/"+fileNames[i]);
//            allData.randomize(new Random());
//            OutFile combo=new OutFile(problemPath+tscProblems85[i]+"/"+tscProblems85[i]+".arff");
//            combo.writeString(allData.toString());
            // Histogram of cases per class, indexed by the integer class value.
            int[] classCounts=new int[allData.numClasses()];
            for(Instance ins: allData)
                classCounts[(int)(ins.classValue())]++;
            f.writeString(fileNames[i]+","+allData.numInstances()+","+(allData.numAttributes()-1)+","+allData.numClasses());
            for(int c:classCounts)
                f.writeString(","+(c/(double)allData.numInstances()));   // per-class proportion
            f.writeString("\n");
        }
    }catch(Exception e){
        System.out.println(" ERRROR"+e);
    }
}

/**
 * Stub: opens the means and standard-deviation CSVs and prepares number formatters,
 * but the table-writing logic was never completed — no table rows are ever written
 * to outfile.
 */
public static void makeTable(String means, String stdDev,String outfile){
    InFile m=new InFile(means);
    InFile sd=new InFile(stdDev);
    int lines=m.countLines();
    m=new InFile(means);        // reopen to rewind after countLines consumed the stream
    String s=m.readLine();
    int columns=s.split(",").length;
    m=new InFile(means);        // reopen again to rewind to the first line
    OutFile out=new OutFile(outfile);
    DecimalFormat meanF=new DecimalFormat(".###");
    DecimalFormat sdF=new DecimalFormat(".##");
}

/**
 * For each problem, ensure a plain-text description <name>.txt exists alongside the
 * data: copy it from the problem's README.md when present, otherwise fall back to
 * copying a local description file.
 * NOTE(review): hard-coded Windows paths; theirFilePath lacks a trailing backslash,
 * so theirFilePath+str concatenates without a separator — looks like a bug, confirm.
 */
public static void createReadmeFiles(String[] problems){
    String myFilePath="C:\\Users\\ajb\\Dropbox\\TSC Website\\DataDescriptions\\";
    String theirFilePath="Z:\\Data\\NewTSCProblems";
    int count=0;
    for(String str:problems){
//        System.out.println("Processing "+str);
        File header=new File(theirFilePath+str+"\\README.md");
        File txtHeader=new File(theirFilePath+str+"\\"+str+".txt");
//        System.out.println("No text file ");
        if(header.exists()){//Copy to a text file
//            System.out.println("there is an md file ");
            InFile in=new InFile(theirFilePath+str+"\\README.md");
            OutFile out=new OutFile(theirFilePath+str+"\\"+str+".txt");
            String line=in.readLine();
while(line!=null){ out.writeLine(line); line=in.readLine(); } } else{ //Copy from my files File ff= new File(myFilePath+str+".txt"); if(ff.exists()){ InFile in=new InFile(myFilePath+str+".txt"); OutFile out=new OutFile(theirFilePath+str+"\\"+str+".txt"); String line=in.readLine(); while(line!=null){ out.writeLine(line); line=in.readLine(); } } else System.out.println("No description for "+str); } } } public static void buildArffs(String[] problems, String path){ String header; InFile trainTxt,testTxt,hdr; OutFile trainArff,testArff; for(String str:problems){ System.out.println("Making ARFF for "+str); trainTxt=new InFile(path+str+"\\"+str+"_TRAIN.txt"); hdr=new InFile(path+str+"\\"+str+".txt"); trainArff= new OutFile(path+str+"\\"+str+"_TRAIN.arff"); testArff= new OutFile(path+str+"\\"+str+"_TEST.arff"); //Write header comments String line=hdr.readLine(); while(line!=null){ trainArff.writeLine("%"+line); testArff.writeLine("%"+line); line=hdr.readLine(); } //Write arff meta data trainArff.writeLine("@Relation "+str); testArff.writeLine("@Relation "+str); //Read in the train data ArrayList<Integer> classValues= new ArrayList<>(); ArrayList<Double[]> attributeValues= new ArrayList<>(); line=trainTxt.readLine(); Double[] data=null; while(line!=null){ line=line.trim(); // String[] aCase=line.split(","); String[] aCase=line.split("\\s+"); for(int i=0;i<aCase.length;i++) aCase[i]=aCase[i].trim(); classValues.add((int)Double.parseDouble(aCase[0])); data= new Double[aCase.length-1]; for(int i=0;i<aCase.length-1;i++) data[i]=Double.parseDouble(aCase[i+1]); attributeValues.add(data); line=trainTxt.readLine(); } //Write all data in CSV format int numAtts=data.length; for(int i=1;i<=numAtts;i++){ trainArff.writeLine("@attribute att"+i+" numeric"); testArff.writeLine("@attribute att"+i+" numeric"); } TreeSet<Integer> ts=new TreeSet(classValues); trainArff.writeString("@attribute target {"); testArff.writeString("@attribute target {"); int size=ts.size(); int c=1; for(Integer 
in:ts){
        // (continuation of buildArffs) finish the nominal class declaration
        // "@attribute target {v1,v2,...}" started on the previous source line:
        // comma-separate all but the last distinct class value, then close with "}".
        trainArff.writeString(in+"");
        testArff.writeString(in+"");
        if(c<size){
            trainArff.writeString(",");
            testArff.writeString(",");
            c++;
        }else{
            trainArff.writeLine("}");
            testArff.writeLine("}");
        }
    }
    // Emit the buffered train split: one CSV row per case, NaN written as ARFF missing '?'.
    trainArff.writeLine("\n@data");
    testArff.writeLine("\n@data");
    for(int i=0;i<attributeValues.size();i++){
        data=attributeValues.get(i);
        int classValue=classValues.get(i);
        for(int j=0;j<data.length;j++){
            if(Double.isNaN(data[j]))
                trainArff.writeString("?,");
            else
                trainArff.writeString(data[j]+",");
        }
        trainArff.writeLine(classValue+"");
    }
    //Read in the test data
    testTxt=new InFile(path+str+"\\"+str+"_TEST.txt");
    System.out.println("Starting test");
    classValues= new ArrayList<>();
    attributeValues= new ArrayList<>();
    line=testTxt.readLine();
    line=line.trim();
    data=null;
    int cc=0;   // NOTE(review): never used
    while(line!=null){
        line=line.trim();
//        String[] aCase=line.split(",");
        String[] aCase=line.split("\\s+");
        // First whitespace-separated token is the class label; the rest are series values.
        int classValue=(int)Double.parseDouble(aCase[0]);
        data= new Double[aCase.length-1];
        for(int i=0;i<aCase.length-1;i++)
            data[i]=Double.parseDouble(aCase[i+1]);
        // Test rows are streamed straight to the arff rather than buffered like train.
        for(int j=0;j<data.length;j++)
            if(Double.isNaN(data[j]))
                testArff.writeString("?,");
            else
                testArff.writeString(data[j]+",");
        testArff.writeLine(classValue+"");
        line=testTxt.readLine();
    }
}
}

/**
 * Sanity-check each generated ARFF pair: load the train/test split and print the
 * accuracy of a 1-NN (IBk) classifier trained on train and evaluated on test.
 */
public static void testArffs(String[] problems, String path){
    String header;              // NOTE(review): unused
    Instances train,test;
    for(String str:problems){
        System.out.println("Loading ARFF for "+str);
        train=DatasetLoading.loadDataNullable(path+str+"\\"+str+"_TRAIN.arff");
        test=DatasetLoading.loadDataNullable(path+str+"\\"+str+"_TEST.arff");
        Classifier c= new IBk();
        double acc = ClassifierTools.singleTrainTestSplitAccuracy(c, train, test);
        System.out.println(" 1NN acc on "+str +" = "+acc);
    }
}

/**
 * Scan the raw .txt archive: count cases per split, verify all series in each file
 * have equal length, and record which problems ship a README.md.
 * NOTE(review): hard-coded Windows network path.
 */
public static void describeTextFiles(){
    String path="Z:\\Data\\NewTSCProblems\\";
    InFile train;
    InFile test;
    InFile text;
    int count=1;
    File dirList=new File(path);
    String[] fn = dirList.list();
    System.out.println("Number of problems ="+fn.length);
    OutFile out = new
OutFile(path+"DataDescription.csv"); out.writeLine("Problem,TrainCases,TestCases,NumberClasses,FixedLength,ReadMe"); for(String str:tscProblems2018){ // System.out.println("Testing problem "+str+" number "+count); train=new InFile(path+str+"\\"+str+"_TRAIN.txt"); test=new InFile(path+str+"\\"+str+"_TEST.txt"); int trainCases=train.countLines(); int testCases=test.countLines(); // System.out.println(str+" train cases ="+trainCases+" test cases = "+testCases); train=new InFile(path+str+"\\"+str+"_TRAIN.txt"); test=new InFile(path+str+"\\"+str+"_TEST.txt"); int attributes=0; for(int i=0;i<trainCases;i++){ String[] line = train.readLine().split("\\s+"); // System.out.println(" line "+count+" length in train ="+str+" = "+line.length); if(i==0){ attributes=line.length; // System.out.println("First line length in train ="+str+" = "+line.length); } else{ if(attributes!=line.length){ System.out.println("VARIABLE LENGTH PROBLEM IN TRAIN ="+str+" first line ="+attributes+" current line ="+line.length); } } } attributes=0; for(int i=0;i<testCases;i++){ String[] line = test.readLine().split("\\s+"); if(i==0) attributes=line.length; else{ if(attributes!=line.length){ System.out.println("VARIABLE LENGTH PROBLEM IN TEST ="+str+" first line ="+attributes+" current line ="+line.length); } } } out.writeString(str+","+trainCases+","+testCases+",,,"); File f=new File(path+str+"\\README.md"); if(f.exists()) out.writeLine("true"); else out.writeLine("false"); count++; // if(count==3) // break; } } public static void pack(String sourceDirPath, String zipFilePath) throws IOException { Path p = Files.createFile(Paths.get(zipFilePath)); try (ZipOutputStream zs = new ZipOutputStream(Files.newOutputStream(p))) { Path pp = Paths.get(sourceDirPath); Files.walk(pp) .filter(path -> !Files.isDirectory(path)) .forEach(path -> { ZipEntry zipEntry = new ZipEntry(pp.relativize(path).toString()); try { zs.putNextEntry(zipEntry); Files.copy(path, zs); zs.closeEntry(); } catch (IOException e) { 
System.err.println(e);     // (continuation of pack) report per-entry copy failure, keep zipping the rest
        }
    });
    }
}

/**
 * Zips each listed problem directory under sourceDirPath into zipDirPath/<name>.zip,
 * skipping entries whose source folder is missing or whose zip already exists.
 */
public static void packAll(String sourceDirPath, String zipDirPath, String[] files) throws IOException{
    for(String str:files){
        File src=new File(sourceDirPath+str);
        if(src.exists()){
            File zip=new File(zipDirPath+str+".zip");
            if(!zip.exists()){
                System.out.println("Packing "+sourceDirPath+str+" to "+zipDirPath+str+".zip");
                pack(sourceDirPath+str,zipDirPath+str+".zip");
            }
            else{
                System.out.println(sourceDirPath+str+" to "+zipDirPath+str+".zip already exists");
            }
        }
        else
            System.out.println("Directory "+sourceDirPath+str+" does not exist");
    }
}

// Unimplemented stub; the comments below list the intended upload-table columns.
public static void makeUploadTable(){
// Dataset_id,Dataset,Donator1,Donator2,Train_size,Test_size,Length,
//Number_of_classes,Type,Best_algorithm,Best_acc,Original_source,Paper_first_used,Image,Description
//First_link,Second_link,First_used_TSC,Timestamp,Multivariate Flag, Dimension
}

// True iff the problem name appears in the variableLengthUnivariate list.
// NOTE(review): the name says "missing" but the list checked is the variable-length
// one — confirm which property this is meant to test.
public static boolean hasMissing(String file){
    for(String str: variableLengthUnivariate)
        if(str.equals(file))
            return true;
    return false;
}

// Re-writes a CSV so every field is wrapped in double quotes (one output row per input line).
public static void makeUpLoadFile(String dest, String source){
    OutFile of = new OutFile(dest);
    InFile inf=new InFile(source);
    String line=inf.readLine();
    while(line!=null){
        String[] split=line.split(",");
        for(int i=0;i<split.length-1;i++)
            of.writeString("\""+split[i]+"\",");
        of.writeLine("\""+split[split.length-1]+"\"");
        line=inf.readLine();
    }
}

// Ad-hoc driver. Everything after System.exit(0) is dead scratch code kept for reuse.
public static void main(String[] args) throws Exception{
    String problemPath="Z:\\Results Working Area\\HIVE-COTE\\New Univariate Datasets\\";
    dataDescription(DatasetLists.newForHC2Paper,problemPath);
    System.exit(0);
    // Unreachable below this point.
    for(String str:newProblems27)
        System.out.println(str);
    String path="E:\\Data\\TSCProblems2018\\";
    makeUpLoadFile("Z:\\Data\\MultivariateTSCProblems\\formattedUpload.csv","Z:\\Data\\MultivariateTSCProblems\\upload.csv");
    OutFile of = new OutFile("C:\\temp\\TSCNoMissing.txt");
    for(String str:tscProblems2018){
        if(!hasMissing(str))
            of.writeLine(str);
    }
    String zipPath="Z:\\Data\\TSCProblems2018_Zips\\";
    String[] test={"Adiac"};
    //
packAll(path,zipPath,tscProblems2018); // testArffs(tscProblems2018); // pack("Z:\\Data\\NewTSCProblems\\Car","c:\\temp\\car.zip"); // path="C:\\New TSC Data\\UCR_archive_2018_to_release\\"; buildArffs(test,path); // buildArffs(tscProblems2018); // createReadmeFiles(tscProblems2018); // describeTextFiles(); // dataDescription(uciFileNames); /* for(String s:uciFileNames){ Instances train =ClassifierTools.loadDataThrowable(uciPath+s+"\\"+s+"-train"); Instances test =ClassifierTools.loadDataThrowable(uciPath+s+"\\"+s+"-test"); System.out.println(s); } */ } public static class MetaData implements Comparable<MetaData>{ String fileName; int trainSetSize; int testSetSize; int seriesLength; int nosClasses; int[] classDistribution; boolean normalised=true; public MetaData(String n, int t1, int t2, int s, int c, int[] dist,boolean norm){ fileName=n; trainSetSize=t1; testSetSize=t2; seriesLength=s; nosClasses=c; classDistribution=dist; normalised=norm; } @Override public String toString(){ String str= fileName+","+trainSetSize+","+testSetSize+","+seriesLength+","+nosClasses+","+normalised; for(int i:classDistribution) str+=","+i; return str; } @Override public int compareTo(MetaData o) { return seriesLength-o.seriesLength; } public static class CompareByTrain implements Comparator<MetaData>{ @Override public int compare(MetaData a, MetaData b) { return a.trainSetSize-b.trainSetSize; } } public static class CompareByTrainSetSize implements Comparator<MetaData>{ @Override public int compare(MetaData a, MetaData b) { return a.trainSetSize-b.trainSetSize; } } public static class CompareByClasses implements Comparator<MetaData>{ @Override public int compare(MetaData a, MetaData b) { return a.nosClasses-b.nosClasses; } } public static class CompareByTotalSize implements Comparator<MetaData>{ @Override public int compare(MetaData a, MetaData b) { return a.seriesLength*a.trainSetSize-b.seriesLength*b.trainSetSize; } } } }
82,910
36.618421
1,009
java
tsml-java
tsml-java-master/src/main/java/experiments/data/DatasetLoading.java
/* * Copyright (C) 2019 xmw13bzu * * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package experiments.data; import experiments.ClassifierExperiments; import experiments.ClassifierLists; import tsml.classifiers.distance_based.utils.strings.StrUtils; import tsml.data_containers.TimeSeriesInstances; import tsml.data_containers.ts_fileIO.TSWriter; import tsml.data_containers.utilities.Converter; import tsml.data_containers.utilities.TimeSeriesResampler; import utilities.ClassifierTools; import utilities.InstanceTools; import utilities.multivariate_tools.MultivariateInstanceTools; import weka.classifiers.Classifier; import weka.core.Attribute; import weka.core.Instances; import weka.core.converters.ArffSaver; import java.io.*; import java.net.URL; import java.nio.channels.Channels; import java.nio.channels.ReadableByteChannel; import java.util.Map; import java.util.TreeMap; import java.util.logging.Level; import java.util.logging.Logger; import java.util.regex.Pattern; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; /** * Class for handling the loading of datasets, from disk and the baked-in example datasets * * @author James Large (james.large@uea.ac.uk) */ public class DatasetLoading { private final static Logger LOGGER = 
Logger.getLogger(ClassifierExperiments.class.getName()); private static final String BAKED_IN_DATA_MASTERPATH = "src/main/java/experiments/data/"; public static final String BAKED_IN_UCI_DATA_PATH = BAKED_IN_DATA_MASTERPATH + "uci/"; public static final String BAKED_IN_TSC_DATA_PATH = BAKED_IN_DATA_MASTERPATH + "tsc/"; public static final String BAKED_IN_MTSC_DATA_PATH = BAKED_IN_DATA_MASTERPATH + "mtsc/"; public static final String[] BAKED_IN_UCI_DATASETS = {"iris", "hayes-roth", "teaching"}; public static final String[] BAKED_IN_TSC_DATASETS = {"ItalyPowerDemand", "Beef"}; public static final String[] BAKED_IN_MTSC_DATASETS = {"BasicMotions"}; private static String LOXO_ATT_ID = "experimentsSplitAttribute"; private static double proportionKeptForTraining = 0.5; private static int MAX_DECIMAL_PLACES = Integer.MAX_VALUE; private static boolean debug = false; public static String getLeaveOneXOutAttributeID() { return LOXO_ATT_ID; } public static void setLeaveOneXOutAttributeID(String LOXO_ATT_ID) { DatasetLoading.LOXO_ATT_ID = LOXO_ATT_ID; } public static double getProportionKeptForTraining() { return proportionKeptForTraining; } public static void setProportionKeptForTraining(double proportionKeptForTraining) { DatasetLoading.proportionKeptForTraining = proportionKeptForTraining; } public static void setDebug(boolean d) { debug = d; if (debug) LOGGER.setLevel(Level.FINEST); else LOGGER.setLevel(Level.INFO); } public static boolean getDebug() { return debug; } /* * Instances functions */ /** * Helper function for loading the baked-in BasicMotions dataset, one of the * UEA datasets for MTSC * * http://timeseriesclassification.com/description.php?Dataset=BasicMotions * * UEA-MTSC data comes with predefined fold 0 splits. If a seed of 0 is given, that exact split is returned. * Train/test distributions are maintained between resamples. 
* * @param seed the seed for resampling the data * @return new Instances[] { trainSet, testSet }; * @throws Exception if data loading or sampling failed */ public static Instances[] sampleBasicMotions(int seed) throws Exception { return sampleDataset(BAKED_IN_MTSC_DATA_PATH, "BasicMotions", seed); } /** * Helper function for loading the baked-in Beef dataset, one of the * UCR datasets for TSC * * http://timeseriesclassification.com/description.php?Dataset=Beef * * UCR data comes with predefined fold 0 splits. If a seed of 0 is given, that exact split is returned. * Train/test distributions are maintained between resamples. * * @param seed the seed for resampling the data. * @return new Instances[] { trainSet, testSet }; * @throws Exception if data loading or sampling failed */ public static Instances[] sampleBeef(int seed) throws Exception { return sampleDataset(BAKED_IN_TSC_DATA_PATH, "Beef", seed); } /** * Helper function for loading the baked-in ERing dataset, one of the * UEA datasets for MTSC * * http://timeseriesclassification.com/description.php?Dataset=ERing * * UEA-MTSC data comes with predefined fold 0 splits. If a seed of 0 is given, that exact split is returned. * Train/test distributions are maintained between resamples. * * @param seed the seed for resampling the data * @return new Instances[] { trainSet, testSet }; * @throws Exception if data loading or sampling failed */ public static Instances[] sampleERing(int seed) throws Exception { return sampleDataset(BAKED_IN_MTSC_DATA_PATH, "ERing", seed); } /** * Helper function for loading the baked-in GunPoint dataset, one of the * UCR datasets for TSC * * https://timeseriesclassification.com/description.php?Dataset=GunPoint * * UCR data comes with predefined fold 0 splits. If a seed of 0 is given, that exact split is returned. * Train/test distributions are maintained between resamples. * * @param seed the seed for resampling the data. 
* @return new Instances[] { trainSet, testSet }; * @throws Exception if data loading or sampling failed */ public static Instances[] sampleGunPoint(int seed) throws Exception { return sampleDataset(BAKED_IN_TSC_DATA_PATH, "GunPoint", seed); } /** * Helper function for loading the baked-in Hayes-Roth dataset, one of the classical * UCI datasets for general classification * * https://archive.ics.uci.edu/ml/datasets/Hayes-Roth * * UCI data comes in a single file. The proportion of data kept for training is * defined by the static proportionKeptForTraining, default = 0.5 * * @param seed the seed for resampling the data. * @return new Instances[] { trainSet, testSet }; * @throws Exception if data loading or sampling failed */ public static Instances[] sampleHayesRoth(int seed) throws Exception { return sampleDataset(BAKED_IN_UCI_DATA_PATH, "hayes-roth", seed); } /** * Helper function for loading the baked-in Iris dataset, one of the classical * UCI datasets for general classification * * https://archive.ics.uci.edu/ml/datasets/iris * * UCI data comes in a single file. The proportion of data kept for training is * defined by the static proportionKeptForTraining, default = 0.5 * * @param seed the seed for resampling the data. * @return new Instances[] { trainSet, testSet }; * @throws Exception if data loading or sampling failed */ public static Instances[] sampleIris(int seed) throws Exception { return sampleDataset(BAKED_IN_UCI_DATA_PATH, "iris", seed); } /** * Helper function for loading the baked-in ItalyPowerDemand dataset, one of the * UCR datasets for TSC * * http://timeseriesclassification.com/description.php?Dataset=ItalyPowerDemand * * UCR data comes with predefined fold 0 splits. If a seed of 0 is given, that exact split is returned. * Train/test distributions are maintained between resamples. * * @param seed the seed for resampling the data. 
* @return new Instances[] { trainSet, testSet }; * @throws Exception if data loading or sampling failed */ public static Instances[] sampleItalyPowerDemand(int seed) throws Exception { return sampleDataset(BAKED_IN_TSC_DATA_PATH, "ItalyPowerDemand", seed); } /** * Helper function for loading the baked-in ChinaTown dataset, one of the * UCR datasets for TSC * * http://timeseriesclassification.com/description.php?Dataset=ChinaTown * * UCR data comes with predefined fold 0 splits. If a seed of 0 is given, that exact split is returned. * Train/test distributions are maintained between resamples. * * @param seed the seed for resampling the data. * @return new Instances[] { trainSet, testSet }; * @throws Exception if data loading or sampling failed */ public static Instances[] sampleChinatown(int seed) throws Exception { return sampleDataset(BAKED_IN_TSC_DATA_PATH, "Chinatown", seed); } public static Instances loadBasicMotions() throws Exception { final Instances[] instances = sampleBasicMotions(0); instances[0].addAll(instances[1]); return instances[0]; } public static Instances loadBeef() throws Exception { final Instances[] instances = sampleBeef(0); instances[0].addAll(instances[1]); return instances[0]; } public static Instances loadGunPoint() throws Exception { final Instances[] instances = sampleGunPoint(0); instances[0].addAll(instances[1]); return instances[0]; } public static Instances loadItalyPowerDemand() throws Exception { final Instances[] instances = sampleItalyPowerDemand(0); instances[0].addAll(instances[1]); return instances[0]; } public static Instances loadChinatown() throws Exception { final Instances[] instances = sampleChinatown(0); instances[0].addAll(instances[1]); return instances[0]; } /** * This method will return a train/test split of the problem, resampled with the fold ID given. * * Currently, there are four ways to load datasets. 
These will be attempted from * top to bottom, in an order designed to make the fewest assumptions * possible about the nature of the split, in terms of potential differences in class distributions, * train and test set sizes, etc. * * 1) if predefined splits are found at the specified location, in the form dataLocation/dsetName/dsetName0_TRAIN and TEST, * these will be loaded and used as they are, OTHERWISE... * 2) if a predefined fold0 split is given as in the UCR archive, and fold0 is being experimented on, the split exactly as it is defined will be used. * For fold != 0, the fold0 split is combined and resampled, maintaining the original train and test distributions. OTHERWISE... * 3) if only a single file is found containing all the data, this dataset is stratified randomly resampled with proportionKeptForTraining (default=0.5) * instances reserved for the _TRAIN_ set. OTHERWISE... * 4) if the dataset loaded has a first attribute whose name _contains_ the string "experimentsSplitAttribute".toLowerCase() * then it will be assumed that we want to perform a leave out one X cross validation. Instances are sampled such that fold N is comprised of * a test set with all instances with first-attribute equal to the Nth unique value in a sorted list of first-attributes. The train * set would be all other instances. The first attribute would then be removed from all instances, so that they are not given * to the classifier to potentially learn from. It is up to the user to ensure the the foldID requested is within the range of possible * values 1 to numUniqueFirstAttValues OTHERWISE... 
* 5) error * * @return new Instances[] { trainSet, testSet }; */ public static Instances[] sampleDataset(String parentFolder, String problem, int fold) throws Exception { parentFolder = StrUtils.asDirPath(parentFolder); Instances[] data = new Instances[2]; File trainFile = new File(parentFolder + problem + "/" + problem + fold + "_TRAIN.arff"); File testFile = new File(parentFolder + problem + "/" + problem + fold + "_TEST.arff"); boolean predefinedSplitsExist = trainFile.exists() && testFile.exists(); if (!predefinedSplitsExist){ //.arff files dont exist, look for .ts trainFile = new File(parentFolder + problem + "/" + problem + fold + "_TRAIN.ts"); testFile = new File(parentFolder + problem + "/" + problem + fold + "_TEST.ts"); predefinedSplitsExist = trainFile.exists() && testFile.exists(); } if (predefinedSplitsExist) { // CASE 1) data[0] = loadDataThrowable(trainFile); data[1] = loadDataThrowable(testFile); LOGGER.log(Level.FINE, problem + " loaded from predefined folds."); } else { trainFile = new File(parentFolder + problem + "/" + problem + "_TRAIN.arff"); testFile = new File(parentFolder + problem + "/" + problem + "_TEST.arff"); boolean predefinedFold0Exists = trainFile.exists() && testFile.exists(); if (!predefinedFold0Exists){ //.arff files dont exist, look for .ts trainFile = new File(parentFolder + problem + "/" + problem + "_TRAIN.ts"); testFile = new File(parentFolder + problem + "/" + problem + "_TEST.ts"); predefinedFold0Exists = trainFile.exists() && testFile.exists(); } if (predefinedFold0Exists) { // CASE 2) data[0] = loadDataThrowable(trainFile); data[1] = loadDataThrowable(testFile); if (fold != 0) // data = InstanceTools.resampleTrainAndTestInstances(data[0], data[1], fold); // data = InstanceTools.resampleTrainAndTestInstances(data[0], data[1], fold); if (data[0].checkForAttributeType(Attribute.RELATIONAL)) { data = MultivariateInstanceTools.resampleMultivariateTrainAndTestInstances(data[0], data[1], fold); // data = 
MultivariateInstanceTools.resampleMultivariateTrainAndTestInstances_old(data[0], data[1], fold); } else { data = InstanceTools.resampleTrainAndTestInstances(data[0], data[1], fold); } LOGGER.log(Level.FINE, problem + " resampled from predfined fold0 split."); } else { // We only have a single file with all the data Instances all = null; try { all = DatasetLoading.loadDataThrowable(parentFolder + problem + "/" + problem); } catch (IOException io) { String msg = "Could not find the dataset \"" + problem + "\" in any form at the path\n" + parentFolder + "\n" + "The IOException: " + io; LOGGER.log(Level.SEVERE, msg, io); } boolean needToDefineLeaveOutOneXFold = all.attribute(0).name().toLowerCase().contains(LOXO_ATT_ID.toLowerCase()); if (needToDefineLeaveOutOneXFold) { // CASE 4) data = splitDatasetByFirstAttribute(all, fold); LOGGER.log(Level.FINE, problem + " resampled from full data file."); } else { // CASE 3) if (all.checkForAttributeType(Attribute.RELATIONAL)) { data = MultivariateInstanceTools.resampleMultivariateInstances(all, fold, proportionKeptForTraining); } else { data = InstanceTools.resampleInstances(all, fold, proportionKeptForTraining); } LOGGER.log(Level.FINE, problem + " resampled from full data file."); } } } return data; } /** * If the dataset loaded has a first attribute whose name _contains_ the string "experimentsSplitAttribute".toLowerCase() * then it will be assumed that we want to perform a leave out one X cross validation. Instances are sampled such that fold N is comprised of * a test set with all instances with first-attribute equal to the Nth unique value in a sorted list of first-attributes. The train * set would be all other instances. The first attribute would then be removed from all instances, so that they are not given * to the classifier to potentially learn from. 
It is up to the user to ensure the the foldID requested is within the range of possible * values 1 to numUniqueFirstAttValues * * @return new Instances[] { trainSet, testSet }; */ public static Instances[] splitDatasetByFirstAttribute(Instances all, int foldId) { TreeMap<Double, Integer> splitVariables = new TreeMap<>(); for (int i = 0; i < all.numInstances(); i++) { //even if it's a string attribute, this val corresponds to the index into the array of possible strings for this att double key = all.instance(i).value(0); Integer val = splitVariables.get(key); if (val == null) val = 0; splitVariables.put(key, ++val); } //find the split attribute value to keep for testing this fold double idToReserveForTestSet = -1; int testSize = -1; int c = 0; for (Map.Entry<Double, Integer> splitVariable : splitVariables.entrySet()) { if (c++ == foldId) { idToReserveForTestSet = splitVariable.getKey(); testSize = splitVariable.getValue(); } } //make the split Instances train = new Instances(all, all.size() - testSize); Instances test = new Instances(all, testSize); for (int i = 0; i < all.numInstances(); i++) if (all.instance(i).value(0) == idToReserveForTestSet) test.add(all.instance(i)); train.addAll(all); //delete the split attribute train.deleteAttributeAt(0); test.deleteAttributeAt(0); return new Instances[]{train, test}; } /** * Loads the arff file at the target location and sets the last attribute to be the class value, * or throws IOException on any error. * * @param fullPath path to the file to try and load * @return Instances from file. * @throws java.io.IOException if cannot find the file, or file is malformed */ public static Instances loadDataThrowable(String fullPath) throws IOException { return loadDataThrowable(new File(fullPath)); } /** * Loads the arff file at the target location and sets the last attribute to be the class value, * or throws IOException on any error. * * @param targetFile the file to try and load * @return Instances from file. 
* @throws java.io.IOException if cannot find the file, or file is malformed
 */
public static Instances loadDataThrowable(File targetFile) throws IOException {
    String[] parts = targetFile.getName().split(Pattern.quote("."));
    String extension = "";
    final String ARFF = ".arff", TS = ".ts";
    if (parts.length == 2) {
        extension = "." + parts[1]; //split will remove the .
    } else {
        //have not been given a specific extension
        //look for arff or ts formats
        //arbitrarily looking for arff first
        File newtarget = new File(targetFile.getAbsolutePath() + ARFF);
        if (newtarget.exists()) {
            targetFile = newtarget;
            extension = ARFF;
        } else {
            newtarget = new File(targetFile.getAbsolutePath() + TS);
            if (newtarget.exists()) {
                targetFile = newtarget;
                extension = TS;
            } else
                throw new IOException("Cannot find file " + targetFile.getAbsolutePath() + " with either .arff or .ts extensions.");
        }
    }
    Instances inst = null;
    // NOTE(review): FileReader uses the platform default charset (pre-Java 18);
    // an explicit charset would be safer — confirm the expected data encoding.
    FileReader reader = new FileReader(targetFile);
    if (extension.toLowerCase().equals(ARFF)) {
        inst = new Instances(reader);
    } else if (extension.toLowerCase().equals(TS)) {
        TSReader tsreader = new TSReader(reader);
        inst = tsreader.GetInstances();
    }
    // Convention throughout this class: the class label is the final attribute.
    inst.setClassIndex(inst.numAttributes() - 1);
    reader.close();
    return inst;
}

/**
 * Loads the arff file at the target location and sets the last attribute to be the class value,
 * or returns null on any error, such as not finding the file or it being malformed
 *
 * NOTE(review): despite the summary above, this delegates to loadDataThrowable and so
 * propagates IOException rather than returning null — see loadDataNullable for that.
 *
 * @param fullPath path to the file to try and load
 * @return Instances from file.
 */
public static Instances loadData(String fullPath) throws IOException {
    return loadDataThrowable(new File(fullPath));
}

/**
 * Downloads fileName.zip from timeseriesclassification.com into the baked-in TSC
 * data folder if not already present, unzips it, then loads the requested
 * _TRAIN or _TEST .ts split (returns null if the expected file is absent).
 */
public static Instances loadDataFromWeb(String fileName, Boolean isTrain) throws IOException {
    String webLink = "https://timeseriesclassification.com/Downloads";
    String fileExtension = isTrain?
"_TRAIN.ts" : "_TEST.ts"; String folderPathString = BAKED_IN_TSC_DATA_PATH + fileName; String filePathString = folderPathString + "/"+fileName+ fileExtension; File f = new File(folderPathString); Instances instances = null; if (!f.exists()) { URL website = new URL(webLink + "/" + fileName + ".zip"); ReadableByteChannel rbc = Channels.newChannel(website.openStream()); FileOutputStream fos = new FileOutputStream(folderPathString + ".zip"); fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); String zipFilePath = folderPathString + ".zip"; unzip(zipFilePath, folderPathString); File zipFile = new File(zipFilePath); zipFile.delete(); f = new File(folderPathString); } if(f.exists() && f.isDirectory()) { File file = new File(filePathString); if (file.exists() && !file.isDirectory() ){ instances = loadDataThrowable(file); } //instances = loadDataThrowable(filePathString); ; } return instances ; } private static void unzip(String zipFilePath, String folderPath) { File folder = new File(folderPath); // create output directory if it doesn't exist if(!folder.exists()) folder.mkdirs(); FileInputStream fis = null; ZipInputStream zis = null; ZipEntry ze = null; //buffer for read and write data to file byte[] buffer = new byte[1024]; try { fis = new FileInputStream(zipFilePath); zis = new ZipInputStream(fis); ze = zis.getNextEntry(); while(ze != null){ String fileName = ze.getName(); File newFile = new File(folderPath + File.separator + fileName); System.out.println("Unzipping to "+newFile.getAbsolutePath()); //create directories for sub directories in zip new File(newFile.getParent()).mkdirs(); FileOutputStream fos = new FileOutputStream(newFile); int len; while ((len = zis.read(buffer)) > 0) { fos.write(buffer, 0, len); } fos.close(); //close this ZipEntry zis.closeEntry(); ze = zis.getNextEntry(); } zis.closeEntry(); zis.close(); fis.close(); } catch (IOException e) { e.printStackTrace(); } } /** * Loads the arff file at the target location and sets the last attribute to be 
the class value, * or returns null on any error, such as not finding the file or it being malformed * * @param fullPath path to the file to try and load * @return Instances from file. */ public static Instances loadDataNullable(String fullPath) { return loadDataNullable(new File(fullPath)); } /** * Loads the arff file at the target location and sets the last attribute to be the class value, * or returns null on any error, such as not finding the file or it being malformed * * @param targetFile the file to try and load * @return Instances from file. */ public static Instances loadDataNullable(File targetFile) { try { return loadDataThrowable(targetFile); } catch (IOException e) { System.out.println("Unable to load data on path " + targetFile.getAbsolutePath() + " Exception thrown =" + e); return null; } } /** * Simple util to saveDatasets out. Useful for shapelet transform. * * @param dataSet * @param fileName */ public static void saveDataset(Instances dataSet, String fileName) { try { ArffSaver saver = new ArffSaver(); saver.setMaxDecimalPlaces(MAX_DECIMAL_PLACES); saver.setInstances(dataSet); if (fileName.endsWith(".arff")) { saver.setFile(new File(fileName)); } else { saver.setFile(new File(fileName + ".arff")); } saver.writeBatch(); } catch (IOException ex) { System.out.println("Error saving transformed dataset" + ex); } } /* * TimeSeriesInstances functions */ /** * Helper function for loading the baked-in BasicMotions dataset, one of the * UEA datasets for MTSC * * http://timeseriesclassification.com/description.php?Dataset=BasicMotions * * UEA-MTSC data comes with predefined fold 0 splits. If a seed of 0 is given, that exact split is returned. * Train/test distributions are maintained between resamples. 
* * @param seed the seed for resampling the data * @return new TimeSeriesInstances[] { trainSet, testSet }; */ public static TimeSeriesInstances[] sampleBasicMotionsTS(int seed) throws IOException { return sampleTSDataset(BAKED_IN_TSC_DATA_PATH, "BasicMotions", seed); } /** * Helper function for loading the baked-in Beef dataset, one of the * UCR datasets for TSC * * http://timeseriesclassification.com/description.php?Dataset=Beef * * UCR data comes with predefined fold 0 splits. If a seed of 0 is given, that exact split is returned. * Train/test distributions are maintained between resamples. * * @param seed the seed for resampling the data. * @return new TimeSeriesInstances[] { trainSet, testSet }; */ public static TimeSeriesInstances[] sampleBeefTS(int seed) throws IOException { return sampleTSDataset(BAKED_IN_TSC_DATA_PATH, "Beef", seed); } /** * Helper function for loading the baked-in Chinatown dataset, one of the * UCR datasets for TSC * * https://timeseriesclassification.com/description.php?Dataset=Chinatown * * UCR data comes with predefined fold 0 splits. If a seed of 0 is given, that exact split is returned. * Train/test distributions are maintained between resamples. * * @param seed the seed for resampling the data. * @return new TimeSeriesInstances[] { trainSet, testSet }; */ public static TimeSeriesInstances[] sampleChinatownTS(int seed) throws IOException { return sampleTSDataset(BAKED_IN_TSC_DATA_PATH, "Chinatown", seed); } /** * Helper function for loading the baked-in GunPoint dataset, one of the * UCR datasets for TSC * * https://timeseriesclassification.com/description.php?Dataset=GunPoint * * UCR data comes with predefined fold 0 splits. If a seed of 0 is given, that exact split is returned. * Train/test distributions are maintained between resamples. * * @param seed the seed for resampling the data. 
* @return new TimeSeriesInstances[] { trainSet, testSet }; */ public static TimeSeriesInstances[] sampleGunPointTS(int seed) throws IOException { return sampleTSDataset(BAKED_IN_TSC_DATA_PATH, "GunPoint", seed); } /** * Helper function for loading the baked-in ItalyPowerDemand dataset, one of the * UCR datasets for TSC * * http://timeseriesclassification.com/description.php?Dataset=ItalyPowerDemand * * UCR data comes with predefined fold 0 splits. If a seed of 0 is given, that exact split is returned. * Train/test distributions are maintained between resamples. * * @param seed the seed for resampling the data. * @return new TimeSeriesInstances[] { trainSet, testSet }; */ public static TimeSeriesInstances[] sampleItalyPowerDemandTS(int seed) throws IOException { return sampleTSDataset(BAKED_IN_TSC_DATA_PATH, "ItalyPowerDemand", seed); } public static TimeSeriesInstances loadBasicMotionsTS() throws IOException { final TimeSeriesInstances[] instances = sampleBasicMotionsTS(0); instances[0].addAll(instances[1]); return instances[0]; } public static TimeSeriesInstances loadBeefTS() throws IOException { final TimeSeriesInstances[] instances = sampleBeefTS(0); instances[0].addAll(instances[1]); return instances[0]; } public static TimeSeriesInstances loadChinatownTS() throws IOException { final TimeSeriesInstances[] instances = sampleChinatownTS(0); instances[0].addAll(instances[1]); return instances[0]; } public static TimeSeriesInstances loadGunPointTS() throws IOException { final TimeSeriesInstances[] instances = sampleGunPointTS(0); instances[0].addAll(instances[1]); return instances[0]; } public static TimeSeriesInstances loadItalyPowerDemandTS() throws IOException { final TimeSeriesInstances[] instances = sampleItalyPowerDemandTS(0); instances[0].addAll(instances[1]); return instances[0]; } public static TimeSeriesInstances[] sampleTSDataset(String parentFolder, String problem, int fold) throws IOException { TimeSeriesInstances[] split = new TimeSeriesInstances[2]; 
TimeSeriesInstances train = DatasetLoading.loadTSData(parentFolder + problem + "/" + problem + "_TRAIN.ts"); TimeSeriesInstances test = DatasetLoading.loadTSData(parentFolder + problem + "/" + problem + "_TEST.ts"); TimeSeriesResampler.TrainTest trainTest = TimeSeriesResampler.resampleTrainTest(train, test, fold); split[0] = trainTest.train; split[1] = trainTest.test; return split; } /** * Loads the ts file at the target location or throws IOException on any error. * * @param targetFile the file to try and load * @return Instances from file. * @throws java.io.IOException if cannot find the file, or file is malformed */ public static TimeSeriesInstances loadTSData(File targetFile) throws IOException { String[] parts = targetFile.getName().split(Pattern.quote(".")); String extension; final String ARFF = ".arff", TS = ".ts"; if (parts.length == 2) { extension = "." + parts[1]; //split will remove the . } else { //have not been given a specific extension //look for ts or arff formats //arbitrarily looking for ts first File newtarget = new File(targetFile.getAbsolutePath() + TS); if (newtarget.exists()) { targetFile = newtarget; extension = TS; } else { newtarget = new File(targetFile.getAbsolutePath() + ARFF); if (newtarget.exists()) { targetFile = newtarget; extension = ARFF; } else throw new IOException("Cannot find file " + targetFile.getAbsolutePath() + " with either .arff or .ts extensions."); } } TimeSeriesInstances inst = null; FileReader reader = new FileReader(targetFile); if (extension.equalsIgnoreCase(TS)) { tsml.data_containers.ts_fileIO.TSReader tsReader = new tsml.data_containers.ts_fileIO.TSReader(reader); inst = tsReader.GetInstances(); } else if (extension.equalsIgnoreCase(ARFF)) { inst = Converter.fromArff(new Instances(reader)); } reader.close(); return inst; } /** * Loads the ts file at the target location. * * @param fullPath path to the file to try and load * @return Instances from file. 
*/
public static TimeSeriesInstances loadTSData(String fullPath) throws IOException {
    return loadTSData(new File(fullPath));
}

/**
 * Simple utility to save a TimeSeriesInstances object to a file.
 *
 * @param dataSet to save
 * @param fileName full path for file to be saved
 */
public static void saveTSDataset(TimeSeriesInstances dataSet, String fileName) {
    try {
        TSWriter writer = new TSWriter(new File(fileName + ".ts"));
        writer.setData(dataSet);
        writer.writeBatch();
    } catch (IOException e) {
        // Best-effort save: failure is reported to stdout, not rethrown.
        System.out.println("Error saving dataset" + e);
    }
}

/*
 * Testing
 */

// Ad-hoc entry point; currently runs a single sampling call (the full
// test suite below is commented out).
public static void main(String[] args) throws Exception {
    // tests();
    DatasetLoading.sampleItalyPowerDemand(0);
}

// Builds the "ED" classifier (presumably Euclidean-distance based — confirm
// in ClassifierLists) on insts and returns true iff it scores perfect
// accuracy when evaluated on that same data.
private static boolean quickEval(Instances insts) throws Exception {
    Classifier ed = ClassifierLists.setClassifierClassic("ED", 0);
    ed.buildClassifier(insts);
    return ClassifierTools.accuracy(insts, ed) == 1.0;
}

// Poor man's assert: throws instead of relying on the JVM's -ea flag.
private static void assert_t(boolean result) throws Exception {
    if (!result) //todo reassess how the proper assert works...
        throw new Exception("Hacky assert failed");
}

/**
 * Obvious candidate for moving over to proper unit tests when codebase updates
 * to incorporate them properly
 */
private static void tests() throws Exception {
    //should handle both with/without extension
    String dataPath = BAKED_IN_UCI_DATA_PATH + "iris/iris";
    System.out.println("Testing: testARFFLoad("+dataPath+")");
    if (testARFFLoad(dataPath))
        System.out.println("Passed: testARFFLoad("+dataPath+")\n");
    dataPath += ".arff";
    System.out.println("Testing: testARFFLoad("+dataPath+")");
    if (testARFFLoad(dataPath))
        System.out.println("Passed: testARFFLoad("+dataPath+")\n");
    System.out.println("Testing: testUCILoad()");
    if (testUCILoad())
        System.out.println("Passed: testUCILoad()\n");
    // 0 loads default split, 1 will resample
    System.out.println("Testing: testTSCLoad("+0+")");
    if (testTSCLoad(0))
        System.out.println("Passed: testTSCLoad("+0+")\n");
    System.out.println("Testing: testTSCLoad("+1+")");
    if (testTSCLoad(1))
        System.out.println("Passed: testTSCLoad("+1+")\n");
    System.out.println("Testing: testMTSCLoad("+0+")");
    if (testMTSCLoad(0))
        System.out.println("Passed: testMTSCLoad("+0+")\n");
    System.out.println("Testing: testMTSCLoad("+1+")");
    if (testMTSCLoad(1))
        System.out.println("Passed: testMTSCLoad("+1+")\n");
}

// Loads the iris ARFF from dataPath and sanity-checks its metadata
// (150 cases, 5 atts, 3 classes) plus a perfect quickEval score.
private static boolean testARFFLoad(String dataPath) throws Exception {
    Instances data = DatasetLoading.loadDataThrowable(dataPath);
    assert_t(data != null);
    assert_t(data.relationName().equals("iris"));
    assert_t(data.numInstances() == 150);
    assert_t(data.numAttributes() == 5);
    assert_t(data.numClasses() == 3);
    assert_t(data.classIndex() == data.numAttributes() - 1);
    assert_t(quickEval(data));
    return true;
}

// Samples iris with a 50/50 split (note: mutates the class-level
// proportionKeptForTraining field) and checks both halves' metadata.
private static boolean testUCILoad() throws Exception {
    proportionKeptForTraining = 0.5;
    Instances[] data = sampleIris(0);
    assert_t(data != null);
    assert_t(data[0] != null);
    assert_t(data[0].relationName().equals("iris"));
    assert_t(data[0].numInstances() == 75);
    assert_t(data[0].numAttributes() == 5);
    assert_t(data[0].numClasses() == 3);
    assert_t(data[0].classIndex() == data[0].numAttributes() - 1);
    assert_t(data[1] != null);
    assert_t(data[1].relationName().equals("iris"));
    assert_t(data[1].numInstances() == 75);
    assert_t(data[1].numAttributes() == 5);
    assert_t(data[1].numClasses() == 3);
    assert_t(data[1].classIndex() == data[1].numAttributes() - 1);
    assert_t(quickEval(data[0]));
    assert_t(quickEval(data[1]));
    return true;
}

// Samples ItalyPowerDemand (univariate TSC, 2 classes) and checks the
// expected split sizes: 67 train / 1029 test, 25 attributes.
private static boolean testTSCLoad(int seed) throws Exception {
    //2 class
    Instances[] data = sampleItalyPowerDemand(seed);
    assert_t(data != null);
    assert_t(data[0] != null);
    assert_t(data[0].relationName().equals("ItalyPowerDemand"));
    assert_t(data[0].numInstances() == 67);
    assert_t(data[0].numAttributes() == 25);
    assert_t(data[0].numClasses() == 2);
    assert_t(data[0].classIndex() == data[0].numAttributes() - 1);
    assert_t(data[1] != null);
    assert_t(data[1].relationName().equals("ItalyPowerDemand"));
    assert_t(data[1].numInstances() == 1029);
    assert_t(data[1].numAttributes() == 25);
    assert_t(data[1].numClasses() == 2);
    assert_t(data[1].classIndex() == data[1].numAttributes() - 1);
    assert_t(quickEval(data[0]));
    assert_t(quickEval(data[1]));
    return true;
}

// Samples BasicMotions (multivariate TSC, relational attribute) and checks
// 40/40 split sizes, 100-attribute dimensions and 4 classes.
private static boolean testMTSCLoad(int seed) throws Exception {
    Instances[] data = sampleBasicMotions(seed);
    assert_t(data != null);
    assert_t(data[0] != null);
    assert_t(data[0].relationName().equals("BasicMotions"));
    assert_t(data[0].numInstances() == 40);
    assert_t(data[0].attribute(0).isRelationValued());
    assert_t(data[0].attribute(0).relation().numAttributes() == 100);
    assert_t(data[0].numClasses() == 4);
    assert_t(data[0].classIndex() == data[0].numAttributes() - 1);
    assert_t(data[1] != null);
    assert_t(data[1].relationName().equals("BasicMotions"));
    assert_t(data[1].numInstances() == 40);
    assert_t(data[1].attribute(0).isRelationValued());
    assert_t(data[1].attribute(0).relation().numAttributes() == 100);
    assert_t(data[1].numClasses() == 4);
    assert_t(data[1].classIndex() == data[1].numAttributes() - 1);
    assert_t(quickEval(data[0]));
    assert_t(quickEval(data[1]));
    return true;
}
}
39,856
39.546287
157
java
tsml-java
tsml-java-master/src/main/java/experiments/data/MultivariateProcessing.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ /* Multivariate data can be stored in Wekas "multi instance" format https://weka.wikispaces.com/Multi-instance+classification for TSC, the basic univariate syntax is */ package experiments.data; import experiments.CollateResults; import experiments.data.DatasetLists; import experiments.data.DatasetLoading; import fileIO.InFile; import fileIO.OutFile; import java.io.File; import java.io.IOException; import utilities.ClassifierTools; import utilities.InstanceTools; import utilities.multivariate_tools.MultivariateInstanceTools; import weka.classifiers.trees.J48; import weka.core.Attribute; import weka.core.Instance; import weka.core.Instances; /** * Sorting out the new archive * @author ajb */ public class MultivariateProcessing { public static void makeConcatenatedFiles() throws IOException { String path="Z:\\Data\\Multivariate TSC Problems\\"; String dest="Z:\\Data\\ConcatenatedMTSC\\"; OutFile out=new OutFile(path+"SummaryData.csv"); out.writeLine("problem,numTrainCases,numTestCases,numDimensions,seriesLength,numClasses"); String[] probs={"BasicMotions"}; for(String prob: DatasetLists.mtscProblems2018){ File t1=new File(dest+prob+"\\"+prob+"_TRAIN.arff"); File t2=new File(dest+prob+"\\"+prob+"_TRAIN.arff"); 
if(!(t1.exists()||t2.exists())){ Instances train =DatasetLoading.loadData(path+prob+"\\"+prob+"_TRAIN"); Instances test =DatasetLoading.loadData(path+prob+"\\"+prob+"_TEST"); System.out.println("PROBLEM "+prob); System.out.println("Num train instances ="+train.numInstances()); System.out.println("Num test instances ="+test.numInstances()); System.out.println("num train attributes (should be 2!)="+train.numAttributes()); System.out.println("num classes="+train.numClasses()); Instance temp=train.instance(0); Instances x= temp.relationalValue(0); System.out.println("train number of dimensions "+x.numInstances()); System.out.println("train number of attributes per dimension "+x.numAttributes()); temp=test.instance(0); x= temp.relationalValue(0); System.out.println("test number of dimensions "+x.numInstances()); System.out.println("test number of attributes per dimension "+x.numAttributes()); out.writeLine(prob+","+train.numInstances()+","+test.numInstances()+","+x.numInstances()+","+x.numAttributes()+","+train.numClasses()); int numAtts=x.numInstances()*x.numAttributes(); System.out.println(" Total number of attributes ="+numAtts); //Build a new train test file of concatenated attributes File f= new File(dest+prob); f.mkdirs(); OutFile uniTrain=new OutFile(dest+prob+"\\"+prob+"_TRAIN.arff"); OutFile uniTest=new OutFile(dest+prob+"\\"+prob+"_TEST.arff");; String header="@relation "+prob+"\n"; for(int i=0;i<numAtts;i++){ header+="@attribute att"+i+" numeric \n"; } header+="@attribute "+train.classAttribute().name()+ " {"; for(int i=0;i<train.numClasses()-1;i++) header+=i+","; header+=train.numClasses()-1+"}\n"; header+="@data \n"; uniTrain.writeString(header); uniTest.writeString(header); for(int i=0;i<train.numInstances();i++){ temp=train.instance(i); x= temp.relationalValue(0); for(Instance y:x){//Each dimension for(int j=0;j<y.numAttributes();j++) uniTrain.writeString(y.value(j)+","); } uniTrain.writeString((int)temp.classValue()+"\n"); } for(int 
i=0;i<test.numInstances();i++){ temp=test.instance(i); x= temp.relationalValue(0); for(Instance y:x){//Each dimension for(int j=0;j<y.numAttributes();j++) uniTest.writeString(y.value(j)+","); } uniTest.writeString((int)temp.classValue()+"\n"); } // System.out.println(" Object type ="+x); train =DatasetLoading.loadData(dest+prob+"\\"+prob+"_TRAIN"); System.out.println("Can load univariate "+dest+prob+"\\"+prob+"_TRAIN"); test =DatasetLoading.loadData(dest+prob+"\\"+prob+"_TEST"); System.out.println("Can load univariate "+dest+prob+"\\"+prob+"_TEST"); } else System.out.println("Already done "+prob); } } static enum MV_Classifiers {SHAPELETI, SHAPELETD, SHAPELET_INDEP, ED_I, ED_D, DTW_I, DTW_D, DTW_A} public static boolean isMultivariateClassifier(String classifier){ for (MV_Classifiers mvClassifier: MV_Classifiers.values()){ if (mvClassifier.name().toLowerCase().equals(classifier.toLowerCase())) { return true; } } return false; } //TODO CHECK TO SEE IF FILES ALREADY MADE public static Instances[] convertToUnivariate(String path, String dest, String prob) throws IOException { if (!CollateResults.validateSingleFoldFile(dest+prob+"_UNI"+"/"+prob+"_UNI_TRAIN") || !CollateResults.validateSingleFoldFile(dest+prob+"_UNI"+"/"+prob+"_UNI_TEST")){ Instances train = DatasetLoading.loadData(path+prob+"/"+prob+"_TRAIN"); Instances test =DatasetLoading.loadData(path+prob+"/"+prob+"_TEST"); Instance temp=test.instance(0); Instances x= temp.relationalValue(0); int numAtts=x.numInstances()*x.numAttributes(); File f= new File(dest+prob+"_UNI"); f.mkdirs(); OutFile uniTrain=new OutFile(dest+prob+"_UNI"+"/"+prob+"_UNI_TRAIN.arff"); OutFile uniTest=new OutFile(dest+prob+"_UNI"+"/"+prob+"_UNI_TEST.arff"); String header="@relation "+prob+"\n"; for(int i=0;i<numAtts;i++){ header+="@attribute att"+i+" numeric \n"; } header+="@attribute "+train.classAttribute().name()+ " {"; for(int i=0;i<train.numClasses()-1;i++) header+=train.classAttribute().value(i)+","; 
            header+=train.classAttribute().value(train.numClasses()-1)+"}\n";
            header+="@data \n";
            uniTrain.writeString(header);
            uniTest.writeString(header);
            // Write each train case: all dimensions concatenated, then the class label string.
            for(int i=0;i<train.numInstances();i++){
                temp=train.instance(i);
                x= temp.relationalValue(0);
                for(Instance y:x){//Each dimension
                    for(int j=0;j<y.numAttributes();j++)
                        uniTrain.writeString(y.value(j)+",");
                }
                uniTrain.writeString(temp.classAttribute().value((int)temp.classValue())+"\n");
            }
            // Same for test cases; a missing class value is written as '?'.
            for(int i=0;i<test.numInstances();i++){
                temp=test.instance(i);
                x= temp.relationalValue(0);
                for(Instance y:x){//Each dimension
                    for(int j=0;j<y.numAttributes();j++)
                        uniTest.writeString(y.value(j)+",");
                }
                if (temp.classIsMissing()){
                    uniTest.writeString("?\n");
                } else {
                    uniTest.writeString(temp.classAttribute().value((int)temp.classValue())+"\n");
                }
            }
        }
        // Reload (whether freshly written or pre-existing) as a sanity check,
        // and return { train, test }.
        // System.out.println(" Object type ="+x);
        Instances train =DatasetLoading.loadData(dest+prob+"_UNI"+"/"+prob+"_UNI_TRAIN");
        System.out.println("Can load univariate "+dest+prob+"_UNI"+"/"+prob+"_UNI_TRAIN");
        Instances test =DatasetLoading.loadData(dest+prob+"_UNI"+"/"+prob+"_UNI_TEST");
        System.out.println("Can load univariate "+dest+prob+"_UNI"+"/"+prob+"_UNI_TEST");
        Instances[] i = new Instances[2];
        i[0] = train;
        i[1] = test;
        return i;
    }

    // Train-only variant of convertToUnivariate: flattens the multivariate
    // TRAIN split into a concatenated univariate ARFF and reloads it.
    //TODO CHECK TO SEE IF FILES ALREADY MADE
    public static Instances convertToUnivariateTrain(String path, String dest, String prob) throws IOException {
        if (!CollateResults.validateSingleFoldFile(dest+prob+"_UNI"+"/"+prob+"_UNI_TRAIN")){
            Instances train =DatasetLoading.loadData(path+prob+"/"+prob+"_TRAIN");
            Instance temp=train.instance(0);
            Instances x= temp.relationalValue(0);
            int numAtts=x.numInstances()*x.numAttributes();
            File f= new File(dest+prob+"_UNI");
            f.mkdirs();
            OutFile uniTrain=new OutFile(dest+prob+"_UNI"+"/"+prob+"_UNI_TRAIN.arff");
            String header="@relation "+prob+"\n";
            for(int i=0;i<numAtts;i++){
                header+="@attribute att"+i+" numeric \n";
            }
            header+="@attribute "+train.classAttribute().name()+ " {";
            for(int i=0;i<train.numClasses()-1;i++)
                header+=train.classAttribute().value(i)+",";
            header+=train.classAttribute().value(train.numClasses()-1)+"}\n";
            header+="@data \n";
            uniTrain.writeString(header);
            for(int i=0;i<train.numInstances();i++){
                temp=train.instance(i);
                x= temp.relationalValue(0);
                for(Instance y:x){//Each dimension
                    for(int j=0;j<y.numAttributes();j++)
                        uniTrain.writeString(y.value(j)+",");
                }
                uniTrain.writeString(temp.classAttribute().value((int)temp.classValue())+"\n");
            }
        }
        // System.out.println(" Object type ="+x);
        Instances train =DatasetLoading.loadData(dest+prob+"_UNI"+"/"+prob+"_UNI_TRAIN");
        System.out.println("Can load univariate "+dest+prob+"_UNI"+"/"+prob+"_UNI_TRAIN");
        return train;
    }

    // Attempts to load every concatenated 2018 MTSC problem, printing a
    // diagnostic line per split; load failures are reported, not rethrown.
    public static void checkConcatenatedFiles(){
        String dest="Z:\\Data\\ConcatenatedMTSC\\";
        for(String prob:DatasetLists.mtscProblems2018){
            // System.out.println(" Object type ="+x);
            try{
                Instances train =DatasetLoading.loadData(dest+prob+"\\"+prob+"_TRAIN");
                System.out.println("Can load univariate "+dest+prob+"\\"+prob+"_TRAIN");
            }catch(Exception e){
                System.out.println("UNABLE TO LOAD :"+prob+" TRAIN FILE: EXCEPTION "+e);
            }
            try{
                Instances test =DatasetLoading.loadData(dest+prob+"\\"+prob+"_TEST");
                System.out.println("Can load univariate "+dest+prob+"\\"+prob+"_TEST");
            }catch(Exception e){
                System.out.println("UNABLE TO LOAD :"+prob+" TEST FILE: EXCEPTION "+e);
            }
        }
    }

    // One-off formatting of the RacketSports problem from hard-coded local
    // paths: transposes the relational data, writes it out, then splits 50/50
    // into TRAIN/TEST files with seed 0.
    public static void formatPhilData() throws IOException {
        Instances multi=DatasetLoading.loadData("C:\\Users\\ajb\\Dropbox\\Data\\Multivariate TSC Problems\\FinalMulti");
        Instances trans=MultivariateInstanceTools.transposeRelationalData(multi);
        // double[][] rawData=
        // Instances temp=DatasetLoading.loadData("C:\\Users\\ajb\\Dropbox\\Data\\Multivariate TSC Problems\\FinalUni");
        // System.out.println(" Uni: num cases "+temp.numInstances()+" num atts ="+temp.numAttributes());
        // Instances mtsc=MultivariateInstanceTools.convertUnivariateToMultivariate(temp,30);
        OutFile out=new OutFile("C:\\Users\\ajb\\Dropbox\\Data\\Multivariate TSC Problems\\RacketSports.arff");
        out.writeString(trans.toString());
        Instances test=DatasetLoading.loadData("C:\\Users\\ajb\\Dropbox\\Data\\Multivariate TSC Problems\\RacketSports.arff");
        System.out.println("New data = "+test);
        Instances[] split=InstanceTools.resampleInstances(test, 0, 0.5);
        OutFile train=new OutFile("C:\\Users\\ajb\\Dropbox\\Data\\Multivariate TSC Problems\\RacketSports\\RacketSports_TRAIN.arff");
        train.writeString(split[0].toString());
        OutFile testF=new OutFile("C:\\Users\\ajb\\Dropbox\\Data\\Multivariate TSC Problems\\RacketSports\\RacketSports_TEST.arff");
        testF.writeString(split[1].toString());
    }

    // Splits a single combined ARFF 50/50 (seed 0) into _TRAIN/_TEST files.
    public static void splitData(String path,String prob) throws IOException {
        Instances all=DatasetLoading.loadData(path+prob+"\\"+prob);
        Instances[] split=InstanceTools.resampleInstances(all, 0, 0.5);
        OutFile out=new OutFile(path+prob+"\\"+prob+"_TRAIN.arff");
        out.writeLine(split[0].toString());
        out=new OutFile(path+prob+"\\"+prob+"_TEST.arff");
        out.writeLine(split[1].toString());
    }

    // One-off 60/40 resample (seed 0) of the DuckDuckGeese multivariate
    // problem into _TRAIN/_TEST ARFFs.
    public static void formatDuckDuckGeese() throws IOException {
        String path="Z:\\Data\\MultivariateTSCProblems\\DuckDuckGeese\\";
        Instances data=DatasetLoading.loadData(path+"DuckDuckGeese");
        Instance temp=data.instance(0);
        Instances x= temp.relationalValue(0);
        System.out.println("train number of dimensions "+x.numInstances());
        System.out.println("train number of attributes per dimension "+x.numAttributes());
        Instances[] split= MultivariateInstanceTools.resampleMultivariateInstances(data, 0, 0.6);
        System.out.println("Train size ="+split[0].numInstances());
        System.out.println("Test size ="+split[1].numInstances());
        OutFile out=new OutFile(path+"DuckDuckGeese_TRAIN.arff");
        out.writeString(split[0]+"");
        out=new OutFile(path+"DuckDuckGeese_TEST.arff");
        out.writeString(split[1]+"");
    }

    // Merges the six per-axis Cricket ARFFs (X/Y/Z, left/right) into one
    // multivariate dataset, then resamples 60/40 into _TRAIN/_TEST.
    public static void formatCricket() throws IOException {
        String path="Z:\\Data\\Multivariate Working Area\\Cricket\\";
        Instances[] data=new Instances[6];
data[0]=DatasetLoading.loadData(path+"CricketXLeft.arff"); data[1]=DatasetLoading.loadData(path+"CricketYLeft.arff"); data[2]=DatasetLoading.loadData(path+"CricketZLeft.arff"); data[3]=DatasetLoading.loadData(path+"CricketXRight.arff"); data[4]=DatasetLoading.loadData(path+"CricketYRight.arff"); data[5]=DatasetLoading.loadData(path+"CricketZRight.arff"); Instances all=MultivariateInstanceTools.mergeToMultivariateInstances(data); OutFile out=new OutFile(path+"Cricket.arff"); System.out.println("Cricket number of instances ="+all.numInstances()); Instance temp=all.instance(0); Instances x= temp.relationalValue(0); System.out.println(" number of dimensions "+x.numInstances()); System.out.println(" number of attributes per dimension "+x.numAttributes()); out.writeString(all+""); Instances[] split= MultivariateInstanceTools.resampleMultivariateInstances(all, 0, 0.6); System.out.println("Train size ="+split[0].numInstances()); System.out.println("Test size ="+split[1].numInstances()); out=new OutFile(path+"Cricket_TRAIN.arff"); out.writeString(split[0]+""); out=new OutFile(path+"Cricket_TEST.arff"); out.writeString(split[1]+""); } public static void makeSingleDimensionFiles() throws IOException { String path="Z:\\Results Working Area\\HIVE-COTE\\New Multivariate Datasets\\"; String[] probs={"CounterMovementJump"};//DatasetLists.newMultivariate for(String prob: probs){ File f= new File(path+prob+"\\"+prob+"Dimension"+(1)+"_TRAIN.arff"); File f2= new File(path+prob+"\\"+prob+"Dimension"+(1)+"_TEST.arff"); if(!f.exists()||!f2.exists()){ Instances train =DatasetLoading.loadData(path+prob+"\\"+prob+"_TRAIN.arff"); Instances test =DatasetLoading.loadData(path+prob+"\\"+prob+"_TEST.arff"); System.out.println("PROBLEM "+prob); System.out.println("Num train instances ="+train.numInstances()); System.out.println("Num test instances ="+test.numInstances()); System.out.println("num attributes (should be 2!)="+train.numAttributes()); System.out.println("num 
classes="+train.numClasses()); Instance temp=train.instance(0); Instances x= temp.relationalValue(0); System.out.println(" number of dimensions "+x.numInstances()); System.out.println(" number of attributes per dimension "+x.numAttributes()); Instances[] splitTest=MultivariateInstanceTools.splitMultivariateInstances(test); Instances[] splitTrain=MultivariateInstanceTools.splitMultivariateInstances(train); System.out.println(" Num split files ="+splitTest.length); for(int i=0;i<splitTrain.length;i++){ System.out.println("Number of test instances = "+splitTest[i].numInstances()); File file=new File(path+"SingleDimensionProblems\\"+prob); file.mkdirs(); OutFile outTrain=new OutFile(path+"SingleDimensionProblems\\"+prob+"\\"+prob+"Dimension"+(i+1)+"_TRAIN.arff"); outTrain.writeLine(splitTrain[i].toString()+""); OutFile outTest=new OutFile(path+"SingleDimensionProblems\\"+prob+"\\"+prob+"Dimension"+(i+1)+"_TEST.arff"); outTest.writeLine(splitTest[i].toString()+""); } } // System.out.println(" Object type ="+x); } } public static void summariseData() throws IOException { String path="Z:\\Data\\MultivariateTSCProblems\\"; OutFile out=new OutFile("Z:\\Data\\MultivariateTSCProblems\\SummaryData.csv"); out.writeLine("problem,numTrainCases,numTestCases,numDimensions,seriesLength,numClasses"); for(String prob: DatasetLists.mtscProblems2018){ Instances train =DatasetLoading.loadData(path+prob+"\\"+prob+"_TRAIN"); Instances test =DatasetLoading.loadData(path+prob+"\\"+prob+"_TEST"); System.out.println("PROBLEM "+prob); System.out.println("Num train instances ="+train.numInstances()); System.out.println("Num test instances ="+test.numInstances()); System.out.println("num attributes (should be 2!)="+train.numAttributes()); System.out.println("num classes="+train.numClasses()); Instance temp=train.instance(0); Instances x= temp.relationalValue(0); System.out.println(" number of dimensions "+x.numInstances()); System.out.println(" number of attributes per dimension 
"+x.numAttributes()); out.writeLine(prob+","+train.numInstances()+","+test.numInstances()+","+x.numInstances()+","+x.numAttributes()+","+train.numClasses()); // System.out.println(" Object type ="+x); } } //1. Format into a standard flat ARFF, then make into a multivariate problem. BCI II data set ia public static void formatSelfRegulationSCP1() throws Exception { String path="C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 2\\Data Set 1a\\"; InFile class1=new InFile(path+"Traindata_0.txt"); InFile class2=new InFile(path+"Traindata_1.txt"); OutFile arff=new OutFile(path+"SelfRegulationSCPUni_TRAIN.arff"); int numC1=135; int numC2=133; int d=6; int m=896; arff.writeLine("@relation SelfRegulationSCP1"); for(int i=1;i<=d*m;i++) arff.writeLine("@attribute att"+i+" real"); arff.writeLine("@attribute cortical {negativity,positivity}"); arff.writeLine("@data"); for(int i=0;i<numC1;i++){ String line=class1.readLine(); String[] split=line.split("\\s+"); for(int j=1;j<=d*m;j++) arff.writeString(split[j]+","); arff.writeLine("negativity"); } for(int i=0;i<numC2;i++){ String line=class2.readLine(); String[] split=line.split("\\s+"); for(int j=1;j<=d*m;j++) arff.writeString(split[j]+","); arff.writeLine("positivity"); } arff.closeFile(); Instances temp=DatasetLoading.loadData(path+"SelfRegulationSCP1Uni_TRAIN.arff"); Instances multi=MultivariateInstanceTools.convertUnivariateToMultivariate(temp,896); System.out.println("Num instances "+multi.numInstances()); System.out.println("Num atts "+multi.numAttributes()); arff=new OutFile(path+"SelfRegulationSCP1_TRAIN.arff"); arff.writeLine(multi.toString()); int testSize=293; InFile test=new InFile(path+"TestData.txt"); arff=new OutFile(path+"SelfRegulationSCP1Uni_TEST.arff"); arff.writeLine("@relation SelfRegulationSCP1"); for(int i=1;i<=d*m;i++) arff.writeLine("@attribute att"+i+" real"); arff.writeLine("@attribute cortical {negativity,positivity}"); arff.writeLine("@data"); for(int i=0;i<testSize;i++){ String line=test.readLine(); 
String[] split=line.split("\\s+"); for(int j=1;j<=d*m;j++) arff.writeString(split[j]+","); if(split[0].equals("0.00")) arff.writeLine("negativity"); else arff.writeLine("positivity"); } temp=DatasetLoading.loadData(path+"SelfRegulationSCPUni_TEST.arff"); multi=MultivariateInstanceTools.convertUnivariateToMultivariate(temp,896); System.out.println("Num instances "+multi.numInstances()); System.out.println("Num atts "+multi.numAttributes()); arff=new OutFile(path+"SelfRegulationSCP1_TEST.arff"); arff.writeLine(multi.toString()); } //1. Format into a standard flat ARFF, then make into a multivariate problem. BCI II data set ib public static void formatSelfRegulationSCP2() throws Exception { String path="C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 2\\Data Set 1b\\"; InFile class1=new InFile(path+"Traindata_0.txt"); InFile class2=new InFile(path+"Traindata_1.txt"); OutFile arff=new OutFile(path+"SelfRegulationSCP2Uni_TRAIN.arff"); int numC1=100; int numC2=100; int d=7; int m=1152; arff.writeLine("@relation SelfRegulationSCP2"); for(int i=1;i<=d*m;i++) arff.writeLine("@attribute att"+i+" real"); arff.writeLine("@attribute cortical {negativity,positivity}"); arff.writeLine("@data"); for(int i=0;i<numC1;i++){ String line=class1.readLine(); String[] split=line.split("\\s+"); for(int j=1;j<=d*m;j++) arff.writeString(split[j]+","); arff.writeLine("negativity"); } for(int i=0;i<numC2;i++){ String line=class2.readLine(); String[] split=line.split("\\s+"); for(int j=1;j<=d*m;j++) arff.writeString(split[j]+","); arff.writeLine("positivity"); } arff.closeFile(); Instances temp=DatasetLoading.loadData(path+"SelfRegulationSCP2Uni_TRAIN.arff"); Instances multi=MultivariateInstanceTools.convertUnivariateToMultivariate(temp,m); System.out.println("Num instances "+multi.numInstances()); System.out.println("Num atts "+multi.numAttributes()); arff=new OutFile(path+"SelfRegulationSCP2_TRAIN.arff"); arff.writeLine(multi.toString()); int testSize=180; InFile test=new 
InFile(path+"TestData.txt"); arff=new OutFile(path+"SelfRegulationSCP2Uni_TEST.arff"); arff.writeLine("@relation SelfRegulationSCP2"); for(int i=1;i<=d*m;i++) arff.writeLine("@attribute att"+i+" real"); arff.writeLine("@attribute cortical {negativity,positivity}"); arff.writeLine("@data"); for(int i=0;i<testSize;i++){ String line=test.readLine(); String[] split=line.split("\\s+"); for(int j=1;j<=d*m;j++) arff.writeString(split[j]+","); if(split[0].equals("0.00")) arff.writeLine("negativity"); else arff.writeLine("positivity"); } temp=DatasetLoading.loadData(path+"SelfRegulationSCP2Uni_TEST.arff"); multi=MultivariateInstanceTools.convertUnivariateToMultivariate(temp,m); System.out.println("Num instances "+multi.numInstances()); System.out.println("Num atts "+multi.numAttributes()); arff=new OutFile(path+"SelfRegulationSCP2_TEST.arff"); arff.writeLine(multi.toString()); } //1. Format into a standard flat ARFF, then make into a multivariate problem. BCI II data set IV public static void formatFingerMovements() throws Exception { String path="C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 2\\Data Set IV\\"; InFile train=new InFile(path+"sp1s_aa_train.txt"); OutFile arffTrain=new OutFile(path+"FingerMovementsUni_TRAIN.arff"); int d=28; int m=50; int trainSize=316; int testSize=100; arffTrain.writeLine("@relation FingerMovements"); for(int i=1;i<=d*m;i++) arffTrain.writeLine("@attribute att"+i+" real"); arffTrain.writeLine("@attribute hand {left,right}"); arffTrain.writeLine("@data"); for(int i=0;i<trainSize;i++){ String line=train.readLine(); String[] split=line.split("\\s+"); for(int j=1;j<=d*m;j++) arffTrain.writeString(split[j]+","); if(split[0].equals("0.00")) arffTrain.writeLine("left"); else arffTrain.writeLine("right"); } Instances temp=DatasetLoading.loadData(path+"FingerMovementsUni_TRAIN.arff"); Instances multi=MultivariateInstanceTools.convertUnivariateToMultivariate(temp,m); System.out.println("Num instances "+multi.numInstances()); System.out.println("Num 
atts "+multi.numAttributes()); arffTrain=new OutFile(path+"FingerMovements_TRAIN.arff"); arffTrain.writeLine(multi.toString()); InFile test=new InFile(path+"sp1s_aa_test.txt"); OutFile arffTest=new OutFile(path+"FingerMovementsUni_TEST.arff"); arffTest.writeLine("@relation FingerMovements"); for(int i=1;i<=d*m;i++) arffTest.writeLine("@attribute att"+i+" real"); arffTest.writeLine("@attribute hand {left,right}"); arffTest.writeLine("@data"); for(int i=0;i<testSize;i++){ String line=test.readLine(); String[] split=line.split("\\s+"); for(int j=1;j<=d*m;j++) arffTest.writeString(split[j]+","); if(split[0].equals("0.00")) arffTest.writeLine("left"); else arffTest.writeLine("right"); } temp=DatasetLoading.loadData(path+"FingerMovementsUni_TEST.arff"); multi=MultivariateInstanceTools.convertUnivariateToMultivariate(temp,m); System.out.println("Num instances "+multi.numInstances()); System.out.println("Num atts "+multi.numAttributes()); arffTrain=new OutFile(path+"FingerMovements_TEST.arff"); arffTrain.writeLine(multi.toString()); } public static void formatCharacterTrajectories() throws Exception { //#classes= 20, d=3, length=109-205, train 6600, test 2200 InFile train = new InFile(""); InFile test = new InFile(""); OutFile trainarff = new OutFile(""); OutFile testarff = new OutFile(""); String line=train.readLine(); while(line!=null){ // String[] split } } //BCI 3 Dataset 1 public static void formatMotorImagery() throws IOException { //Each channel is on a different line in the text file. 
//Labels in a separate text file int m=3000; int d=64; int trainSize=278; int testSize=100; InFile trainCSV=new InFile("C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 3\\Data Set 1\\Competition_train_cnt.csv"); InFile testCSV=new InFile("C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 3\\Data Set 1\\Competition_test_cnt.csv"); InFile trainLabels=new InFile("C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 3\\Data Set 1\\Competition_train_lab.txt"); InFile testLabels=new InFile("C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 3\\Data Set 1\\Test Set Labels.txt"); String arffP="C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 3\\Data Set 1\\MotorImageryUni_TRAIN.arff"; String arffP2="C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 3\\Data Set 1\\MotorImageryUni_TEST.arff"; OutFile arffTrain=new OutFile(arffP); arffTrain.writeLine("@relation MotorImagery"); for(int i=1;i<=d*m;i++) arffTrain.writeLine("@attribute att"+i+" real"); arffTrain.writeLine("@attribute motion{finger,tongue}"); arffTrain.writeLine("@data"); for(int i=0;i<trainSize;i++){ for(int j=0;j<d;j++) arffTrain.writeString(trainCSV.readLine()+","); int label=trainLabels.readInt(); if(label==-1) arffTrain.writeLine("finger"); else arffTrain.writeLine("tongue"); } arffTrain.closeFile(); Instances tr=DatasetLoading.loadData(arffP); System.out.println("Num instances ="+tr.numInstances()+" num atts ="+tr.numAttributes()); Instances multi=MultivariateInstanceTools.convertUnivariateToMultivariate(tr,m); System.out.println("Num instances "+multi.numInstances()); System.out.println("Num atts "+multi.numAttributes()); arffTrain=new OutFile("C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 3\\Data Set 1\\MotorImagery_TRAIN.arff"); arffTrain.writeLine(multi.toString()); OutFile arffTest=new OutFile(arffP2); arffTest.writeLine("@relation MotorImagery"); for(int i=1;i<=d*m;i++) arffTest.writeLine("@attribute att"+i+" real"); arffTest.writeLine("@attribute motion{finger,tongue}"); arffTest.writeLine("@data"); for(int 
i=0;i<testSize;i++){ for(int j=0;j<d;j++) arffTest.writeString(testCSV.readLine()+","); int label=testLabels.readInt(); if(label==-1) arffTest.writeLine("finger"); else arffTest.writeLine("tongue"); } arffTest.closeFile(); Instances te=DatasetLoading.loadData(arffP2); System.out.println("Num instances ="+te.numInstances()+" num atts ="+te.numAttributes()); multi=MultivariateInstanceTools.convertUnivariateToMultivariate(te,m); System.out.println("Num instances "+multi.numInstances()); System.out.println("Num atts "+multi.numAttributes()); arffTest=new OutFile("C:\\Users\\ajb\\Dropbox\\Data\\BCI Competition 3\\Data Set 1\\MotorImagery_TEST.arff"); arffTest.writeLine(multi.toString()); System.out.println("TEST Num instances ="+te.numInstances()+" num atts ="+te.numAttributes()); } public static void main(String[] args) throws Exception { makeSingleDimensionFiles(); System.exit(0); exampleUsage(); String prob="UWaveGestureLibrary"; String dest="Z:\\Data\\UnivariateMTSC\\"; String path="Z:\\Data\\Multivariate TSC Problems\\"; Instances test =DatasetLoading.loadData(path+prob+"\\"+prob+"_TEST"); Instances train =DatasetLoading.loadData(path+prob+"\\"+prob+"_TRAIN"); } public static void exampleUsage() throws IOException { // ECGActivities Instances train,test; train=DatasetLoading.loadData("Z:\\Data\\MultivariateTSCProblems\\ECGActivities\\ECGActivities_TRAIN"); test=DatasetLoading.loadData("Z:\\Data\\MultivariateTSCProblems\\ECGActivities\\ECGActivities_TEST"); // Instances[] split=InstanceTools.resampleTrainAndTestInstances(train, test, 1); Instances[] split=MultivariateInstanceTools.resampleMultivariateTrainAndTestInstances(train, test, 1); System.out.println("IS it relational ? 
"+split[0].checkForAttributeType(Attribute.RELATIONAL)); System.out.println("Fold 1 TRAIN num instances "+split[0].numInstances()+" Num atts ="+(split[0].numAttributes()-1)); // System.out.println(split[0]+""); System.out.println("Fold 1 TRAIN instance 1 num dimensions "+split[0].instance(0).relationalValue(0).numInstances()+" series length "+split[0].instance(0).relationalValue(0).numAttributes()); for(Instance ins:split[0]) System.out.println("Fold TRAIN instance num dimensions "+ins.relationalValue(0).numInstances()+" series length "+ins.relationalValue(0).numAttributes()); } public static void mergeEpilepsy() throws IOException { Instances x,y,z; Instances all; String sourcePath="C:\\Users\\ajb\\Dropbox\\TSC Problems\\EpilepsyX\\"; String destPath="C:\\Users\\ajb\\Dropbox\\Multivariate TSC Problems\\HAR\\Epilepsy\\"; x=DatasetLoading.loadData(sourcePath+"EpilepsyX_ALL"); y=DatasetLoading.loadData(sourcePath+"EpilepsyY_ALL"); z=DatasetLoading.loadData(sourcePath+"EpilepsyZ_ALL"); //Delete the use ID, will reinsert manually after x.deleteAttributeAt(0); y.deleteAttributeAt(0); z.deleteAttributeAt(0); all=utilities.multivariate_tools.MultivariateInstanceTools.mergeToMultivariateInstances(new Instances[]{x,y,z}); // OutFile out=new OutFile(destPath+"EpilepsyNew.arff"); // out.writeString(all.toString()); //Create train test splits so participant 1,2,3 in train and 4,5,6 in test int trainSize=149; int testSize=126; Instances train= new Instances(all,0); Instances test= new Instances(all); for(int i=0;i<trainSize;i++){ Instance t= test.remove(0); train.add(t); } OutFile tr=new OutFile(destPath+"Epilepsy_TRAIN.arff"); OutFile te=new OutFile(destPath+"Epilepsy_TEST.arff"); tr.writeString(train.toString()); te.writeString(test.toString()); } /**A getting started with relational attributes in Weka. 
Once you have the basics * there are a range of tools for manipulating them in * package utilities.multivariate_tools * * See https://weka.wikispaces.com/Multi-instance+classification * for more * */ public static void gettingStarted() throws IOException { //Load a multivariate data set String path="\\\\cmptscsvr.cmp.uea.ac.uk\\ueatsc\\Data\\Multivariate\\univariateConcatExample"; Instances train =DatasetLoading.loadData(path); System.out.println(" univariate data = "+train); path="\\\\cmptscsvr.cmp.uea.ac.uk\\ueatsc\\Data\\Multivariate\\multivariateConcatExample"; train =DatasetLoading.loadData(path); System.out.println(" multivariate data = "+train); //Recover the first instance Instance first=train.instance(0); //Split into separate dimensions Instances split=first.relationalValue(0); System.out.println(" A single multivariate case split into 3 instances with no class values= "+split); for(Instance ins:split) System.out.println("Dimension of first case =" +ins); //Extract as arrays double[][] d = new double[split.numInstances()][]; for(int i=0;i<split.numInstances();i++) d[i]=split.instance(i).toDoubleArray(); } }
39,229
48.098874
204
java
tsml-java
tsml-java-master/src/main/java/experiments/data/TSReader.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package experiments.data;

import utilities.generic_storage.Pair;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;

import java.io.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Optional;

import static utilities.multivariate_tools.MultivariateInstanceTools.createRelationHeader;

/**
 * Reader for the sktime ".ts" time-series file format.
 *
 * Parses the header keywords (@problemName, @univariate, @classLabel, @missing,
 * @timeStamps) followed by the @data section, and builds a weka {@link Instances}
 * object: a flat attribute-per-timepoint dataset for univariate problems, or a
 * single relational attribute (one inner Instance per channel) for multivariate
 * problems. Missing values written as '?' in the file become Double.NaN.
 *
 * BUG FIX (review): the original compared {@code m_Tokenizer.sval == "?"} with
 * reference equality. StreamTokenizer constructs fresh String objects for sval,
 * so that test was effectively always false and '?' tokens were silently stored
 * as the tokenizer's numeric value instead of NaN. Both readers now use the
 * null-safe {@code "?".equals(...)}.
 *
 * @author Aaron Bostrom, pushed 22/4/2020
 */
public class TSReader {

    // Header keywords recognised by readHeader(). These are matched
    // case-insensitively and (except @classLabel) stored in `variables`.
    public static final String PROBLEM_NAME = "@problemName";
    public static final String TIME_STAMPS = "@timeStamps";
    public static final String CLASS_LABEL = "@classLabel";
    public static final String UNIVARIATE = "@univariate";
    public static final String MISSING = "@missing";
    public static final String DATA = "@data";

    // keyword -> raw string value, populated while reading the header.
    private HashMap<String, String> variables = new HashMap<>();

    private final StreamTokenizer m_Tokenizer;
    private int m_Lines;

    Instances m_data;
    private String problemName;
    private boolean univariate = false;
    private boolean missing = false;
    private boolean timeStamps = false;
    private boolean classLabel;
    private ArrayList<String> classLabels;
    private ArrayList<Attribute> attList;

    private ArrayList<ArrayList<Double>> uni_raw_data;
    private ArrayList<ArrayList<ArrayList<Double>>> multi_raw_data;
    private ArrayList<Double> raw_labels;

    /**
     * Reads and fully parses a .ts stream. On return, {@link #GetInstances()}
     * yields the constructed dataset.
     *
     * @param reader source of .ts formatted text
     * @throws IOException on malformed header or premature end of input
     */
    public TSReader(Reader reader) throws IOException {
        m_Tokenizer = new StreamTokenizer(reader);
        initTokenizer();

        readHeader();

        if (univariate) {
            CreateUnivariateInstances();
        } else {
            CreateMultivariateInstances();
        }
    }

    /**
     * Reads every multivariate record, then builds an Instances object whose
     * first attribute is relational (one inner Instance per channel) and whose
     * optional second attribute is the nominal class value.
     */
    private void CreateMultivariateInstances() throws IOException {
        multi_raw_data = new ArrayList<>();
        raw_labels = new ArrayList<>();

        // read each line and extract a data Instance
        Pair<ArrayList<ArrayList<Double>>, Double> multi_series_and_label;
        // extract the multivariate series, and the possible label.
        while ((multi_series_and_label = readMultivariateInstance()) != null) {
            multi_raw_data.add(multi_series_and_label.var1);
            raw_labels.add(multi_series_and_label.var2);
        }

        // go through all the raw data, and find the longest row.
        int max_length = 0;
        for (ArrayList<ArrayList<Double>> channel : multi_raw_data) {
            int curr = channel.stream().mapToInt(ArrayList::size).max().getAsInt();
            if (curr > max_length)
                max_length = curr;
        }

        int numAttsInChannel = max_length;
        // each array in this list is a channel; channel count is assumed
        // constant across cases, taken from the first record.
        int numChannels = multi_raw_data.get(0).size();

        // create attribute list
        attList = new ArrayList<>();

        // construct relational attribute.
        Instances relationHeader = createRelationHeader(numAttsInChannel, numChannels);
        relationHeader.setRelationName("relationalAtt");
        Attribute relational_att = new Attribute("relationalAtt", relationHeader, numAttsInChannel);
        attList.add(relational_att);

        if (classLabel)
            attList.add(new Attribute("classVal", classLabels, classLabels.size()));

        m_data = new Instances(problemName, attList, multi_raw_data.size());

        for (int i = 0; i < multi_raw_data.size(); i++) {
            ArrayList<ArrayList<Double>> series = multi_raw_data.get(i);

            m_data.add(new DenseInstance(attList.size()));

            Instances relational = new Instances(relationHeader, series.size());

            // each dense instance is a row, which is actually a channel.
            for (int k = 0; k < series.size(); k++) {
                DenseInstance ds = new DenseInstance(numAttsInChannel);
                int index = 0;
                for (Double d : series.get(k))
                    ds.setValue(index++, d);
                relational.add(ds);
            }

            // add the relational series to the attribute, and set the value of
            // the att to the relation's index.
            int index = m_data.instance(i).attribute(0).addRelation(relational);
            m_data.instance(i).setValue(0, index);

            // set class value.
            if (classLabel) {
                m_data.instance(i).setValue(1, raw_labels.get(i));
            }
        }
    }

    /**
     * Reads every univariate record, then builds a flat Instances object with
     * one numeric attribute per time point (att1..attN, padded to the longest
     * series) plus an optional trailing nominal class attribute.
     */
    private void CreateUnivariateInstances() throws IOException {
        uni_raw_data = new ArrayList<>();
        raw_labels = new ArrayList<>();

        // read each line and extract a data Instance
        Pair<ArrayList<Double>, Double> series_and_label;
        // extract series and the possible label.
        while ((series_and_label = readUnivariateInstance()) != null) {
            uni_raw_data.add(series_and_label.var1);
            raw_labels.add(series_and_label.var2);
        }

        // go through all the raw data, and find the longest row.
        int max_length = uni_raw_data.stream().mapToInt(ArrayList::size).max().getAsInt();

        // create attribute list
        attList = new ArrayList<>();
        for (int i = 0; i < max_length; i++) {
            attList.add(new Attribute("att" + (i + 1), i));
        }

        if (classLabel)
            attList.add(new Attribute("classVal", classLabels, classLabels.size()));

        this.m_data = new Instances(problemName, attList, uni_raw_data.size());

        for (int i = 0; i < uni_raw_data.size(); i++) {
            ArrayList<Double> timeSeries = uni_raw_data.get(i);

            // add all the time series values; +1 leaves room for the class slot.
            Instance ds = new DenseInstance(max_length + 1);
            int index = 0;
            for (Double d : timeSeries)
                ds.setValue(index++, d);

            // only set the class slot if we have a classLabel; the value goes
            // at the end of the attribute list.
            if (classLabel) {
                ds.setValue(max_length, raw_labels.get(i));
            }

            this.m_data.add(ds);
        }
    }

    /** @return the dataset built by the constructor */
    public Instances GetInstances() {
        return m_data;
    }

    /**
     * Reads one multivariate record: channels separated by ':', terminated by
     * end-of-line, with the final token being the class value (if any).
     *
     * @return (channels, class index) pair, or null at end of file; the class
     *         index is -1.0 when the problem has no class labels
     */
    private Pair<ArrayList<ArrayList<Double>>, Double> readMultivariateInstance() throws IOException {
        getFirstToken();
        if (m_Tokenizer.ttype == StreamTokenizer.TT_EOF) {
            return null;
        }

        ArrayList<ArrayList<Double>> multi_timeSeries = new ArrayList<>();
        String classValue = "";

        ArrayList<Double> timeSeries = new ArrayList<>();
        do {
            // this means we're about to get the class value (or the next channel)
            if (m_Tokenizer.ttype == ':' && classLabel) {
                // add the current time series to the list.
                multi_timeSeries.add(timeSeries);
                timeSeries = new ArrayList<>();
            } else {
                // FIX: use equals(), not ==, so '?' is actually mapped to NaN.
                timeSeries.add("?".equals(m_Tokenizer.sval) ? Double.NaN : m_Tokenizer.nval);
                // the last value to be tokenized should be the class value.
                // can be in string or number format so check both.
                classValue = m_Tokenizer.sval == null ? "" + m_Tokenizer.nval : m_Tokenizer.sval;
            }
            m_Tokenizer.nextToken();
        } while (m_Tokenizer.ttype != StreamTokenizer.TT_EOL);

        // don't add the last series to the list; it held the class token.
        double classVal = classLabel ? (double) this.classLabels.indexOf(classValue) : -1.0;
        return new Pair<>(multi_timeSeries, classVal);
    }

    /**
     * Reads one univariate record: values up to an optional ':' followed by the
     * class token, terminated by end-of-line.
     *
     * @return (series, class index) pair, or null at end of file; the class
     *         index is -1.0 when the problem has no class labels
     */
    private Pair<ArrayList<Double>, Double> readUnivariateInstance() throws IOException {
        getFirstToken();
        if (m_Tokenizer.ttype == StreamTokenizer.TT_EOF) {
            return null;
        }

        ArrayList<Double> timeSeries = new ArrayList<>();
        String classValue = null;

        // read the tokens; a ':' signals that the class value follows.
        boolean bFoundColon = false;
        do {
            // this means we're about to get the class value
            if (m_Tokenizer.ttype == ':' && classLabel) {
                bFoundColon = true;
            } else {
                if (bFoundColon) {
                    classValue = m_Tokenizer.sval == null ? "" + m_Tokenizer.nval : m_Tokenizer.sval;
                    bFoundColon = false;
                } else {
                    // FIX: use equals(), not ==, so '?' is actually mapped to NaN.
                    timeSeries.add("?".equals(m_Tokenizer.sval) ? Double.NaN : m_Tokenizer.nval);
                }
            }

            m_Tokenizer.nextToken();
        } while (m_Tokenizer.ttype != StreamTokenizer.TT_EOL);

        double classVal = classLabel ? (double) this.classLabels.indexOf(classValue) : -1.0;
        return new Pair<>(timeSeries, classVal);
    }

    /** Configures the tokenizer syntax for the .ts format. */
    private void initTokenizer() {
        // Setup the tokenizer to read the stream.
        m_Tokenizer.resetSyntax();

        // word chars cover the printable range above space
        m_Tokenizer.wordChars(' ' + 1, '\u00FF');
        m_Tokenizer.parseNumbers();

        // setup the white space tokens: space and comma separate values
        m_Tokenizer.whitespaceChars(' ', ' ');
        m_Tokenizer.whitespaceChars(',', ',');

        // a colon separates channels / precedes the class value.
        m_Tokenizer.ordinaryChar(':');

        // setup the comment char
        m_Tokenizer.commentChar('#');

        // end of line is a significant token: it means the end of an instance.
        m_Tokenizer.eolIsSignificant(true);
    }

    /**
     * Reads the file header up to and including the @data keyword, populating
     * `variables`, `classLabels` and the derived flags.
     *
     * @throws IOException if required keywords are missing or input ends early
     */
    protected void readHeader() throws IOException {
        // first token should be @problemName, as we skip whitespace and comments.
        getFirstToken();
        if (m_Tokenizer.ttype == StreamTokenizer.TT_EOF) {
            errorMessage("premature end of file");
        }

        do {
            String token = m_Tokenizer.sval;
            if (token.equalsIgnoreCase(CLASS_LABEL)) {
                ExtractClassLabels();
            } else {
                variables.put(token, ExtractVariable(token));
            }
            getNextToken();
        } while (!m_Tokenizer.sval.equalsIgnoreCase(DATA));

        // these are required.
        problemName = variables.get(PROBLEM_NAME);
        if (problemName == null) {
            errorMessage("keyword " + PROBLEM_NAME + " expected");
        }

        if (variables.get(UNIVARIATE) == null) {
            errorMessage("keyword " + UNIVARIATE + " expected");
        } else {
            univariate = Boolean.parseBoolean(variables.get(UNIVARIATE));
        }

        // set optionals.
        if (variables.get(MISSING) != null)
            missing = Boolean.parseBoolean(variables.get(MISSING));
        if (variables.get(TIME_STAMPS) != null)
            timeStamps = Boolean.parseBoolean(variables.get(TIME_STAMPS));

        // clear out last tokens.
        getLastToken(false);
    }

    /**
     * Parses the @classLabel line: a boolean flag, then (if true) the list of
     * class label strings up to end-of-line.
     */
    private void ExtractClassLabels() throws IOException {
        classLabels = new ArrayList<>();
        getNextToken();
        classLabel = Boolean.parseBoolean(m_Tokenizer.sval);

        if (!classLabel) {
            getLastToken(false);
            return;
        }

        getNextToken();
        // now read all the class values until we reach the EOL
        do {
            classLabels.add(m_Tokenizer.sval == null ? "" + m_Tokenizer.nval : m_Tokenizer.sval);
            m_Tokenizer.nextToken();
        } while (m_Tokenizer.ttype != StreamTokenizer.TT_EOL);
    }

    /**
     * Reads the single string value following a header keyword such as
     * {@code @problemName}.
     */
    private String ExtractVariable(String VARIABLE) throws IOException {
        // read the value token that follows the keyword.
        getNextToken();
        String value = m_Tokenizer.sval;
        getLastToken(false);
        return value;
    }

    /**
     * Gets next token, skipping empty lines.
     *
     * @throws IOException if reading the next token fails
     */
    protected void getFirstToken() throws IOException {
        while (m_Tokenizer.nextToken() == StreamTokenizer.TT_EOL) {
        }
        // this handles quotations single and double
        if ((m_Tokenizer.ttype == '\'') || (m_Tokenizer.ttype == '"')) {
            m_Tokenizer.ttype = StreamTokenizer.TT_WORD;
            // this handles ? in the file.
        } else if ((m_Tokenizer.ttype == StreamTokenizer.TT_WORD) && (m_Tokenizer.sval.equals("?"))) {
            m_Tokenizer.ttype = '?';
        }
    }

    /**
     * Gets next token, checking for a premature end of line.
     *
     * @throws IOException if it finds a premature end of line
     */
    protected void getNextToken() throws IOException {
        if (m_Tokenizer.nextToken() == StreamTokenizer.TT_EOL) {
            errorMessage("premature end of line");
        }
        if (m_Tokenizer.ttype == StreamTokenizer.TT_EOF) {
            errorMessage("premature end of file");
        } else if ((m_Tokenizer.ttype == '\'') || (m_Tokenizer.ttype == '"')) {
            m_Tokenizer.ttype = StreamTokenizer.TT_WORD;
        } else if ((m_Tokenizer.ttype == StreamTokenizer.TT_WORD) && (m_Tokenizer.sval.equals("?"))) {
            m_Tokenizer.ttype = '?';
        }
    }

    /**
     * Gets token and checks if it is end of line.
     *
     * @param endOfFileOk whether EOF is OK
     * @throws IOException if it doesn't find an end of line
     */
    protected void getLastToken(boolean endOfFileOk) throws IOException {
        if ((m_Tokenizer.nextToken() != StreamTokenizer.TT_EOL)
                && ((m_Tokenizer.ttype != StreamTokenizer.TT_EOF) || !endOfFileOk)) {
            errorMessage("end of line expected");
        }
    }

    /**
     * Throws error message with line number and last token read.
     *
     * @param msg the error message to be thrown
     * @throws IOException containing the error message
     */
    protected void errorMessage(String msg) throws IOException {
        String str = msg + ", read " + m_Tokenizer.toString();
        if (m_Lines > 0) {
            int line = Integer.parseInt(str.replaceAll(".* line ", ""));
            str = str.replaceAll(" line .*", " line " + (m_Lines + line - 1));
        }
        throw new IOException(str);
    }

    /**
     * Ad-hoc smoke test: loads one univariate and one multivariate problem from
     * hard-coded local paths and times the parse.
     */
    public static void main(String[] args) throws IOException {
        String local_path = "D:\\Work\\Data\\Univariate_ts\\";
        String m_local_path = "D:\\Work\\Data\\Multivariate_ts\\";

        String dataset = "AllGestureWiimoteZ";
        String filepath = local_path + dataset + "\\" + dataset;
        File f = new File(filepath + "_TRAIN" + ".ts");
        long time = System.nanoTime();
        TSReader ts_reader = new TSReader(new FileReader(f));
        System.out.println("after: " + (System.nanoTime() - time));
        Instances train_data = ts_reader.GetInstances();

        String dataset_multi = "CharacterTrajectories";
        String filepath_multi = m_local_path + dataset_multi + "\\" + dataset_multi;
        File f1 = new File(filepath_multi + "_TRAIN" + ".ts");
        System.out.println(f1);
        time = System.nanoTime();
        TSReader ts_reader_multi = new TSReader(new FileReader(f1));
        Instances train_data_multi = ts_reader_multi.GetInstances();
        System.out.println("after: " + (System.nanoTime() - time));
    }
}
17,725
35.101833
204
java
tsml-java
tsml-java-master/src/main/java/fileIO/FullAccessOutFile.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package fileIO;

import java.io.*;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.PosixFilePermission;
import java.util.EnumSet;
import java.util.TreeSet;

/**
 * An OutFile that, when closed, additionally opens the written file's POSIX
 * permissions to everyone (rwx for owner, group and others).
 *
 * On file systems that do not support POSIX permissions (e.g. Windows) the
 * permission change fails; this is reported on stdout and otherwise ignored,
 * matching the original best-effort behaviour.
 */
public class FullAccessOutFile extends OutFile {

    /**
     * @param n path of the file to create/write, passed through to OutFile
     */
    public FullAccessOutFile(String n) {
        super(n);
    }

    /**
     * Closes the underlying writer, then attempts to grant every POSIX
     * permission on the written file.
     */
    @Override
    public void closeFile() {
        outFile.close();
        File f = new File(name);
        // EnumSet.allOf is the idiomatic full-permission set; it replaces the
        // original hand-rolled loop that copied each value into a TreeSet.
        Path path = f.toPath();
        try {
            Files.setPosixFilePermissions(path, EnumSet.allOf(PosixFilePermission.class));
        } catch (Exception e) {
            // Best effort only: non-POSIX file systems throw
            // UnsupportedOperationException; report and carry on.
            System.out.println("UNABLE TO CHANGE PERMISSIONS FOR FILE " + name);
        }
    }
}
1,517
32
78
java
tsml-java
tsml-java-master/src/main/java/fileIO/InFile.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package fileIO;
import java.io.*;
import java.util.*;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * Convenience reader that layers typed access (ints, doubles, strings, whole
 * lines) on top of a {@link BufferedReader}/{@link StreamTokenizer} pair.
 *
 * A mark is placed at the start of the file on construction so {@link #rewind()}
 * can return to the beginning, provided no more than {@code MAXBUFFERSIZE-1}
 * characters have been consumed since {@link #markPoint()} was last called.
 *
 * All I/O failures are rethrown as {@link RuntimeException}s that embed the
 * file name and the tokenizer's current string token.
 */
public class InFile{
    private String fileName;            // path the reader was opened on
    private FileReader fr;
    private BufferedReader in;
    private StreamTokenizer token;      // drives the typed read* methods
    // NOTE(review): markerToken is constructed in several places but never read.
    private StreamTokenizer markerToken;
    // Buffer size for the BufferedReader; also bounds how far rewind() can reach.
    private static int MAXBUFFERSIZE =1000000;

    //Standard File
    /**
     * Opens {@code name} for whitespace/comma-separated reading: spaces and
     * underscores are treated as word characters, commas as whitespace.
     * @throws RuntimeException if the file cannot be opened
     */
    public InFile(String name){
        try{
            fileName=name;
            fr = new FileReader(name);
            in = new BufferedReader(fr,MAXBUFFERSIZE);
            token = new StreamTokenizer(in);
            markerToken= new StreamTokenizer(in);
            token.wordChars(' ',' ');
            token.wordChars('_','_');
            token.whitespaceChars(',',',');
            token.slashStarComments(true);
            markPoint(); //Mark start of file for rewind
        }
        catch(Exception ex) {
            throw new RuntimeException("File "+name+" not found Exception in InFile constructor :"+ex.toString()+" Current token is >"+token.sval);
        }
    }

    /**
     * Opens {@code name} using {@code sep} as the field separator.
     * Unlike the one-arg constructor, underscores are ordinary characters here
     * and spaces keep their default treatment.
     * @throws RuntimeException if the file cannot be opened
     */
    public InFile(String name, char sep){
        try{
            fileName=name;
            fr = new FileReader(name);
            in = new BufferedReader(fr,MAXBUFFERSIZE);
            token = new StreamTokenizer(in);
            token.whitespaceChars(sep,sep);
            token.ordinaryChar('_');
            token.slashStarComments(true);
            markPoint(); //Mark start of file for rewind
        }
        catch(Exception ex){
            throw new RuntimeException("File "+name+" with Separator "+sep+" not found Exception in InFile constructor :"+ex.toString()+" Current token is >"+token.sval);
        }
    }

    //Reopenfile
    /** Reopens the current file from the start, rebuilding reader and tokenizer. */
    public void reopen(){
        try{
            fr = new FileReader(fileName);
            in = new BufferedReader(fr,MAXBUFFERSIZE);
            token = new StreamTokenizer(in);
            markerToken= new StreamTokenizer(in);
            token.wordChars(' ',' ');
            token.wordChars('_','_');
            token.whitespaceChars(',',',');
            token.slashStarComments(true);
            markPoint(); //Mark start of file for rewind
        }
        catch(Exception ex){
            throw new RuntimeException("File "+fileName+" not found on call to reopen() in InFile :"+ex.toString()+" Current token is >"+token.sval);
        }
    }

    /** @return the path this reader was opened on */
    public String getName(){return fileName;}

    //CSV file
    /**
     * Switches this reader to a different file with the default (one-arg
     * constructor) tokenizer configuration. No mark is set here, so
     * {@link #rewind()} behaviour depends on the previous mark.
     */
    public void openFile(String name){
        try{
            fileName=name;
            fr = new FileReader(name);
            in = new BufferedReader(fr,MAXBUFFERSIZE);
            token = new StreamTokenizer(in);
            markerToken= new StreamTokenizer(in);
            token.wordChars(' ',' ');
            token.wordChars('_','_');
            token.whitespaceChars(',',',');
            token.slashStarComments(true);
        }
        catch(Exception ex){
            throw new RuntimeException("File "+name+" not found on call to openFile() in InFile :"+ex.toString()+" Current token is >"+token.sval);
        }
    }

    //CSV file
    /** Closes the underlying reader and file handle. */
    public void closeFile(){
        try {
            in.close();
            fr.close();
        } catch (IOException ex) {
            throw new RuntimeException("Failed to close "+fileName+" not found on call to closeFile() in InFile :"+ex.toString()+" Current token is >"+token.sval);
        }
    }

    /**
     * Reads a single character directly from the buffered reader, bypassing
     * the tokenizer. NOTE(review): at EOF read() returns -1, which this cast
     * turns into (char)0xFFFF rather than signalling end of file.
     */
    public char readChar(){
        char c;
        try{
            c=(char)in.read();
        }
        catch(Exception ex){
            throw new RuntimeException("Failed to read a character from "+fileName+" readChar() in InFile :"+ex.toString()+"Current token is >"+token.sval);
        }
        return(c);
    }

    //Reads and returns
    //Problems: Ignoring comments prior to the line
    //Returns null if EOF??
    /**
     * Reads a raw line from the buffered reader (tokenizer state is untouched,
     * so comments preceding the line are NOT skipped).
     * @return the line without its terminator, or null at end of file
     */
    public String readLine() {
        String v=null;
        try{
            //To force ignore of comments preceeding the line
            /*
            //CHECK
            token.pushBack();
            token.nextToken();
            */
            v=in.readLine();
        }
        catch(Exception ex) {
            throw new RuntimeException("Failed to read a line from "+fileName+" readLine() in InFile :"+ex.toString()+"Current token is >"+token.sval);
        }
        return(v);
    }

    /**
     * Reads the next token and returns it as a boxed Double (for numeric
     * tokens) or a String (otherwise; null for non-word tokens such as EOF).
     * NOTE(review): {@code new Double(...)} is deprecated boxing.
     */
    public Object read(){
        int v=0;
        int t;
        Object o=null;
        try{
            t=token.nextToken();
            if(t==StreamTokenizer.TT_NUMBER)
                o= new Double(token.nval);
            else
                o=token.sval;
        }
        catch(IOException ex){
            throw new RuntimeException("Failed to read a line from "+fileName+" read() in InFile :"+ex.toString()+"Current token is >"+token.sval);
        }
        return o;
    }

    /**
     * Reads the next token as an int (truncating the tokenizer's double value).
     * @throws RuntimeException if the next token is not numeric
     */
    public int readInt(){
        int v=0;
        int t;
        try{
            t=token.nextToken();
            if(t!=StreamTokenizer.TT_NUMBER) {
                System.out.println("ERROR: Attempting to read a non integer");
                System.out.println("Current token is >"+token.sval);
                System.out.println("File name ="+fileName);
                throw new RuntimeException("Failed to read an integer from "+fileName+" readInt() in InFile Current token is >"+token.sval);
            }
            v= (int)token.nval;
        }
        catch(Exception ex){
            throw new RuntimeException("Failed to read an integer from "+fileName+" readInt() in InFile Current token is >"+token.sval);
        }
        return(v);
    }

    /**
     * Reads the next token as a double.
     * @throws RuntimeException if the next token is not numeric
     */
    public double readDouble(){
        double v=0;
        try{
            int t =token.nextToken();
            if(t!=StreamTokenizer.TT_NUMBER){
                System.out.println("ERROR: Attempting to read a non double");
                System.out.println("Current token is >"+token.sval);
                System.out.println("File name ="+fileName);
                throw new RuntimeException("Failed to read a double from "+fileName+" readDouble() in InFile Current token is >"+token.sval);
            }
            v= token.nval;
        }
        catch(Exception ex){
            throw new RuntimeException("Failed to read a double from "+fileName+" readDouble() in InFile Current token is >"+token.sval+" exception ="+ex);
        }
        return(v);
    }

    /**
     * Reads the next token as a float (read as double, narrowed on return).
     * @throws RuntimeException if the next token is not numeric
     */
    public float readFloat(){
        double v=0;
        try{
            int t =token.nextToken();
            if(t!=StreamTokenizer.TT_NUMBER){
                System.out.println("ERROR: Attempting to read a non double");
                System.out.println("Current token is >"+token.sval);
                System.out.println("File name ="+fileName);
                throw new RuntimeException("Failed to read a float from "+fileName+" readFloat() in InFile Current token is >"+token.sval);
            }
            v= token.nval;
        }
        catch(Exception ex) {
            System.out.println("ERROR: wrong Format");
            System.out.println("File name ="+fileName);
            throw new RuntimeException("Failed to read a float from "+fileName+" readFloat() in InFile Current token is >"+token.sval+" exception ="+ex);
        }
        return((float)v);
    }

    /**
     * Character-level read: skips leading whitespace, collects characters up to
     * the first whitespace or {@code delimit}, then consumes (and discards)
     * everything up to and including {@code delimit}.
     * NOTE(review): loops on readChar() never detect EOF — an unterminated
     * field can spin; see the commented-out EOF checks.
     */
    public String readStringToChar(char delimit) {
        String v="";
        char[] name = new char[1];
        try{
            name[0]=readChar();
            while(name[0]==' '||name[0]=='\n'|| name[0]=='\t')
                name[0]=readChar();
            while(name[0]!=' ' && name[0]!='\n' && name[0]!='\t' && name[0]!=delimit) //name!=EOF && )
            {
                v+=new String(name);
                name[0]=readChar();
            }
            while(name[0]!=delimit) //name!=EOF && )
            {
                name[0]=readChar();
            }
        }
        catch(Exception ex){
            throw new RuntimeException("Failed to read a straing from chars from "+fileName+" readStringToChar() in InFile Current token is >"+token.sval+" exception ="+ex);
        }
        return(v);
    }

    /**
     * Character-level read of the next whitespace-delimited word, skipping any
     * leading spaces, tabs and newlines. Same EOF caveat as readStringToChar.
     */
    public String readStringIgnoreWhite() {
        String v="";
        char[] name = new char[1];
        try{
            name[0]=readChar();
            while(name[0]==' '||name[0]=='\n'|| name[0]=='\t')
                name[0]=readChar();
            while(name[0]!=' ' && name[0]!='\n' && name[0]!='\t') //name!=EOF && )
            {
                v+=new String(name);
                name[0]=readChar();
            }
        }
        catch(NoSuchElementException ex) {
            throw new RuntimeException("Failed to read a string from chars from "+fileName+" readStringIgnoreWhite() in InFile Current token is >"+token.sval+" exception ="+ex);
        }
        return(v);
    }

    /**
     * Reads the next token as a word.
     * NOTE(review): a non-word token terminates the whole JVM via
     * System.exit(0) instead of throwing — callers cannot recover.
     */
    public String readString(){
        String v;
        int t;
        try{
            t=token.nextToken();
            if(t!=StreamTokenizer.TT_WORD) {
                System.out.println("ERROR: Attempting to read a non string");
                System.out.println("Current token is >"+token.sval+"\t t ="+token.nval+"\t"+token.toString());
                System.out.println("File name ="+fileName);
                System.exit(0);
            }
            v= token.sval;
        }
        catch(IOException ex){
            throw new RuntimeException("Failed to read a string from "+fileName+" readString() in InFile Current token is >"+token.sval+" exception ="+ex);
        }
        return(v);
    }

    //Reads header line delimited by:Vector! oollld code
    /**
     * Reads one raw line and splits it on the characters of {@code delimit}.
     * @return the fields as a (legacy, raw-typed) Vector
     */
    public Vector readStringLine(String delimit){
        Vector headers = new Vector();
        String line;
        String name;
        try{
            line=readLine();
            StringTokenizer sToke = new StringTokenizer(line,delimit);
            while(sToke.hasMoreTokens())
            {
                name=sToke.nextToken();
                headers.addElement(name);
            }
        }
        catch(NoSuchElementException ex) {
            throw new RuntimeException("Failed to read a string from "+fileName+" readStringLine(String delimit) in InFile Current token is >"+token.sval+" exception ="+ex);
        }
        return(headers);
    }

    /** As {@link #readStringLine(String)} but splitting on default whitespace. */
    public Vector readStringLine(){
        Vector headers = new Vector();
        String line;
        String name;
        try{
            line=readLine();
            StringTokenizer sToke = new StringTokenizer(line);
            while(sToke.hasMoreTokens())
            {
                name=sToke.nextToken();
                headers.addElement(name);
            }
        }
        catch(NoSuchElementException ex){
            throw new RuntimeException("Failed to read a string from "+fileName+" readStringLine() in InFile Current token is >"+token.sval+" exception ="+ex);
        }
        return(headers);
    }

    //PRE: EOF NOT reached during the line
    //POST: Returns NULL if first read is EOF
    /**
     * Reads {@code size} doubles.
     * @return a fresh array of the values, or null if EOF was hit on the first read
     */
    public double[] readDoubleLine(int size){
        double d=readDouble();
        if(token.ttype==StreamTokenizer.TT_EOF)
            return(null);
        double[] data = new double[size];
        data[0]=d;
        for(int i=1;i<size;i++)
            data[i]=readDouble();
        return(data);
    }

    //POST: Returns FALSE if first read is EOF
    /**
     * Fills {@code data} with doubles read from the file.
     * @return false if EOF was hit on the first read, true otherwise
     */
    public boolean readDoubleLine(double[] data) {
        data[0]=this.readDouble();
        if(token.ttype==StreamTokenizer.TT_EOF)
            return(false);
        for(int i=1;i<data.length;i++)
            data[i]=this.readDouble();
        return(true);
    }

    //Reads upto the first occurence of delimit string
    //VERY INEFFICIENT AND HACKED
    //String conversion bad
    //Should check EOF
    //Shouldnt use += for string
    /**
     * Reads characters up to (not including) the first occurrence of
     * {@code delimit}; the delimiter itself is consumed. Same EOF caveat as
     * the other char-level readers, and quadratic string building.
     */
    public String readStringUpTo(char delimit) {
        char[] name = new char[1];
        String header="";
        try{
            name[0]=readChar();
            while(name[0]!=delimit) //name!=EOF && )
            {
                header+=new String(name);
                name[0]=readChar();
            }
        }
        catch(NoSuchElementException ex) {
            System.out.println("IO Exception caught in readStringUpTo");
            throw new RuntimeException("Failed to read a string from "+fileName+" readStringUpTo() in InFile Current token is >"+token.sval+" exception ="+ex);
        }
        return(header);
    }

    //Sets a marker in the BufferedStream that should persist until the
    //file is finished
    /** Marks the current position; valid while at most MAXBUFFERSIZE-1 chars are read past it. */
    public void markPoint(){
        try{
            in.mark(MAXBUFFERSIZE-1);
        }
        catch(IOException ex) {
            throw new RuntimeException("Failed to mark a point from "+fileName+" markPoint() in InFile Current token is >"+token.sval+" exception ="+ex);
        }
    }

    /** Returns to the last {@link #markPoint()} (set at construction by default). */
    public void rewind(){
        try{
            in.reset();
        }
        catch(IOException ex){
            throw new RuntimeException("Failed to mark a point from "+fileName+" rewind() in InFile Current token is >"+token.sval+" exception ="+ex);
        }
    }

    /**
     * Counts the lines remaining from the current position, then rewinds to
     * the last mark. Subject to the mark's read-ahead limit on large files.
     */
    public int countLines(){
        int count =0;
        String str=readLine();
        while(str!=null){
            str=readLine();
            count++;
        }
        rewind();
        return(count);
    }

    //Test Harness
    /** Ad-hoc manual test harness; reads from a hard-coded local path. */
    static public void main(String[] args) {
        InFile t=new InFile("C:/JavaSource/FileIO/test.csv");
        int a,b;
        double x;
        String s,s2;
        a=t.readInt();
        x=t.readDouble();
        s=t.readStringUpTo(',');
        System.out.println(s+"\tEND");
        StringTokenizer st = new StringTokenizer(s);
        s2=st.nextToken("-");
        System.out.println(s2+"STRING");
        a= Integer.parseInt(s2);
        System.out.println(a+"INTEGER");
        // a=(Integer.getInteger(s2)).intValue();
        // System.out.println(a+"\tEND");
    }

    /** @return true iff {@code s} names an existing directory */
    public static boolean directoryExists(String s){
        File f= new File(s);
        if(f.exists() && f.isDirectory())
            return true;
        return false;
    }

    /** @return true iff {@code s} names an existing regular file */
    public static boolean fileExists(String s){
        File f= new File(s);
        if(f.exists() && !f.isDirectory())
            return true;
        return false;
    }

    /**
     * Deletes {@code s} if it is an existing regular file.
     * @return true if a delete was attempted (NOTE(review): the result of
     * File.delete() itself is ignored)
     */
    public static boolean deleteFile(String s){
        File f= new File(s);
        if(f.exists()&& !f.isDirectory()){
            f.delete();
            return true;
        }
        return false;
    }
}
15,291
33.913242
176
java
tsml-java
tsml-java-master/src/main/java/fileIO/OutFile.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package fileIO; import java.io.*; public class OutFile{ private FileWriter fw; private BufferedWriter bw; protected PrintWriter outFile; protected String name; private char delimit; public OutFile(String name) { this.name=name; try{ fw = new FileWriter(name); bw = new BufferedWriter(fw); outFile = new PrintWriter(fw); delimit=' '; } catch(IOException exception) { System.err.println(exception+" File "+ name+" Not found"); } } public OutFile(String name, char delimiter) { try { fw = new FileWriter(name); bw = new BufferedWriter(fw); outFile = new PrintWriter(fw); delimit=delimiter; } catch(IOException exception) { System.out.println(" File "+ name+" Not found"); } } public OutFile(String name, boolean append) { try { fw = new FileWriter(name, append); bw = new BufferedWriter(fw); outFile = new PrintWriter(fw); delimit=' '; } catch(IOException exception) { System.out.println(" File "+ name+" Not found"); } } //Reads and returns single line public boolean writeString(String v) { outFile.print(v); if(outFile.checkError()) return(false); return(true); } public boolean writeLine(String v) { outFile.print(v+"\n"); if(outFile.checkError()) return(false); return(true); } public boolean writeInt(int v) { outFile.print(""+v+delimit); 
if(outFile.checkError()) return(false); return(true); } public boolean writeLong(long v) { outFile.print(""+v+delimit); if(outFile.checkError()) return(false); return(true); } public boolean writeChar(char c) { outFile.print(c); if(outFile.checkError()) return(false); return(true); } public boolean writeBoolean(boolean b) { outFile.print(b); if(outFile.checkError()) return(false); return(true); } public boolean writeDouble(double v) { outFile.print(""+v+delimit); if(outFile.checkError()) return(false); return(true); } public boolean newLine() { outFile.print("\n"); if(outFile.checkError()) return(false); return(true); } public void closeFile() { outFile.close(); } }
3,480
24.977612
76
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/ChooseClassifierFromFile.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package machine_learning.classifiers; import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.File; import java.io.FileNotFoundException; import java.io.FileReader; import java.io.FileWriter; import java.util.ArrayList; import java.util.Arrays; import java.util.Random; import weka.classifiers.Classifier; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; /** * * @author cjr13geu */ public class ChooseClassifierFromFile implements Classifier{ private Random randomNumber; private final int bufferSize = 100000; private int foldNumber = 0; private int indexOfLargest = 0; ArrayList<String> line; /** * if size results path == 1, all classifier's results read from that one path * else, resultsPaths.length must equal classifiers.length, with each index aligning * to the path to read the classifier's results from. * * e.g to read 2 classifiers from one directory, and another 2 from 2 different directories: * * Index | Paths | Classifier * -------------------------- * 0 | pathA | c1 * 1 | pathA | c2 * 2 | pathB | c3 * 3 | pathC | c4 * */ private String[] resultsPaths = { "Results/" }; /** * if resultsWritePath is not set, will default to resultsPaths[0] * i.e, if only reading from one directory, will write back the chosen results * under the same directory. 
if reading from multiple directories but a particular * write path not set, will simply pick the first one given. */ private String resultsWritePath = null; private String classifiers[] = {"TunedSVMRBF", "TunedSVMPolynomial"}; private String name = "EnsembleResults"; private String relationName = "abalone"; private double accuracies[]; private File dir; private BufferedReader[] trainFiles; private BufferedReader testFile; private BufferedWriter outTrain; private BufferedWriter outTest; public void setFold(int foldNumber){ this.foldNumber = foldNumber; } public void setClassifiers(String[] classifiers){ this.classifiers = classifiers; } public void setResultsPath(String[] resultsPaths){ this.resultsPaths = resultsPaths; } public void setResultsPath(String resultsPath){ this.resultsPaths = new String[] { resultsPath }; } public void setResultsWritePath(String writePath) { this.resultsWritePath = writePath; } public void setName(String name){ this.name = name; } public void setRelationName(String name){ this.relationName = name; } @Override public void buildClassifier(Instances data) throws Exception { if (resultsPaths.length > 1) if (resultsPaths.length != classifiers.length) throw new Exception("ChooseClassifierFromFile.buildClassifier: more than one results path given, but number given does not align with the number of classifiers."); if (resultsWritePath == null) resultsWritePath = resultsPaths[0]; dir = new File(resultsWritePath + "/" + this.name + "/Predictions/" + relationName + "/trainFold" + foldNumber + ".csv"); if(!dir.exists()){ try{ trainFiles = new BufferedReader[classifiers.length]; accuracies = new double[classifiers.length]; for (int i = 0; i < classifiers.length; i++) { int pathIndex = resultsPaths.length == 1 ? 
0 : i; trainFiles[i] = new BufferedReader(new FileReader(resultsPaths[pathIndex] + "/"+ classifiers[i] + "/Predictions/" + relationName + "/trainFold" + foldNumber + ".csv"), bufferSize); trainFiles[i].mark(bufferSize); trainFiles[i].readLine(); trainFiles[i].readLine(); accuracies[i] = Double.valueOf(trainFiles[i].readLine()); } for (int i = 0; i < accuracies.length; i++ ) { if ( accuracies[i] > accuracies[indexOfLargest] ) { indexOfLargest = i; } } ArrayList<Integer> duplicates = new ArrayList<>(); for (int i = 0; i < accuracies.length; i++) { if(accuracies[indexOfLargest] == accuracies[i] && indexOfLargest != i){ duplicates.add(i); } } randomNumber = new Random(foldNumber); if(!duplicates.isEmpty()){ indexOfLargest = randomNumber.nextInt(duplicates.size()); } //Write Train file. dir = new File(resultsWritePath + "/" + this.name + "/Predictions/" + relationName); dir.mkdirs(); outTrain = new BufferedWriter(new FileWriter(dir + "/trainFold" + foldNumber + ".csv")); trainFiles[indexOfLargest].reset(); line = new ArrayList<>(Arrays.asList(trainFiles[indexOfLargest].readLine().split(","))); line.set(1, name); outTrain.write(line.toString().replace("[", "").replace("]", "")); outTrain.newLine(); line = new ArrayList<>(Arrays.asList(trainFiles[indexOfLargest].readLine().split(","))); line.add("originalClassifier"); line.add(classifiers[indexOfLargest]); outTrain.write(line.toString().replace("[", "").replace("]", "")); outTrain.newLine(); while((line = new ArrayList<>(Arrays.asList(new String[] { trainFiles[indexOfLargest].readLine() }))).get(0) != null){ outTrain.write(line.get(0)); outTrain.newLine(); } //Write Test file. outTest = new BufferedWriter(new FileWriter(dir + "/testFold" + foldNumber + ".csv")); int pathIndex = resultsPaths.length == 1 ? 
0 : indexOfLargest; testFile = new BufferedReader(new FileReader(resultsPaths[pathIndex] + "/"+ classifiers[indexOfLargest] + "/Predictions/" + relationName + "/testFold" + foldNumber + ".csv"), bufferSize); line = new ArrayList<>(Arrays.asList(testFile.readLine().split(","))); line.set(1, name); outTest.write(line.toString().replace("[", "").replace("]", "")); outTest.newLine(); line = new ArrayList<>(Arrays.asList(testFile.readLine().split(","))); line.add("originalClassifier"); line.add(classifiers[indexOfLargest]); outTest.write(line.toString().replace("[", "").replace("]", "")); outTest.newLine(); while((line = new ArrayList<>(Arrays.asList(new String[] { testFile.readLine() }))).get(0) != null){ outTest.write(line.get(0)); outTest.newLine(); } for (int i = 0; i < classifiers.length; i++) { trainFiles[i].close(); testFile.close(); } outTrain.flush(); outTrain.close(); outTest.flush(); outTest.close(); }catch(FileNotFoundException | NumberFormatException e){ System.out.println("Fold " + foldNumber + " not present: "+ e); } }else{ System.out.println(dir.getAbsolutePath() + ": Already exists."); } } @Override public double classifyInstance(Instance instance) throws Exception { throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates. } @Override public double[] distributionForInstance(Instance instance) throws Exception { classifyInstance(instance); return null; } @Override public Capabilities getCapabilities() { throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates. } }
9,125
38.336207
203
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/ChooseDatasetFromFile.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package machine_learning.classifiers; import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.File; import java.io.FileNotFoundException; import java.io.FileReader; import java.io.FileWriter; import java.util.ArrayList; import java.util.Arrays; import java.util.Random; import weka.classifiers.Classifier; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; /** * Hacky edit of ChooseClassifierFromFile. For the filtering application, I made * many filtered *datasets*, instead of many *classifiers* that preprocess a standard * dataset with a filter. Therefore, to select the best filtering method from the train data, * I essentially want to select the best dataset for a single classifier, instead of * the best classifier for a single dataset. 
* * @author James Large (james.large@uea.ac.uk) */ public class ChooseDatasetFromFile implements Classifier{ private Random randomNumber; private final int bufferSize = 100000; private int foldNumber = 0; private int indexOfLargest = 0; ArrayList<String> line; private String resultsPath = "Results/"; private String name = "EnsembleResults"; private String classifier; private String finalRelationName; private String[] relationNames; private double accuracies[]; private File dir; private BufferedReader[] trainFiles; private BufferedReader testFile; private BufferedWriter outTrain; private BufferedWriter outTest; public void setFold(int foldNumber){ this.foldNumber = foldNumber; } public void setClassifier(String classifier){ this.classifier = classifier; } public void setResultsPath(String resultsPath){ this.resultsPath = resultsPath; } public void setName(String name){ this.name = name; } public void setRelationNames(String[] names){ this.relationNames = names; } public void setFinalRelationName(String name){ this.finalRelationName = name; } @Override public void buildClassifier(Instances data) throws Exception { dir = new File(resultsPath + "/" + this.name + "/Predictions/" + finalRelationName + "/trainFold" + foldNumber + ".csv"); if(!dir.exists()){ try{ trainFiles = new BufferedReader[relationNames.length]; accuracies = new double[relationNames.length]; for (int i = 0; i < relationNames.length; i++) { trainFiles[i] = new BufferedReader(new FileReader(resultsPath + "/"+ classifier + "/Predictions/" + relationNames[i] + "/trainFold" + foldNumber + ".csv"), bufferSize); trainFiles[i].mark(bufferSize); trainFiles[i].readLine(); trainFiles[i].readLine(); accuracies[i] = Double.valueOf(trainFiles[i].readLine()); } for (int i = 0; i < accuracies.length; i++ ) { if ( accuracies[i] > accuracies[indexOfLargest] ) { indexOfLargest = i; } } ArrayList<Integer> duplicates = new ArrayList<>(); for (int i = 0; i < accuracies.length; i++) { if(accuracies[indexOfLargest] == 
accuracies[i] && indexOfLargest != i){ duplicates.add(i); } } randomNumber = new Random(foldNumber); if(!duplicates.isEmpty()){ indexOfLargest = randomNumber.nextInt(duplicates.size()); } //Write Train file. dir = new File(resultsPath + "/" + this.name + "/Predictions/" + finalRelationName); dir.mkdirs(); outTrain = new BufferedWriter(new FileWriter(dir + "/trainFold" + foldNumber + ".csv")); trainFiles[indexOfLargest].reset(); line = new ArrayList<>(Arrays.asList(trainFiles[indexOfLargest].readLine().split(","))); line.set(1, name); outTrain.write(line.toString().replace("[", "").replace("]", "")); outTrain.newLine(); line = new ArrayList<>(Arrays.asList(trainFiles[indexOfLargest].readLine().split(","))); line.add("originalDataset"); line.add(relationNames[indexOfLargest]); outTrain.write(line.toString().replace("[", "").replace("]", "")); outTrain.newLine(); while((line = new ArrayList<>(Arrays.asList(new String[] { trainFiles[indexOfLargest].readLine() }))).get(0) != null){ outTrain.write(line.get(0)); outTrain.newLine(); } //Write Test file. 
outTest = new BufferedWriter(new FileWriter(dir + "/testFold" + foldNumber + ".csv")); testFile = new BufferedReader(new FileReader(resultsPath + "/"+ classifier + "/Predictions/" + relationNames[indexOfLargest] + "/testFold" + foldNumber + ".csv"), bufferSize); line = new ArrayList<>(Arrays.asList(testFile.readLine().split(","))); line.set(1, name); outTest.write(line.toString().replace("[", "").replace("]", "")); outTest.newLine(); line = new ArrayList<>(Arrays.asList(testFile.readLine().split(","))); line.add("originalDataset"); line.add(relationNames[indexOfLargest]); outTest.write(line.toString().replace("[", "").replace("]", "")); outTest.newLine(); while((line = new ArrayList<>(Arrays.asList(new String[] { testFile.readLine() }))).get(0) != null){ outTest.write(line.get(0)); outTest.newLine(); } for (int i = 0; i < relationNames.length; i++) { trainFiles[i].close(); testFile.close(); } outTrain.flush(); outTrain.close(); outTest.flush(); outTest.close(); }catch(FileNotFoundException | NumberFormatException e){ System.out.println("Fold " + foldNumber + " not present: "+ e); } }else{ System.out.println(dir.getAbsolutePath() + ": Already exists."); } } @Override public double classifyInstance(Instance instance) throws Exception { throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates. } @Override public double[] distributionForInstance(Instance instance) throws Exception { classifyInstance(instance); return null; } @Override public Capabilities getCapabilities() { throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates. } }
7,815
38.675127
191
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/ContinuousIntervalTree.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package machine_learning.classifiers; import weka.classifiers.AbstractClassifier; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.Randomizable; import java.io.Serializable; import java.util.ArrayList; import java.util.Random; import java.util.concurrent.TimeUnit; import java.util.function.Function; import static utilities.ArrayUtilities.normalise; import static utilities.ArrayUtilities.sum; /** * A tree for time series interval forests. * Based on the time series tree (TST) implementation from the time series forest (TSF) paper. 
* * @author Matthew Middlehurst **/ public class ContinuousIntervalTree extends AbstractClassifier implements Randomizable, Serializable { private static double log2 = Math.log(2); //Margin gain from TSF paper private boolean useMargin = true; //Number of thresholds to try for attribute splits private int k = 20; //Max tree depth private int maxDepth = Integer.MAX_VALUE; private int seed = 0; private Random rand; private TreeNode root; private int numAttributes; protected static final long serialVersionUID = 2L; public ContinuousIntervalTree() { } @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); result.setMinimumNumberInstances(2); result.enable(Capabilities.Capability.MISSING_VALUES); // attributes result.enable(Capabilities.Capability.NUMERIC_ATTRIBUTES); // class result.enable(Capabilities.Capability.NOMINAL_CLASS); return result; } @Override public void setSeed(int seed) { this.seed = seed; } public void setUseMargin(boolean b) { this.useMargin = b; } public void setK(int i) { this.k = i; } public void setMaxDepth(int i) { this.maxDepth = i; } @Override public int getSeed() { return seed; } @Override public void buildClassifier(Instances data) throws Exception { numAttributes = data.numAttributes() - 1; if (data.classIndex() != numAttributes) throw new Exception("Class attribute must be the last index."); rand = new Random(seed); //thresholds for each attribute double[][] thresholds = findThresholds(data); //Initial tree node setup double[] dist = new double[data.numClasses()]; for (Instance inst : data) { dist[(int) inst.classValue()]++; } double rootEntropy = 0; for (int i = 0; i < data.numClasses(); i++) { double p = dist[i] / data.numInstances(); rootEntropy += p > 0 ? 
-(p * Math.log(p) / log2) : 0; } root = new TreeNode(); root.buildTree(data, thresholds, rootEntropy, dist, -1, false); } @Override public double classifyInstance(Instance instance) throws Exception { double[] probs = distributionForInstance(instance); return tieBreak(probs); } public double classifyInstance(double[][] instance, Function<Interval, Double>[] functions, int[][] intervals, int[] attributes, int[] dimensions) throws Exception { double[] probs = distributionForInstance(instance, functions, intervals, attributes, dimensions); return tieBreak(probs); } public double classifyInstance(double[][][] instance, Function<Interval, Double>[] functions, int[][][] intervals, int[] attributes, int[][] dimensions) throws Exception { double[] probs = distributionForInstance(instance, functions, intervals, attributes, dimensions); return tieBreak(probs); } public double classifyInstance(double[][] instance, Function<Interval, Double>[] functions, int[][] intervals, int[] attributes, int[] dimensions, ArrayList<double[]> info) throws Exception { double[] probs = distributionForInstance(instance, functions, intervals, attributes, dimensions, info); return tieBreak(probs); } public double classifyInstance(double[][][] instance, Function<Interval, Double>[] functions, int[][][] intervals, int[] attributes, int[][] dimensions, ArrayList<double[]> info) throws Exception { double[] probs = distributionForInstance(instance, functions, intervals, attributes, dimensions, info); return tieBreak(probs); } private int tieBreak(double[] probs) { int maxClass = 0; for (int n = 1; n < probs.length; n++) { if (probs[n] > probs[maxClass] || (probs[n] == probs[maxClass] && rand.nextBoolean())) { maxClass = n; } } return maxClass; } @Override public double[] distributionForInstance(Instance instance) throws Exception { return root.distributionForInstance(instance); } //For interval forests, transforms the time series at the node level to save time on predictions (CIF) public double[] 
distributionForInstance(double[][] instance, Function<Interval, Double>[] functions, int[][] intervals, int[] attributes, int[] dimensions) throws Exception { return root.distributionForInstance(instance, functions, intervals, attributes, dimensions); } //For interval forests with multiple representations, transforms the time series at the node level to save time on //predictions (DrCIF) public double[] distributionForInstance(double[][][] instance, Function<Interval, Double>[] functions, int[][][] intervals, int[] attributes, int[][] dimensions) throws Exception { return root.distributionForInstance(instance, functions, intervals, attributes, dimensions); } //Fills the info List with the attribute, threshold and the next node for each node traversed public double[] distributionForInstance(double[][] instance, Function<Interval, Double>[] functions, int[][] intervals, int[] attributes, int[] dimensions, ArrayList<double[]> info) throws Exception { return root.distributionForInstance(instance, functions, intervals, attributes, dimensions, info); } //Same as above but for multiple representations public double[] distributionForInstance(double[][][] instance, Function<Interval, Double>[] functions, int[][][] intervals, int[] attributes, int[][] dimensions, ArrayList<double[]> info) throws Exception { return root.distributionForInstance(instance, functions, intervals, attributes, dimensions, info); } private double[][] findThresholds(Instances data) { double[][] thresholds = new double[numAttributes][k]; for (int i = 0; i < numAttributes; i++) { double min = Double.MAX_VALUE; double max = -99999999; for (Instance inst : data) { double v = inst.value(i); if (v < min) { min = v; } if (v > max) { max = v; } } double step = (max - min) / (k - 1); for (int n = 0; n < k; n++) { thresholds[i][n] = min + step * n; } } return thresholds; } //Returns the attribute used for each node and its information gain public ArrayList<Double>[] getTreeSplitsGain() { ArrayList<Double> 
splits = new ArrayList<>(); ArrayList<Double> gain = new ArrayList<>(); if (root.bestSplit > -1) findSplitsGain(root, splits, gain); ArrayList<Double>[] r = new ArrayList[2]; r[0] = splits; r[1] = gain; return r; } private void findSplitsGain(TreeNode node, ArrayList<Double> splits, ArrayList<Double> gain) { splits.add((double) node.bestSplit); gain.add(node.bestGain); for (int i = 0; i < node.children.length; i++) { if (node.children[i].bestSplit > -1) { findSplitsGain(node.children[i], splits, gain); } } } //Returns true for attributes which are used in tree nodes, false otherwise public boolean[] getAttributesUsed() { boolean[] attsUsed = new boolean[numAttributes]; if (root.bestSplit > -1) findAttributesUsed(root, attsUsed); return attsUsed; } private void findAttributesUsed(TreeNode node, boolean[] attsUsed) { if (!attsUsed[node.bestSplit]) { attsUsed[node.bestSplit] = true; } for (int i = 0; i < node.children.length; i++) { if (node.children[i].bestSplit > -1) { findAttributesUsed(node.children[i], attsUsed); } } } private class TreeNode implements Serializable { int bestSplit = -1; double bestThreshold = 0; double bestGain = 0; double bestMargin = -1; TreeNode[] children; double[] leafDistribution; int depth; protected static final long serialVersionUID = 1L; TreeNode() { } void buildTree(Instances data, double[][] thresholds, double entropy, double[] distribution, int lastDepth, boolean leaf) { double[][] bestEntropies = new double[0][0]; depth = lastDepth + 1; int remainingClasses = 0; for (double d : distribution) { if (d > 0) remainingClasses++; } if (!leaf && remainingClasses > 1 && depth < maxDepth) { //Loop through all attributes each using k threshold values looking the best split for this node for (int i = 0; i < numAttributes; i++) { for (int n = 0; n < k; n++) { //gain stored in [0][0] double[][] entropies = entropyGain(data, i, thresholds[i][n], entropy); if (entropies[0][0] > bestGain || (!useMargin && entropies[0][0] == bestGain && 
entropies[0][0] > 0 && rand.nextBoolean())) { bestSplit = i; bestThreshold = thresholds[i][n]; bestGain = entropies[0][0]; bestMargin = -1; bestEntropies = entropies; } //Use margin gain if there is a tie else if (useMargin && entropies[0][0] == bestGain && entropies[0][0] > 0) { double margin = findMargin(data, i, thresholds[i][n]); if (bestMargin == -1) bestMargin = findMargin(data, bestSplit, bestThreshold); //Select randomly if there is a tie again if (margin > bestMargin || (margin == bestMargin && rand.nextBoolean())) { bestSplit = i; bestThreshold = thresholds[i][n]; bestMargin = margin; bestEntropies = entropies; } } } } } if (bestSplit > -1) { Instances[] split = splitData(data); children = new TreeNode[3]; //Left node children[0] = new TreeNode(); if (split[0].isEmpty()) { children[0].buildTree(split[0], thresholds, entropy, distribution, depth, true); } else { children[0].buildTree(split[0], thresholds, bestEntropies[0][1], bestEntropies[1], depth, false); } //Right node children[1] = new TreeNode(); if (split[1].isEmpty()) { children[1].buildTree(split[1], thresholds, entropy, distribution, depth, true); } else { children[1].buildTree(split[1], thresholds, bestEntropies[0][2], bestEntropies[2], depth, false); } //Missing value node children[2] = new TreeNode(); if (split[2].isEmpty()) { children[2].buildTree(split[2], thresholds, entropy, distribution, depth, true); } else { children[2].buildTree(split[2], thresholds, bestEntropies[0][3], bestEntropies[3], depth, false); } } else { leafDistribution = normalise(distribution); } } //Distribution, entropy for each split and information gain double[][] entropyGain(Instances data, int att, double threshold, double parentEntropy) { double[][] dists = new double[4][data.numClasses()]; for (Instance inst : data) { if (Double.isNaN(inst.value(att))) { dists[3][(int) inst.classValue()]++; } else if (inst.value(att) <= threshold) { dists[1][(int) inst.classValue()]++; } else { dists[2][(int) inst.classValue()]++; 
} } double sumLeft = sum(dists[1]); double sumRight = sum(dists[2]); double sumMissing = sum(dists[3]); double[] entropies = new double[4]; for (int i = 0; i < data.numClasses(); i++) { double p1 = sumLeft > 0 ? dists[1][i] / sumLeft : 0; entropies[1] += p1 > 0 ? -(p1 * Math.log(p1) / log2) : 0; double p2 = sumRight > 0 ? dists[2][i] / sumRight : 0; entropies[2] += p2 > 0 ? -(p2 * Math.log(p2) / log2) : 0; double p3 = sumMissing > 0 ? dists[3][i] / sumMissing : 0; entropies[3] += p3 > 0 ? -(p3 * Math.log(p3) / log2) : 0; } entropies[0] = parentEntropy - sumLeft / data.numInstances() * entropies[1] - sumRight / data.numInstances() * entropies[2] - sumMissing / data.numInstances() * entropies[3]; dists[0] = entropies; return dists; } //Margin gain for tie breaks double findMargin(Instances data, int att, double threshold) { double min = Double.MAX_VALUE; for (Instance inst : data) { double n = Math.abs(inst.value(att) - threshold); if (n < min) { min = n; } } return min; } Instances[] splitData(Instances data) { Instances[] split = new Instances[3]; split[0] = new Instances(data, data.numInstances()); split[1] = new Instances(data, data.numInstances()); split[2] = new Instances(data, data.numInstances()); for (Instance inst : data) { if (Double.isNaN(inst.value(bestSplit))) { split[2].add(inst); } else if (inst.value(bestSplit) <= bestThreshold) { split[0].add(inst); } else { split[1].add(inst); } } return split; } double[] distributionForInstance(Instance inst) { if (bestSplit > -1) { if (Double.isNaN(inst.value(bestSplit))) { return children[2].distributionForInstance(inst); } else if (inst.value(bestSplit) <= bestThreshold) { return children[0].distributionForInstance(inst); } else { return children[1].distributionForInstance(inst); } } else { return leafDistribution; } } double[] distributionForInstance(double[][] inst, Function<Interval, Double>[] functions, int[][] intervals, int[] attributes, int[] dimensions) { if (bestSplit > -1) { int interval = bestSplit / 
attributes.length; int att = bestSplit % attributes.length; int dim = dimensions[interval]; double val = functions[attributes[att]].apply(new Interval(inst[dim], intervals[interval][0], intervals[interval][1])); if (Double.isNaN(val)) { return children[2].distributionForInstance(inst, functions, intervals, attributes, dimensions); } else if (val <= bestThreshold) { return children[0].distributionForInstance(inst, functions, intervals, attributes, dimensions); } else { return children[1].distributionForInstance(inst, functions, intervals, attributes, dimensions); } } else { return leafDistribution; } } double[] distributionForInstance(double[][][] inst, Function<Interval, Double>[] functions, int[][][] intervals, int[] attributes, int[][] dimensions) { if (bestSplit > -1) { int repSum = 0; int rep = -1; for (int i = 0; i < intervals.length; i++) { if (bestSplit < repSum + attributes.length * intervals[i].length) { rep = i; break; } repSum += attributes.length * intervals[i].length; } int att = bestSplit % attributes.length; int interval = (bestSplit - repSum) / attributes.length; int dim = dimensions[rep][interval]; double val = functions[attributes[att]].apply(new Interval(inst[rep][dim], intervals[rep][interval][0], intervals[rep][interval][1])); if (Double.isNaN(val)) { return children[2].distributionForInstance(inst, functions, intervals, attributes, dimensions); } else if (val <= bestThreshold) { return children[0].distributionForInstance(inst, functions, intervals, attributes, dimensions); } else { return children[1].distributionForInstance(inst, functions, intervals, attributes, dimensions); } } else { return leafDistribution; } } double[] distributionForInstance(double[][] inst, Function<Interval, Double>[] functions, int[][] intervals, int[] attributes, int[] dimensions, ArrayList<double[]> info) { if (bestSplit > -1) { int interval = bestSplit / attributes.length; int att = bestSplit % attributes.length; int dim = dimensions[interval]; double val = 
functions[attributes[att]].apply(new Interval(inst[dim], intervals[interval][0], intervals[interval][1])); if (Double.isNaN(val)) { info.add(new double[]{bestSplit, bestThreshold, 2}); return children[2].distributionForInstance(inst, functions, intervals, attributes, dimensions, info); } else if (val <= bestThreshold) { info.add(new double[]{bestSplit, bestThreshold, 0}); return children[0].distributionForInstance(inst, functions, intervals, attributes, dimensions, info); } else { info.add(new double[]{bestSplit, bestThreshold, 1}); return children[1].distributionForInstance(inst, functions, intervals, attributes, dimensions, info); } } else { info.add(leafDistribution); return leafDistribution; } } double[] distributionForInstance(double[][][] inst, Function<Interval, Double>[] functions, int[][][] intervals, int[] attributes, int[][] dimensions, ArrayList<double[]> info) { if (bestSplit > -1) { int repSum = 0; int rep = -1; for (int i = 0; i < intervals.length; i++) { if (bestSplit < repSum + attributes.length * intervals[i].length) { rep = i; break; } repSum += attributes.length * intervals[i].length; } int att = bestSplit % attributes.length; int interval = (bestSplit - repSum) / attributes.length; int dim = dimensions[rep][interval]; double val = functions[attributes[att]].apply(new Interval(inst[rep][dim], intervals[rep][interval][0], intervals[rep][interval][1])); if (Double.isNaN(val)) { info.add(new double[]{bestSplit, bestThreshold, 2}); return children[2].distributionForInstance(inst, functions, intervals, attributes, dimensions, info); } else if (val <= bestThreshold) { info.add(new double[]{bestSplit, bestThreshold, 0}); return children[0].distributionForInstance(inst, functions, intervals, attributes, dimensions, info); } else { info.add(new double[]{bestSplit, bestThreshold, 1}); return children[1].distributionForInstance(inst, functions, intervals, attributes, dimensions, info); } } else { info.add(leafDistribution); return leafDistribution; } } 
@Override public String toString() { return "[" + bestSplit + "," + depth + "]"; } } public static class Interval { public double[] series; public int start; public int end; public Interval(double[] series, int start, int end) { this.series = series; this.start = start; this.end = end; } } }
23,856
39.851027
119
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/LibLinearClassifier.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package machine_learning.classifiers;

import de.bwaldvogel.liblinear.*;
import weka.classifiers.AbstractClassifier;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Randomizable;

import java.util.Random;

/**
 * liblinear wrapper for WEKA.
 *
 * Trains a linear model (default: L2-regularised L2-loss SVC) via the
 * de.bwaldvogel.liblinear port. Optionally z-normalises attributes using
 * training-set statistics and tunes the regularisation constant C by
 * n-fold cross-validation over a fixed grid.
 *
 * @author Matthew Middlehurst
 */
public class LibLinearClassifier extends AbstractClassifier implements Randomizable {

    // Whether to z-normalise each attribute using training means/stdevs.
    private boolean normalise = true;
    // Per-attribute training statistics, populated in buildClassifier when normalise is true.
    private double[] means, stdevs;

    // Whether to select C by cross-validation instead of using the fixed value below.
    private boolean tuneC = true;
    // Number of CV folds used when tuning C (capped at the number of instances).
    private int nr_fold = 5;

    // liblinear parameters; names mirror the liblinear CLI options.
    private double bias = 0;
    private SolverType solverType = SolverType.L2R_L2LOSS_SVC;
    private int iterations = 1000;
    private double e = 0.01;  // stopping tolerance (eps)
    private double p = 0.1;   // epsilon in epsilon-insensitive loss (regression solvers)
    private double c = 1;     // regularisation constant, overridden if tuneC is true

    private Model linearModel;

    private int seed;

    public void setNormalise(boolean normalise) {
        this.normalise = normalise;
    }

    public void setTuneC(boolean tuneC) {
        this.tuneC = tuneC;
    }

    public void setBias(double bias) {
        this.bias = bias;
    }

    public void setSolverType(SolverType solverType) {
        this.solverType = solverType;
    }

    public void setIterations(int iterations) {
        this.iterations = iterations;
    }

    public void setE(double e) {
        this.e = e;
    }

    public void setP(double p) {
        this.p = p;
    }

    public void setC(double c) {
        this.c = c;
    }

    @Override
    public void setSeed(int seed) {
        this.seed = seed;
    }

    @Override
    public int getSeed() {
        return seed;
    }

    /**
     * Builds the linear model. Computes normalisation statistics (if enabled),
     * converts the data to liblinear's sparse FeatureNode format, optionally
     * tunes C over {0.001, 0.01, 0.1, 1, 10, 100} by cross-validation, then
     * trains on the full data with the chosen parameters.
     *
     * @param data training set; class attribute assumed to be the last attribute
     * @throws Exception if liblinear training fails
     */
    @Override
    public void buildClassifier(Instances data) throws Exception {
        if (normalise) {
            means = new double[data.numAttributes() - 1];
            stdevs = new double[data.numAttributes() - 1];

            for (int i = 0; i < data.numAttributes() - 1; i++) {
                for (int n = 0; n < data.numInstances(); n++) {
                    means[i] += data.get(n).value(i);
                }
                means[i] /= data.numInstances();

                // Sample standard deviation (n-1 denominator).
                for (int n = 0; n < data.numInstances(); n++) {
                    double temp = data.get(n).value(i) - means[i];
                    stdevs[i] += temp * temp;
                }
                stdevs[i] = Math.sqrt(stdevs[i] / (data.numInstances() - 1));
                // Guard against division by zero for constant attributes.
                if (stdevs[i] == 0) stdevs[i] = 1;
            }
        }

        // Convert to liblinear's input representation.
        FeatureNode[][] features = new FeatureNode[data.numInstances()][];
        double[] labels = new double[features.length];
        for (int i = 0; i < features.length; i++) {
            Instance inst = data.get(i);
            features[i] = toFeatureNodes(inst);
            labels[i] = inst.classValue();
        }

        Problem problem = new Problem();
        problem.bias = bias;
        problem.y = labels;
        problem.n = data.numAttributes() - 1;
        problem.l = features.length;
        problem.x = features;

        Parameter par = new Parameter(solverType, c, e, iterations, p);

        Linear.resetRandom();
        Linear.disableDebugOutput();

        if (tuneC) {
            final int l = problem.l;
            if (nr_fold > l) {
                nr_fold = l;
            }
            final int[] perm = new int[l];
            final int[] fold_start = new int[nr_fold + 1];
            Random rand = new Random(seed);
            int k;

            // Fisher-Yates shuffle of instance indices for random fold assignment.
            for (k = 0; k < l; k++) {
                perm[k] = k;
            }
            for (k = 0; k < l; k++) {
                int j = k + rand.nextInt(l - k);
                int temp = perm[k];
                perm[k] = perm[j];
                perm[j] = temp;
            }
            // fold i covers perm indices [fold_start[i], fold_start[i+1]).
            for (k = 0; k <= nr_fold; k++) {
                fold_start[k] = k * l / nr_fold;
            }

            double[] cVals = new double[]{0.001, 0.01, 0.1, 1, 10, 100};
            int mostCorrect = Integer.MIN_VALUE;
            for (double cVal : cVals) {
                Parameter subpar = new Parameter(solverType, cVal, e, iterations, p);
                int correct = 0;

                for (int i = 0; i < nr_fold; i++) {
                    int begin = fold_start[i];
                    int end = fold_start[i + 1];
                    int j, kk;

                    // Sub-problem holds everything outside the held-out fold [begin, end).
                    Problem subprob = new Problem();
                    subprob.bias = problem.bias;
                    subprob.n = problem.n;
                    subprob.l = l - (end - begin);
                    subprob.x = new Feature[subprob.l][];
                    subprob.y = new double[subprob.l];

                    kk = 0;
                    for (j = 0; j < begin; j++) {
                        subprob.x[kk] = problem.x[perm[j]];
                        subprob.y[kk] = problem.y[perm[j]];
                        ++kk;
                    }
                    for (j = end; j < l; j++) {
                        subprob.x[kk] = problem.x[perm[j]];
                        subprob.y[kk] = problem.y[perm[j]];
                        ++kk;
                    }

                    Model submodel = Linear.train(subprob, subpar);

                    // Score on the held-out fold.
                    for (j = begin; j < end; j++) {
                        if (problem.y[perm[j]] == Linear.predict(submodel, problem.x[perm[j]]))
                            correct++;
                    }
                }

                // Ties keep the smaller C (strict >), i.e. more regularisation.
                if (correct > mostCorrect) {
                    mostCorrect = correct;
                    par = subpar;
                }
            }
        }

        linearModel = Linear.train(problem, par);
    }

    @Override
    public double classifyInstance(Instance inst) throws Exception {
        FeatureNode[] features = toFeatureNodes(inst);
        return Linear.predict(linearModel, features);
    }

    /**
     * Class probability estimates. Only logistic-regression solvers produce
     * calibrated probabilities; for other solvers this falls back to
     * AbstractClassifier's default (one-hot of classifyInstance).
     */
    public double[] distributionForInstance(Instance inst) throws Exception {
        double[] probs;
        if (solverType.isLogisticRegressionSolver()) {
            FeatureNode[] features = toFeatureNodes(inst);
            probs = new double[inst.dataset().numClasses()];
            Linear.predictProbability(linearModel, features, probs);
        } else {
            probs = super.distributionForInstance(inst);
        }
        return probs;
    }

    // Converts an Instance into liblinear FeatureNodes (1-based indices),
    // applying the stored training normalisation when enabled.
    private FeatureNode[] toFeatureNodes(Instance inst) {
        FeatureNode[] features = new FeatureNode[inst.numAttributes() - 1];
        if (normalise) {
            for (int n = 0; n < features.length; n++) {
                features[n] = new FeatureNode(n + 1, (inst.value(n) - means[n]) / stdevs[n]);
            }
        } else {
            for (int n = 0; n < features.length; n++) {
                features[n] = new FeatureNode(n + 1, inst.value(n));
            }
        }
        return features;
    }
}
7,429
29.958333
106
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/MultiLinearRegression.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package machine_learning.classifiers; import experiments.data.DatasetLoading; import utilities.InstanceTools; import weka.classifiers.AbstractClassifier; import weka.classifiers.functions.LinearRegression; import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; /** * Creates [numClasses] 1vsAll linear regression models. 
Prediction is max of outputs * * @author James Large (james.large@uea.ac.uk) */ public class MultiLinearRegression extends AbstractClassifier { Instances numericClassInsts = null; LinearRegression[] regressors = null; @Override public void buildClassifier(Instances data) throws Exception { //creating the 2class version of the insts numericClassInsts = new Instances(data); numericClassInsts.setClassIndex(0); //temporary numericClassInsts.deleteAttributeAt(numericClassInsts.numAttributes()-1); Attribute newClassAtt = new Attribute("newClassVal"); //numeric class numericClassInsts.insertAttributeAt(newClassAtt, numericClassInsts.numAttributes()); numericClassInsts.setClassIndex(numericClassInsts.numAttributes()-1); //temporary //and building the regressors regressors = new LinearRegression[data.numClasses()]; double[] trueClassVals = data.attributeToDoubleArray(data.classIndex()); for (int c = 0; c < data.numClasses(); c++) { for (int i = 0; i < numericClassInsts.numInstances(); i++) { //if this inst is of the class we're currently handling (c), set new class val to 1 else 0 double cval = trueClassVals[i] == c ? 
1 : 0; numericClassInsts.instance(i).setClassValue(cval); } regressors[c] = new LinearRegression(); regressors[c].buildClassifier(numericClassInsts); } } @Override public double[] distributionForInstance(Instance inst) throws Exception { Instances newinst = new Instances(numericClassInsts, 0); newinst.add(new DenseInstance(1.0, inst.toDoubleArray())); newinst.instance(0).setClassMissing(); double[] outputs = new double[regressors.length]; for (int i = 0; i < outputs.length; i++) { outputs[i] = regressors[i].classifyInstance(newinst.instance(0)); //class 1 is the class being discriminated FOR } double max = utilities.GenericTools.max(outputs); double min = utilities.GenericTools.min(outputs); double sum = .0; //get in range 0 to 1 (may be some negatives) if (max == min) for (int i = 0; i < outputs.length; i++) outputs[i] = 1.0 / outputs.length;//numclasses else { //get in range 0 to 1, since there may be some negative vals //regressing between 0 and 1, something may predict e.g -0.2 for (int i = 0; i < outputs.length; i++) { outputs[i] = (outputs[i] - min) / (max - min); sum += outputs[i]; } //and then make them sum to 1 for (int i = 0; i < outputs.length; i++) outputs[i] /= sum; } return outputs; } @Override public double classifyInstance(Instance inst) throws Exception { double[] dist = distributionForInstance(inst); return utilities.GenericTools.indexOfMax(dist); } public static void main(String[] args) throws Exception { // String dsetGroup = "UCI"; // // String basePath = "C:/JamesLPHD/HESCA/"+dsetGroup+"/"; //// String[] datasets = (new File(basePath + "Results/DTWCV/Predictions/")).list(); // // new MultipleEstimatorEvaluation(basePath+"XGBoostAnalysis/", dsetGroup+"_testy", 10). // setTestResultsOnly(true). //// setBuildMatlabDiagrams(true). // setDatasets(dsetGroup.equals("UCI") ? development.experiments.DataSets.UCIContinuousFileNames : development.experiments.DataSets.fileNames). //// setDatasets(basePath + dsetGroup + "2.txt"). 
// readInClassifiers(new String[] { "MLR", "1NN", "C4.5", }, basePath+dsetGroup+"Results/"). //// readInClassifiers(new String[] { "XGBoost", "XGBoost500Iterations", "RotF", "RandF" }, basePath+dsetGroup+"Results/"). // runComparison(); // Instances train = ClassifierTools.loadDataThrowable("Z:/Data/TSCProblems/ItalyPowerDemand/ItalyPowerDemand_TRAIN.arff"); // Instances test = ClassifierTools.loadDataThrowable("Z:/Data/TSCProblems/ItalyPowerDemand/ItalyPowerDemand_TEST.arff"); Instances all = DatasetLoading.loadDataNullable("Z:/Data/UCIContinuous/molec-biol-promoter/molec-biol-promoter.arff"); int folds = 10; double acc = 0; for (int i = 0; i < folds; i++) { // Instances[] data = InstanceTools.resampleTrainAndTestInstances(train, test, i); Instances[] data = InstanceTools.resampleInstances(all, i, 0.5); MultiLinearRegression mlr = new MultiLinearRegression(); mlr.buildClassifier(data[0]); double a = .0; for (int j = 0; j < data[1].numInstances(); j++) { double pred = mlr.classifyInstance(data[1].instance(j)); double p = data[1].instance(i).classValue(); if (pred == p) a++; } // System.out.println(a); System.out.println((a/data[1].numInstances())); acc+=(a/data[1].numInstances()); } System.out.println("acc="+(acc/folds)); } }
6,543
41.493506
154
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/MultiResponseModelTrees.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package machine_learning.classifiers; import experiments.data.DatasetLoading; import utilities.InstanceTools; import weka.classifiers.AbstractClassifier; import weka.classifiers.trees.M5P; import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; /** * Creates [numClasses] 1vsAll model trees (M5). 
Prediction is max of outputs * * @author James Large (james.large@uea.ac.uk) */ public class MultiResponseModelTrees extends AbstractClassifier { Instances numericClassInsts = null; M5P[] regressors = null; @Override public void buildClassifier(Instances data) throws Exception { //creating the 2class version of the insts numericClassInsts = new Instances(data); numericClassInsts.setClassIndex(0); //temporary numericClassInsts.deleteAttributeAt(numericClassInsts.numAttributes()-1); Attribute newClassAtt = new Attribute("newClassVal"); //numeric class numericClassInsts.insertAttributeAt(newClassAtt, numericClassInsts.numAttributes()); numericClassInsts.setClassIndex(numericClassInsts.numAttributes()-1); //temporary //and building the regressors regressors = new M5P[data.numClasses()]; double[] trueClassVals = data.attributeToDoubleArray(data.classIndex()); for (int c = 0; c < data.numClasses(); c++) { for (int i = 0; i < numericClassInsts.numInstances(); i++) { //if this inst is of the class we're currently handling (c), set new class val to 1 else 0 double cval = trueClassVals[i] == c ? 
1 : 0; numericClassInsts.instance(i).setClassValue(cval); } regressors[c] = new M5P(); regressors[c].buildClassifier(numericClassInsts); } } @Override public double[] distributionForInstance(Instance inst) throws Exception { Instances newinst = new Instances(numericClassInsts, 0); newinst.add(new DenseInstance(1.0, inst.toDoubleArray())); newinst.instance(0).setClassMissing(); double[] outputs = new double[regressors.length]; for (int i = 0; i < outputs.length; i++) { outputs[i] = regressors[i].classifyInstance(newinst.instance(0)); //class 1 is the class being discriminated FOR } double max = utilities.GenericTools.max(outputs); double min = utilities.GenericTools.min(outputs); double sum = .0; //get in range 0 to 1 (may be some negatives) if (max == min) for (int i = 0; i < outputs.length; i++) outputs[i] = 1.0 / outputs.length;//numclasses else { //get in range 0 to 1, since there may be some negative vals //regressing between 0 and 1, something may predict e.g -0.2 for (int i = 0; i < outputs.length; i++) { outputs[i] = (outputs[i] - min) / (max - min); sum += outputs[i]; } //and then make them sum to 1 for (int i = 0; i < outputs.length; i++) outputs[i] /= sum; } return outputs; } @Override public double classifyInstance(Instance inst) throws Exception { double[] dist = distributionForInstance(inst); return utilities.GenericTools.indexOfMax(dist); } public static void main(String[] args) throws Exception { // Instances train = ClassifierTools.loadDataThrowable("Z:/Data/TSCProblems/ItalyPowerDemand/ItalyPowerDemand_TRAIN.arff"); // Instances test = ClassifierTools.loadDataThrowable("Z:/Data/TSCProblems/ItalyPowerDemand/ItalyPowerDemand_TEST.arff"); Instances all = DatasetLoading.loadDataNullable("Z:/Data/UCIContinuous/hayes-roth/hayes-roth.arff"); int folds = 10; double acc = 0; for (int i = 0; i < folds; i++) { // Instances[] data = InstanceTools.resampleTrainAndTestInstances(train, test, i); Instances[] data = InstanceTools.resampleInstances(all, i, 0.5); 
MultiLinearRegression mlr = new MultiLinearRegression(); mlr.buildClassifier(data[0]); double a = .0; for (int j = 0; j < data[1].numInstances(); j++) { double pred = mlr.classifyInstance(data[1].instance(j)); double p = data[1].instance(i).classValue(); if (pred == p) a++; } // System.out.println(a); System.out.println((a/data[1].numInstances())); acc+=(a/data[1].numInstances()); } System.out.println("acc="+(acc/folds)); } }
5,580
39.442029
130
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/PLSNominalClassifier.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package machine_learning.classifiers;

import weka.classifiers.functions.PLSClassifier;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.FastVector;
import weka.core.Instance;
import weka.core.Instances;

/**
 *
 * Built for my (James') alcohol datasets, to allow comparative testing between TSC approaches
 * and the de-facto chemometrics approach, Partial Least Squares regression
 *
 * Extends the weka PLSClassifier, and essentially just converts the nominal class valued
 * dataset passed (initial intention being the ifr non-invasive whiskey datasets)
 * and does the standard regression, before converting the output back into a discrete class value
 *
 * This version ignores the true values of the classes, instead just representing as class 0,1...c
 * If c&gt;2, classes should be ordered (ascending or descending, doesn't matter) by whatever logical
 * ordering makes sense for the dataset.
 *
 * @author James Large (james.large@uea.ac.uk)
 */
public class PLSNominalClassifier extends PLSClassifier {

    // Original (nominal) class attribute of the training data, kept for reference.
    protected Attribute classAttribute;
    // Index of the class attribute in the training data.
    protected int classind;
    // Number of nominal class values; regression output is mapped into [0, numClasses-1].
    protected int numClasses;

    public PLSNominalClassifier() {
        super();
    }

    // Delegates to the underlying PLS filter's component count.
    public int getNumComponents() {
        return this.m_Filter.getNumComponents();
    }

    public void setNumComponents(int value) {
        this.m_Filter.setNumComponents(value);
    }

    /**
     * Replaces the nominal class attribute with a numeric one holding the class
     * index (0..c-1), then fits the standard PLS regression on that copy.
     *
     * @param data training set with a nominal class attribute
     * @throws Exception if the underlying PLSClassifier build fails
     */
    @Override
    public void buildClassifier(Instances data) throws Exception {
        Instances train = new Instances(data);

        numClasses = train.numClasses();
        classind = train.classIndex();
        classAttribute = train.classAttribute();

        // Rebuild the attribute list, swapping only the class attribute for a numeric one.
        FastVector<Attribute> atts = new FastVector<>(train.numAttributes());
        for (int i = 0; i < train.numAttributes(); i++) {
            if (i != classind)
                atts.add(train.attribute(i));
            else {
                //class attribute
                Attribute numericClassAtt = new Attribute(train.attribute(i).name());
                atts.add(numericClassAtt);
            }
        }

        Instances temp = new Instances(train.relationName(), atts, train.numInstances());
        temp.setClassIndex(classind);

        // Copy instances across; classValue() already yields the nominal index as a double.
        for (int i = 0; i < train.numInstances(); i++) {
            temp.add(new DenseInstance(1.0, train.instance(i).toDoubleArray()));
            temp.instance(i).setClassValue(train.instance(i).classValue());
        }

        train = temp;

        //datset is in the proper format, now do the model fitting as normal
        super.buildClassifier(train);
    }

    // Strips letters/spaces from a nominal label and parses the remainder as a number.
    // NOTE(review): not referenced within this class; presumably a helper for label
    // formats like "class1" - confirm before removing.
    protected double convertNominalToNumeric(String strClassVal) {
        return Double.parseDouble(strClassVal.replaceAll("[A-Za-z ]", ""));
    }

    /** Raw (continuous) PLS regression output for the instance. */
    public double regressInstance(Instance instance) throws Exception {
        return super.classifyInstance(instance);
    }

    @Override
    public double classifyInstance(Instance instance) throws Exception {
        return utilities.GenericTools.indexOfMax(distributionForInstance(instance));
    }

    /**
     * Converts the continuous regression output back to a pseudo class
     * distribution. Output is clamped to the end classes outside [0, c-1];
     * otherwise the fractional part is split between the two neighbouring
     * classes (e.g. 1.3 gives 0.7 to class 1 and 0.3 to class 2).
     */
    public double[] distributionForInstance(Instance instance) throws Exception {
        double regpred = regressInstance(instance);

        double[] dist = new double[numClasses];
        if (regpred <= 0)
            dist[0] = 1.0;
        else if (regpred >= numClasses-1)
            dist[numClasses-1] = 1.0;
        else {
            // Find the first class index above the prediction and interpolate.
            for (int i = 1; i < numClasses; i++) {
                if (regpred < i) {
                    double t = regpred % 1;
                    dist[i] = t;
                    dist[i-1] = 1-t;
                    break;
                }
            }
        }

        return dist;
    }
}
4,581
33.712121
98
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/RidgeClassifierCV.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package machine_learning.classifiers;

import experiments.data.DatasetLoading;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.eigen.Eigen;
import org.nd4j.linalg.factory.Nd4j;
import tsml.classifiers.MultiThreadable;
import tsml.transformers.ROCKET;
import utilities.ClassifierTools;
import weka.classifiers.AbstractClassifier;
import weka.core.Instance;
import weka.core.Instances;

import java.util.Arrays;

import static utilities.InstanceTools.resampleTrainAndTestInstances;

/**
 * Ridge classification with cross-validation to select the alpha value.
 * <p>
 * Based on RidgeClassifierCV from sklearn.
 * https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeClassifierCV.html
 *
 * @author Matthew Middlehurst
 */
public class RidgeClassifierCV extends AbstractClassifier implements MultiThreadable {

    //alphas used in sktime ROCKET
    private final double[] alphas = {1.00000000e-03, 4.64158883e-03, 2.15443469e-02, 1.00000000e-01,
            4.64158883e-01, 2.15443469e+00, 1.00000000e+01, 4.64158883e+01, 2.15443469e+02, 1.00000000e+03};

    // Learned linear model: column c of coefficients scores class c.
    private INDArray coefficients;
    private double[] intercept;

    private int numThreads = 1;
    // Warn about OMP_NUM_THREADS at most once per JVM across all instances.
    private static boolean printedNumThreadsError = false;

    // CV score of the selected alpha (higher is better); sentinel until built.
    private double bestScore = -999999;

    public double getBestScore() {
        return bestScore;
    }

    @Override
    public void enableMultiThreading(int numThreads) {
        this.numThreads = numThreads;
    }

    /**
     * Fits a ridge model for each candidate alpha via an eigendecomposition of
     * the Gram matrix, scores each by a cross-validation error estimate, and
     * keeps the coefficients of the best-scoring alpha. Targets are encoded
     * +/-1 one-vs-rest (single column for binary problems).
     *
     * @param instances training set; class attribute must be the final attribute
     * @throws Exception if the class attribute is not last
     */
    @Override
    public void buildClassifier(Instances instances) throws Exception {
        if (instances.classIndex() != instances.numAttributes() - 1)
            throw new Exception("Class attribute must be the final index.");

        //Set to OMP_NUM_THREADS=1 for single thread run
        //Check if OMP_NUM_THREADS matches numThreads
        // (ND4J's native backend reads OMP_NUM_THREADS; a mismatch with the
        // numThreads field means threading behaviour won't be as configured.)
        if (!printedNumThreadsError) {
            String value = System.getenv("OMP_NUM_THREADS");
            if (value == null && numThreads != Runtime.getRuntime().availableProcessors())
                System.err.println("RidgeClassifierCV: OMP_NUM_THREADS environmental variable not set. Set it to the " +
                        "number of threads you wish to use or set numThreads to " +
                        "Runtime.getRuntime().availableProcessors(). Must be consistent with numThreads field."
                        + System.lineSeparator() + "Example: OMP_NUM_THREADS=1 java tsml.jar");
            if (value != null && Integer.parseInt(value) != numThreads)
                System.err.println("RidgeClassifierCV: OMP_NUM_THREADS environmental variable and numThreads do not " +
                        "match.");
            printedNumThreadsError = true;
        }

        bestScore = -999999;

        // Copy attribute values (class excluded) into a dense matrix.
        double[][] data = new double[instances.numInstances()][instances.numAttributes() - 1];
        for (int i = 0; i < data.length; i++) {
            Instance inst = instances.get(i);
            for (int n = 0; n < data[i].length; n++) {
                data[i][n] = inst.value(n);
            }
        }

        // One-vs-rest +/-1 target encoding; binary problems use a single column.
        double[][] labels;
        if (instances.numClasses() > 2) {
            labels = new double[data.length][instances.numClasses()];
            for (int i = 0; i < data.length; i++) {
                Instance inst = instances.get(i);
                for (int n = 0; n < labels[i].length; n++) {
                    if (inst.classValue() == n) {
                        labels[i][n] = 1;
                    } else {
                        labels[i][n] = -1;
                    }
                }
            }
        } else {
            labels = new double[data.length][1];
            for (int i = 0; i < data.length; i++) {
                if (instances.get(i).classValue() == 1) {
                    labels[i][0] = 1;
                } else {
                    labels[i][0] = -1;
                }
            }
        }

        // Centre data and targets, scale attributes; offsets/scales are kept to
        // fold the preprocessing back into the final coefficients and intercept.
        double[] xOffset = new double[data[0].length];
        double[] yOffset = new double[labels[0].length];
        double[] xScale = new double[data[0].length];
        preprocessData(data, labels, xOffset, yOffset, xScale);

        //original uses SVD when no. instances > no. attributes
        INDArray matrix = Nd4j.create(data);
        // Gram matrix X X^T and its eigenvalues, following sklearn's _RidgeGCV path.
        INDArray q = matrix.mmul(matrix.transpose());
        INDArray eigvals = Eigen.symmetricGeneralizedEigenvalues(q);
        INDArray qt_y = q.transpose();
        qt_y = qt_y.mmul(Nd4j.create(labels));

        INDArray bestCoef = null;
        for (double alpha : alphas) {
            // Shrinkage weights 1/(eigval + alpha) for this alpha.
            double[] w = new double[(int) eigvals.size(0)];
            for (int i = 0; i < w.length; i++) {
                w[i] = 1. / (eigvals.getDouble(i) + alpha);
            }

            // Zero out the component most aligned with the constant vector
            // (data is centred, so that direction carries no signal).
            double[][] p = new double[1][data.length];
            Arrays.fill(p[0], Math.sqrt(data.length) / data.length);
            INDArray sw = Nd4j.create(p);
            double[] k = sw.mmul(q).toDoubleVector();
            for (int i = 0; i < k.length; i++) k[i] = Math.abs(k[i]);
            int idx = argmax(k);
            w[idx] = 0;

            // Dual coefficients for this alpha.
            double[][] d = new double[w.length][(int) qt_y.size(1)];
            for (int i = 0; i < d.length; i++) {
                for (int n = 0; n < d[i].length; n++) {
                    d[i][n] = w[i] * qt_y.getDouble(i, n);
                }
            }
            INDArray coefs = q.mmul(Nd4j.create(d));

            // Diagonal of the hat-style matrix used to estimate CV error.
            double[] sums = new double[w.length];
            for (int i = 0; i < w.length; i++) {
                for (int n = 0; n < q.size(0); n++) {
                    sums[n] += w[i] * Math.pow(q.getDouble(n, i), 2);
                }
            }

            // Mean squared leave-one-out-style residual, flipped so higher is better.
            double e = 0;
            for (int i = 0; i < sums.length; i++) {
                for (int n = 0; n < coefs.size(1); n++) {
                    e += Math.pow(coefs.getDouble(i, n) / sums[i], 2);
                }
            }
            e /= sums.length * coefs.size(1);
            e = 1 - e;

            if (e > bestScore) {
                bestScore = e;
                bestCoef = coefs;
            }
        }

        // Convert dual coefficients to primal weights and undo the attribute scaling.
        INDArray a = bestCoef.transpose().mmul(matrix);
        double[][] b = a.size(0) == 1 ? new double[][]{a.toDoubleVector()} : a.toDoubleMatrix();
        for (int i = 0; i < b.length; i++) {
            for (int n = 0; n < b[i].length; n++) {
                b[i][n] /= xScale[n];
            }
        }
        // Intercept restores the centring removed in preprocessing.
        double[][] c = new double[][]{xOffset};
        coefficients = Nd4j.create(b).transpose();
        INDArray d = Nd4j.create(c).mmul(coefficients);
        intercept = new double[yOffset.length];
        for (int i = 0; i < intercept.length; i++) {
            intercept[i] = yOffset[i] - d.getDouble(i);
        }
    }

    /**
     * Scores the instance against each class's linear model. Multi-class:
     * argmax of the scores; binary: class 1 iff the single score is positive.
     */
    @Override
    public double classifyInstance(Instance inst) {
        double[][] data = new double[1][(int) coefficients.size(0)];
        for (int i = 0; i < data[0].length; i++) {
            data[0][i] = inst.value(i);
        }

        double[] x = Nd4j.create(data).mmul(coefficients).toDoubleVector();
        for (int i = 0; i < intercept.length; i++) {
            x[i] += intercept[i];
        }

        return x.length > 1 ? argmax(x) : (x[0] > 0 ? 1 : 0);
    }

    // Centres data and labels (mean subtraction) and scales each attribute by
    // its Euclidean norm, writing the removed offsets/scales into the output
    // arrays so the caller can fold them back into the final model.
    private void preprocessData(double[][] data, double[][] labels, double[] xOffset, double[] yOffset,
                                double[] xScale) {
        for (int i = 0; i < data.length; i++) {
            for (int n = 0; n < data[i].length; n++) {
                xOffset[n] += data[i][n];
            }

            for (int n = 0; n < labels[i].length; n++) {
                yOffset[n] += labels[i][n];
            }
        }

        for (int i = 0; i < xOffset.length; i++) {
            xOffset[i] /= data.length;
        }

        for (int i = 0; i < yOffset.length; i++) {
            yOffset[i] /= labels.length;
        }

        for (int i = 0; i < data.length; i++) {
            for (int n = 0; n < data[i].length; n++) {
                data[i][n] -= xOffset[n];
            }

            for (int n = 0; n < labels[i].length; n++) {
                labels[i][n] -= yOffset[n];
            }
        }

        for (double[] row : data) {
            for (int n = 0; n < row.length; n++) {
                xScale[n] += row[n] * row[n];
            }
        }

        for (int i = 0; i < xOffset.length; i++) {
            xScale[i] = Math.sqrt(xScale[i]);
            // Guard against division by zero for constant (all-zero after centring) attributes.
            if (xScale[i] == 0) xScale[i] = 1;
        }

        for (int i = 0; i < data.length; i++) {
            for (int n = 0; n < data[i].length; n++) {
                data[i][n] /= xScale[n];
            }
        }
    }

    // Index of the largest value; first occurrence wins on ties.
    private int argmax(double[] arr) {
        double max = -999999;
        int idx = -1;
        for (int i = 0; i < arr.length; i++) {
            if (arr[i] > max) {
                max = arr[i];
                idx = i;
            }
        }
        return idx;
    }

    public static void main(String[] args) throws Exception {
        int fold = 0;

        //Minimum working example
        Instances[] data = DatasetLoading.sampleItalyPowerDemand(fold);
        Instances train = data[0];
        Instances test = data[1];

        RidgeClassifierCV c = new RidgeClassifierCV();
        double accuracy;

        // ROCKET transform then ridge classification, mirroring the sktime pipeline.
        ROCKET r = new ROCKET();
        Instances tTrain = r.fitTransform(train);
        c.buildClassifier(tTrain);
        accuracy = ClassifierTools.accuracy(r.transform(test), c);

        System.out.println(accuracy);
    }
}
10,205
34.314879
120
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/SaveEachParameter.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package machine_learning.classifiers;

/**
 * Capability interface for classifiers that can persist results for each
 * parameter setting they evaluate (exact semantics are implementer-defined).
 *
 * @author ajb
 */
public interface SaveEachParameter {

    /** Sets the path implementers should write per-parameter results to. */
    void setPathToSaveParameters(String r);

    /** Convenience overload: enables per-parameter accuracy saving. */
    default void setSaveEachParaAcc(){setSaveEachParaAcc(true);}

    /** Turns per-parameter accuracy saving on or off. */
    void setSaveEachParaAcc(boolean b);
}
1,015
34.034483
76
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/kNN.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package machine_learning.classifiers;

import experiments.data.DatasetLists;
import experiments.data.DatasetLoading;

import java.text.DecimalFormat;

import utilities.ClassifierTools;
import weka.classifiers.lazy.AttributeFilterBridge;
import weka.classifiers.lazy.IBk;
import weka.core.*;
import weka.core.neighboursearch.NearestNeighbourSearch;

/**
 * Nearest neighbour classifier that extends the weka one but can take
 * alternative distance functions.
 *
 * @author ajb
 * @version 1.0
 * @since 5/4/09
 * <p>
 * 1. Normalisation: set by method normalise(boolean)
 * 2. Cross Validation: set by method crossValidate(int folds)
 * 3. Use weighting: set by the method weightVotes()
 */
public class kNN extends IBk {

    /** Distance function used for the neighbour search. */
    protected DistanceFunction dist;
    // NOTE(review): distMatrix/storeDistance are never used in this file --
    // possibly reserved for distance caching; confirm before removing.
    double[][] distMatrix;
    boolean storeDistance;

    /** Defaults to Euclidean distance 1-NN without attribute normalisation. */
    public kNN() {
        super();
        super.setKNN(1);
        EuclideanDistance ed = new EuclideanDistance();
        ed.setDontNormalize(true);
        setDistanceFunction(ed);
    }

    /** k-NN with unnormalised Euclidean distance. */
    public kNN(int k) {
        super(k);
        EuclideanDistance ed = new EuclideanDistance();
        ed.setDontNormalize(true);
        setDistanceFunction(ed);
    }

    /** Nearest neighbour (IBk default k) with the supplied distance function. */
    public kNN(DistanceFunction df) {
        super();
        setDistanceFunction(df);
    }

    /**
     * Sets the distance function used both locally and by the underlying
     * nearest-neighbour search algorithm.
     */
    public final void setDistanceFunction(DistanceFunction df) {
        dist = df;
        NearestNeighbourSearch s = super.getNearestNeighbourSearchAlgorithm();
        try {
            s.setDistanceFunction(df);
        } catch (Exception e) {
            System.err.println(" Exception thrown setting distance function =" + e + " in " + this);
            e.printStackTrace();
        }
    }

    //Need to implement the early abandon for the search?
    /** Distance between two instances under the current distance function. */
    public double distance(Instance first, Instance second) {
        return dist.distance(first, second);
    }

    /**
     * Turns attribute normalisation on/off. Only valid when the current distance
     * function is normalizable (e.g. Euclidean).
     *
     * @throws RuntimeException if the current distance function is not normalisable
     */
    public void normalise(boolean v) {
        if (dist instanceof NormalizableDistance)
            ((NormalizableDistance) dist).setDontNormalize(!v);
        else
            throw new RuntimeException("ERROR in kNN classifier when setting dist. Distance function "
                    + dist.getClass().getSimpleName() + " is not normalisable");
    }

    @Override
    public void buildClassifier(Instances d) throws Exception {
        Instances d2 = d;
        if (filterAttributes) {
            d2 = filter(d);
        }
        dist.setInstances(d2);
        super.buildClassifier(d2);
    }

    @Override
    public double[] distributionForInstance(Instance instance) throws Exception {
        // if attribute filtering was applied at train time, apply the same filter here
        if (af != null) {
            Instance newInst = af.filterInstance(instance);
            return super.distributionForInstance(newInst);
        }
        return super.distributionForInstance(instance);
    }

    /** Classifies every instance in {@code test}, returning predictions in order. */
    public double[] getPredictions(Instances test) throws Exception {
        double[] pred = new double[test.numInstances()];
        for (int i = 0; i < test.numInstances(); i++)
            pred[i] = classifyInstance(test.instance(i));
        return pred;
    }

    /**
     * Sanity check: 1-NN via this wrapper against weka's IB1 on the UCI datasets.
     * Exact agreement is only expected when normalisation is on.
     */
    public static void test1NNvsIB1(boolean norm) {
        System.out.println("FIRST BASIC SANITY TEST FOR THIS WRAPPER");
        System.out.print("Compare 1-NN with IB1, normalisation turned");
        String str = norm ? " on" : " off";
        System.out.println(str);
        System.out.println("Compare on the UCI data sets");
        System.out.print("If normalisation is off, then there may be differences");
        kNN knn = new kNN(1);
        IBk ib1 = new IBk(1);
        knn.normalise(norm);
        int diff = 0;
        DecimalFormat df = new DecimalFormat("####.###");
        for (String s : DatasetLists.uciFileNames) {
            Instances train = DatasetLoading.loadDataNullable("Z:/ArchiveData/Uci_arff/" + s + "/" + s + "-train");
            Instances test = DatasetLoading.loadDataNullable("Z:/ArchiveData/Uci_arff/" + s + "/" + s + "-test");
            try {
                knn.buildClassifier(train);
                ib1.buildClassifier(train);
                double a1 = ClassifierTools.accuracy(test, knn);
                double a2 = ClassifierTools.accuracy(test, ib1);
                if (a1 != a2) {
                    diff++;
                    System.out.println(s + ": 1-NN =" + df.format(a1) + " ib1=" + df.format(a2));
                }
            } catch (Exception e) {
                // previously swallowed with exit(0); report the cause and signal failure
                System.out.println(" Exception building a classifier: " + e);
                e.printStackTrace();
                System.exit(1);
            }
        }
        System.out.println("Total problems =" + DatasetLists.uciFileNames.length + " different on " + diff);
    }

    /**
     * Sanity check: 100-NN via this wrapper against weka's IBk, optionally with
     * internal cross-validation to select k. Output messages previously described
     * this (incorrectly, via copy-paste) as a 1-NN vs IB1 comparison.
     */
    public static void testkNNvsIBk(boolean norm, boolean crossValidate) {
        System.out.println("BASIC SANITY TEST FOR THIS WRAPPER");
        System.out.print("Compare k-NN (k=100) with IBk, normalisation turned");
        String str = norm ? " on" : " off";
        System.out.println(str);
        System.out.print("Cross validation turned");
        str = crossValidate ? " on" : " off";
        System.out.println(str);
        System.out.println("Compare on the UCI data sets");
        System.out.print("If normalisation is off, then there may be differences");
        kNN knn = new kNN(100);
        IBk ibk = new IBk(100);
        knn.normalise(norm);
        knn.setCrossValidate(crossValidate);
        ibk.setCrossValidate(crossValidate);
        int diff = 0;
        DecimalFormat df = new DecimalFormat("####.###");
        for (String s : DatasetLists.uciFileNames) {
            // forward slashes for consistency with test1NNvsIB1 and portability
            Instances train = DatasetLoading.loadDataNullable("Z:/ArchiveData/Uci_arff/" + s + "/" + s + "-train");
            Instances test = DatasetLoading.loadDataNullable("Z:/ArchiveData/Uci_arff/" + s + "/" + s + "-test");
            try {
                knn.buildClassifier(train);
                ibk.buildClassifier(train);
                double a1 = ClassifierTools.accuracy(test, knn);
                double a2 = ClassifierTools.accuracy(test, ibk);
                if (a1 != a2) {
                    diff++;
                    System.out.println(s + ": k-NN =" + df.format(a1) + " ibk=" + df.format(a2));
                }
            } catch (Exception e) {
                System.out.println(" Exception building a classifier: " + e);
                e.printStackTrace();
                System.exit(1);
            }
        }
        System.out.println("Total problems =" + DatasetLists.uciFileNames.length + " different on " + diff);
    }

    public static void main(String[] args) {
        testkNNvsIBk(true, true);
    }

    //FILTER CODE
    boolean filterAttributes = false;
    double propAtts = 0.5;
    int nosAtts = 0;
    AttributeFilterBridge af;

    /** Enables/disables attribute filtering before building the classifier. */
    public void setFilterAttributes(boolean f) {
        filterAttributes = f;
    }

    /** Proportion of attributes to keep when filtering. */
    public void setProportion(double f) {
        propAtts = f;
    }

    /** Number of attributes to keep. NOTE(review): currently unused by filter(). */
    public void setNumber(int n) {
        nosAtts = n;
    }

    /** Ranks attributes in isolation and keeps the top {@code propAtts} proportion. */
    private Instances filter(Instances d) {
        af = new AttributeFilterBridge(d);
        af.setProportionToKeep(propAtts);
        return af.filter();
    }
}
8,338
35.256522
163
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/ensembles/AbstractEnsemble.java
/* * Copyright (C) 2019 xmw13bzu * * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package machine_learning.classifiers.ensembles; import evaluation.evaluators.Evaluator; import evaluation.evaluators.SamplingEvaluator; import evaluation.storage.ClassifierResults; import experiments.data.DatasetLoading; import java.io.File; import java.io.Serializable; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import tsml.classifiers.*; import tsml.transformers.Transformer; import utilities.DebugPrinting; import utilities.ErrorReport; import utilities.InstanceTools; import utilities.ThreadingUtilities; import weka.classifiers.Classifier; import weka.core.Instance; import weka.core.Instances; import machine_learning.classifiers.ensembles.voting.ModuleVotingScheme; import machine_learning.classifiers.ensembles.weightings.ModuleWeightingScheme; /** * This class defines the base functionality for an ensemble of Classifiers. 
* * Given a * - classifier list (Classifiers); * - a method of estimating error on a dataset (SamplingEvaluator); * - a method of weighting classifier outputs if needed (ModuleWeightingScheme, EqualWeighting if not needed); * = and a method of combining classifier outputs (ModuleVotingScheme); * * Extensions of this class will form an ensemble with all standard Classifier functionality, * as well as the following: * * Current functionality * - Can estimate own performance on train data * - Optional filewriting for individuals' and ensemble's results * - Can train from scratch, or build on results saved to file in ClassifierResults format * - Can thread the component evaluation/building, current just assigning one thread per base classifier * * TODO Expand javadoc * * @author James Large (james.large@uea.ac.uk) */ public abstract class AbstractEnsemble extends EnhancedAbstractClassifier implements DebugPrinting, MultiThreadable { //Main ensemble design decisions/variables protected String ensembleName; protected ModuleWeightingScheme weightingScheme; protected ModuleVotingScheme votingScheme; protected EnsembleModule[] modules; protected SamplingEvaluator trainEstimator; protected Transformer transform; protected Instances trainInsts; //protected ClassifierResults trainResults; inherited from EnhancedAbstractClassifier data generated during buildclassifier if above = true protected ClassifierResults testResults;//data generated during testing //saved after building so that it can be added to our test results, even if for some reason //we're not building/writing train results protected long buildTime = -1; //data info protected int numTrainInsts; protected int numAttributes; protected int numClasses; protected int testInstCounter = 0; protected int numTestInsts = -1; protected Instance prevTestInstance; //results file handling protected boolean readIndividualsResults = false; protected boolean writeIndividualsResults = false; protected boolean 
resultsFilesParametersInitialised; //MultiThreadable protected int numThreads = 1; protected boolean multiThread = false; /** * An annoying compromise to deal with base classfiers that dont produce dists * while getting their train estimate. Off by default, shouldnt be turned on for * mass-experiments, intended for cases where user knows that dists are missing * (for BOSS, in this case) but still just wants to get ensemble results anyway... */ protected boolean fillMissingDistsWithOneHotVectors; /** * if readResultsFilesDirectories.length == 1, all classifier's results read from that one path * else, resultsPaths.length must equal classifiers.length, with each index aligning * to the path to read the classifier's results from. * * e.g to read 2 classifiers from one directory, and another 2 from 2 different directories: * * Index | Paths | Classifier * -------------------------- * 0 | pathA | c1 * 1 | pathA | c2 * 2 | pathB | c3 * 3 | pathC | c4 * */ protected String readResultsFilesDirectories[]; /** * if resultsWritePath is not set, will default to resultsPaths[0] * i.e, if only reading from one directory, will write back the chosen results * under the same directory. if reading from multiple directories but a particular * write path not set, will simply pick the first one given. */ protected String writeResultsFilesDirectory; protected String datasetName; /** * resampleIdentifier is now deprecated, using ONLY the seed for both fold-file naming purposes and any internal * seeding required, e.g tie resolution */ //protected int resampleIdentifier; public AbstractEnsemble() { super(CAN_ESTIMATE_OWN_PERFORMANCE); setupDefaultEnsembleSettings(); } /** * Defines the default setup of any particular instantiation of this class, called in the * constructor. * * Minimum requirements for implementations of this method: * - A default name for this ensemble, as a String (e.g. "CAWPE", "HIVE-COTE") * - A ModuleWeightingScheme. 
If classifiers do not need to be weighted, * use EqualWeighting. * - A ModuleVotingScheme, to define how the base classifier outputs should be * combined. * - An Evaluator, to define the method of performance estimation on the train * set. If not required, either * TODO: set a dummy evaluator that performs no real work * TODO: or leave as null, so long as the ModuleWeightingScheme does not require train estimates * - Classifiers and their names, passed to setClassifiers(...) * * See CAWPE and HIVE-COTE for examples of particular instantiations of this method. */ public abstract void setupDefaultEnsembleSettings(); /** * Simple data type to hold a classifier and it's related information and results. */ public static class EnsembleModule implements DebugPrinting, Serializable { private Classifier classifier; private String moduleName; private String parameters; public ClassifierResults trainResults; public ClassifierResults testResults; //by default (and i imagine in the vast majority of cases) all prior weights are equal (i.e 1) //however may be circumstances where certain classifiers are themselves part of //a subensemble or something public double priorWeight = 1.0; //each module makes a vote, with a weight defined for this classifier when predicting this class //many weighting schemes will have weights for each class set to a single classifier equal, but some //will have e.g certain members being experts at classifying certain classes etc public double[] posteriorWeights; public EnsembleModule() { this.moduleName = "ensembleModule"; this.classifier = null; trainResults = null; testResults = null; } public EnsembleModule(String moduleName, Classifier classifier, String parameters) { this.classifier = classifier; this.moduleName = moduleName; this.parameters = parameters; trainResults = null; testResults = null; } public boolean isAbleToEstimateOwnPerformance() { return classifierAbleToEstimateOwnPerformance(classifier); } public boolean 
isEstimatingOwnPerformance() { return classifierIsEstimatingOwnPerformance(classifier); } public boolean isTrainTimeContractable() { return classifier instanceof TrainTimeContractable; } public boolean isTestTimeContractable() { return classifier instanceof TestTimeContractable; } public boolean isMultiThreadable() { return classifier instanceof MultiThreadable; } public boolean isCheckpointable() { return classifier instanceof Checkpointable; } public String getModuleName() { return moduleName; } public void setModuleName(String moduleName) { this.moduleName = moduleName; } public String getParameters() { //priority of parameter info: // 1) directly provided by setParameters or in the constructor (it's not already empty) // 2) from the classifier itself // 3) from existing train results (if read in from file e.g.) // 4) from existing test results (if only test results read from file) if (parameters == null || parameters.equals("")) { if (trainResults != null) parameters = trainResults.getParas(); else if (testResults != null) parameters = testResults.getParas(); else if (classifier instanceof SaveParameterInfo) parameters = ((SaveParameterInfo) classifier).getParameters(); else parameters = "NoParaInfoFound"; } return parameters; } public void setParameters(String parameters) { this.parameters = parameters; } public Classifier getClassifier() { return classifier; } public void setClassifier(Classifier classifier) { this.classifier = classifier; } @Override public String toString() { return moduleName; } } public Classifier[] getClassifiers(){ Classifier[] classifiers = new Classifier[modules.length]; for (int i = 0; i < modules.length; i++) classifiers[i] = modules[i].getClassifier(); return classifiers; } public void setClassifiersNamesForFileRead(String[] classifierNames) { setClassifiers(null, classifierNames, null); } public void setClassifiersForBuildingInMemory(Classifier[] classifiers) { setClassifiers(classifiers, null ,null); } /** * If building the 
ensemble from scratch, the minimum requirement for running is the * classifiers array, the others could be left null. * * If building the ensemble from the results files of individuals (i.e. setBuildIndividualsFromResultsFiles(true)), * the minimum requirement for running is the classifierNames list. * * @param classifiers array of classifiers to use * @param classifierNames if null, will use the classifiers' class names by default * @param classifierParameters if null, parameters of each classifier empty by default */ public void setClassifiers(Classifier[] classifiers, String[] classifierNames, String[] classifierParameters) { if (classifiers == null && classifierNames == null) { System.out.println("setClassifiers() was passed null for both the classifiers and classifiernames." + "If building the ensemble from scratch in memory (default), the classifiers are needed at minimum." + "Otherwise if building the ensemble from the saved results of base classifiers on disk, the " + "classifier names are needed at minimum. "); //ClassifierLists does not want to throw exceptions, killing here for now todo review System.exit(1); } if (classifiers == null) { classifiers = new Classifier[classifierNames.length]; for (int i = 0; i < classifiers.length; i++) classifiers[i] = null; } else { //If they are able to, make the classifiers estimate their own performance. 
This helps with contracting for (Classifier c : classifiers) { if (c instanceof EnhancedAbstractClassifier) if (((EnhancedAbstractClassifier) c).ableToEstimateOwnPerformance()) ((EnhancedAbstractClassifier) c).setEstimateOwnPerformance(true); } } if (classifierNames == null) { classifierNames = new String[classifiers.length]; for (int i = 0; i < classifiers.length; i++) classifierNames[i] = classifiers[i].getClass().getSimpleName(); } if (classifierParameters == null) { classifierParameters = new String[classifiers.length]; for (int i = 0; i < classifiers.length; i++) classifierParameters[i] = ""; } this.modules = new EnsembleModule[classifiers.length]; for (int m = 0; m < modules.length; m++) modules[m] = new EnsembleModule(classifierNames[m], classifiers[m], classifierParameters[m]); } protected void initialiseModules() throws Exception { //currently will only have file reading ON or OFF (not load some files, train the rest) //having that creates many, many, many annoying issues, especially when classifying test cases if (readIndividualsResults) { if (!resultsFilesParametersInitialised) throw new Exception("Trying to load "+ensembleName+" modules from file, but parameters for results file reading have not been initialised"); loadModules(); //will throw exception if a module cannot be loaded (rather than e.g training that individual instead) } else trainModules(); for (EnsembleModule module : modules) { //in case train results didnt have probability distributions, hack for old hive cote results tony todo clean module.trainResults.setNumClasses(trainInsts.numClasses()); if (fillMissingDistsWithOneHotVectors) module.trainResults.populateMissingDists(); module.trainResults.findAllStatsOnce(); } } protected synchronized void trainModules() throws Exception { //define the operations to build and evaluate each module, as a function //that will build the classifier and return train results for it, either //generated by the classifier itself or the trainEstimator 
List<Callable<ClassifierResults>> moduleBuilds = new ArrayList<>(); for (EnsembleModule module : modules) { final Classifier classifier = module.getClassifier(); final Evaluator eval = trainEstimator.cloneEvaluator(); Callable<ClassifierResults> moduleBuild = () -> { ClassifierResults trainResults = null; if (EnhancedAbstractClassifier.classifierIsEstimatingOwnPerformance(classifier)) { classifier.buildClassifier(trainInsts); trainResults = ((EnhancedAbstractClassifier)classifier).getTrainResults(); } else { trainResults = eval.evaluate(classifier, trainInsts); classifier.buildClassifier(trainInsts); } return trainResults; }; moduleBuilds.add(moduleBuild); } //complete the operations, either threaded via the executor service or //locally/sequentially List<ClassifierResults> results = new ArrayList<>(); if (multiThread) { ExecutorService executor = ThreadingUtilities.buildExecutorService(numThreads); boolean shutdownAfter = true; results = ThreadingUtilities.computeAll(executor, moduleBuilds, shutdownAfter); } else { for (Callable<ClassifierResults> moduleBuild : moduleBuilds) results.add(moduleBuild.call()); } //gather back the train results, write them if needed for (int i = 0; i < modules.length; i++) { modules[i].trainResults = results.get(i); if (writeIndividualsResults) { //if we're doing trainFold# file writing String params = modules[i].getParameters(); if (modules[i].getClassifier() instanceof EnhancedAbstractClassifier) params = ((EnhancedAbstractClassifier)modules[i].getClassifier()).getParameters(); writeResultsFile(modules[i].getModuleName(), params, modules[i].trainResults, "train"); //write results out } } } // protected void trainModules_unThreaded() throws Exception { // for (EnsembleModule module : modules) { // Classifier clf = module.getClassifier(); // if (clf instanceof TrainAccuracyEstimator) { // clf.buildClassifier(trainInsts); // // //these train results should also include the buildtime // module.trainResults = 
((TrainAccuracyEstimator)clf).getTrainResults(); // module.trainResults.finaliseResults(); // // // TODO: should errorEstimateTime be forced to zero? by the intention of the interface, // // the estimate should have been produced during the normal process of building // // the classifier, but depending on how it was programmatically produced, // // the reported estimate time may have already been accounted for in the // // build time. Investigate when use cases arise // } // else { // printlnDebug(module.getModuleName() + " estimateing performance..."); // module.trainResults = trainEstimator.evaluate(module.getClassifier(), trainInsts); // module.trainResults.finaliseResults(); // // //assumption: classifiers that maintain a classifierResults object, which may be the same object that module.trainResults refers to, // //and which this subsequent building of the final classifier would tamper with, would have been handled as an instanceof TrainAccuracyEstimate above // long startTime = System.nanoTime(); // module.getClassifier().buildClassifier(trainInsts); // module.trainResults.setBuildTime(System.nanoTime() - startTime); // module.trainResults.setTimeUnit(TimeUnit.NANOSECONDS); // } // } // } protected void loadModules() throws Exception { //will look for all files and report all that are missing, instead of bailing on the first file not found //just helps debugging/running experiments a little ErrorReport errors = new ErrorReport("Errors while loading modules from file. Directories given: " + Arrays.toString(readResultsFilesDirectories)); //for each module for(int m = 0; m < this.modules.length; m++){ String readResultsFilesDirectory = readResultsFilesDirectories.length == 1 ? 
readResultsFilesDirectories[0] : readResultsFilesDirectories[m]; boolean trainResultsLoaded = false; boolean testResultsLoaded = false; //try and load in the train/test results for this module File moduleTrainResultsFile = findResultsFile(readResultsFilesDirectory, modules[m].getModuleName(), "train"); if (moduleTrainResultsFile != null) { printlnDebug(modules[m].getModuleName() + " train loading... " + moduleTrainResultsFile.getAbsolutePath()); modules[m].trainResults = new ClassifierResults(moduleTrainResultsFile.getAbsolutePath()); trainResultsLoaded = true; } File moduleTestResultsFile = findResultsFile(readResultsFilesDirectory, modules[m].getModuleName(), "test"); if (moduleTestResultsFile != null) { //of course these results not actually used at all during training, //only loaded for future use when classifying with ensemble printlnDebug(modules[m].getModuleName() + " test loading..." + moduleTestResultsFile.getAbsolutePath()); modules[m].testResults = new ClassifierResults(moduleTestResultsFile.getAbsolutePath()); numTestInsts = modules[m].testResults.numInstances(); testResultsLoaded = true; } if (!trainResultsLoaded) errors.log("\nTRAIN results files for '" + modules[m].getModuleName() + "' on '" + datasetName + "' fold '" + seed + "' not found. "); else if (needIndividualTrainPreds() && modules[m].trainResults.getProbabilityDistributions().isEmpty()) errors.log("\nNo pred/distribution for instance data found in TRAIN results file for '" + modules[m].getModuleName() + "' on '" + datasetName + "' fold '" + seed + "'. "); if (!testResultsLoaded) errors.log("\nTEST results files for '" + modules[m].getModuleName() + "' on '" + datasetName + "' fold '" + seed + "' not found. "); else if (modules[m].testResults.numInstances()==0) errors.log("\nNo prediction data found in TEST results file for '" + modules[m].getModuleName() + "' on '" + datasetName + "' fold '" + seed + "'. 
"); } errors.throwIfErrors(); } protected boolean needIndividualTrainPreds() { return getEstimateOwnPerformance() || weightingScheme.needTrainPreds || votingScheme.needTrainPreds; } protected File findResultsFile(String readResultsFilesDirectory, String classifierName, String trainOrTest) { File file = new File(readResultsFilesDirectory+classifierName+"/Predictions/"+datasetName+"/"+trainOrTest+"Fold"+seed+".csv"); if(!file.exists() || file.length() == 0) { File file2 = new File(readResultsFilesDirectory + classifierName + "/Predictions/" + datasetName + "/" + trainOrTest + "Resample" + seed + ".csv"); if (!file2.exists() || file2.length() == 0) return null; else return file2; } else return file; } //hack for handling train accuracy estimate. experiments is giving us the full path and filename //to write to, instead of just the folder and expecting us to fill in the +classifierName+"/Predictions/"+datasetName+filename; //when doing the interface overhaul, sort this stuff out. protected void writeEnsembleTrainAccuracyEstimateResultsFile() throws Exception { trainResults.writeFullResultsToFile(writeResultsFilesDirectory); } protected void writeResultsFile(String classifierName, String parameters, ClassifierResults results, String trainOrTest) throws Exception { String fullPath = writeResultsFilesDirectory+classifierName+"/Predictions/"+datasetName; new File(fullPath).mkdirs(); fullPath += "/" + trainOrTest + "Fold" + seed + ".csv"; results.setEstimatorName(classifierName); results.setDatasetName(datasetName); results.setFoldID(seed); results.setSplit(trainOrTest); results.setParas(parameters); results.writeFullResultsToFile(fullPath); } /** * must be called (this or the directory ARRAY overload) in order to build ensemble from results files or to write individual's * results files * * exitOnFilesNotFound defines whether the ensemble will simply throw exception/exit if results files * arnt found, or will try to carry on (e.g train the classifiers normally) */ public 
void setResultsFileLocationParameters(String individualResultsFilesDirectory, String datasetName, int resampleIdentifier) { setResultsFileLocationParameters(new String[] { individualResultsFilesDirectory }, datasetName, resampleIdentifier); } /** * must be called (this or the single directory string overload) in order to build ensemble from results files or to write individual's * results files * * exitOnFilesNotFound defines whether the ensemble will simply throw exception/exit if results files * arnt found, or will try to carry on (e.g train the classifiers normally) */ public void setResultsFileLocationParameters(String[] individualResultsFilesDirectories, String datasetName, int resampleIdentifier) { resultsFilesParametersInitialised = true; this.readResultsFilesDirectories = individualResultsFilesDirectories; this.datasetName = datasetName; setSeed(resampleIdentifier); } /** * if writing results of individuals/ensemble, but want to define a specific folder to write to as opposed to defaulting to the (only or first) * reading location */ public void setResultsFileWritingLocation(String writeResultsFilesDirectory) { this.writeResultsFilesDirectory = writeResultsFilesDirectory; } public void setBuildIndividualsFromResultsFiles(boolean b) { readIndividualsResults = b; if (b) writeIndividualsResults = false; } public void setWriteIndividualsTrainResultsFiles(boolean b) { writeIndividualsResults = b; if (b) readIndividualsResults = false; } protected ClassifierResults estimateEnsemblePerformance(Instances data) throws Exception { double actual, pred; double[] dist; ClassifierResults trainResults = new ClassifierResults(data.numClasses()); trainResults.setTimeUnit(TimeUnit.NANOSECONDS); long estimateTimeStart = System.nanoTime(); //for each train inst for (int i = 0; i < numTrainInsts; i++) { long startTime = System.nanoTime(); dist = votingScheme.distributionForTrainInstance(modules, i); long predTime = System.nanoTime()- startTime; //time for ensemble to form vote 
for (EnsembleModule module : modules) // +time for each member's predictions predTime += module.trainResults.getPredictionTime(i); pred = findIndexOfMax(dist, rand); actual = data.instance(i).classValue(); trainResults.turnOffZeroTimingsErrors(); trainResults.addPrediction(actual, dist, pred, predTime, ""); trainResults.turnOnZeroTimingsErrors(); } long estimateTime = System.nanoTime() - estimateTimeStart; for (EnsembleModule module : modules) estimateTime += module.trainResults.getErrorEstimateTime(); trainResults.setEstimatorName(ensembleName); if (datasetName == null || datasetName.equals("")) datasetName = data.relationName(); trainResults.setDatasetName(datasetName); trainResults.setFoldID(seed); trainResults.setSplit("train"); trainResults.setParas(getParameters()); trainResults.setErrorEstimateTime(estimateTime); trainResults.setErrorEstimateMethod(modules[0].trainResults.getErrorEstimateMethod()); trainResults.finaliseResults(); return trainResults; } /** * If building individuals from scratch, i.e not read results from files, call this * after testing is complete to build each module's testResults (accessible by module.testResults) * * This will be done internally anyway if writeIndividualTestFiles(...) is called, this method * is made public only so that results can be accessed from memory during the same run if wanted */ public void finaliseIndividualModuleTestResults(double[] testSetClassVals) throws Exception { for (EnsembleModule module : modules) module.testResults.finaliseResults(testSetClassVals); //converts arraylists to double[]s and preps for writing } /** * If building individuals from scratch, i.e not read results from files, call this * after testing is complete to build each module's testResults (accessible by module.testResults) * * This will be done internally anyway if writeIndividualTestFiles(...) 
is called, this method * is made public only so that results can be accessed from memory during the same run if wanted */ public void finaliseEnsembleTestResults(double[] testSetClassVals) throws Exception { this.testResults.finaliseResults(testSetClassVals); } /** * @param throwExceptionOnFileParamsNotSetProperly added to make experimental code smoother, * i.e if false, can leave the call to writeIndividualTestFiles(...) in even if building from file, and this * function will just do nothing. else if actually intending to write test results files, pass true * for exceptions to be thrown in case of genuine missing parameter settings * @throws Exception */ public void writeIndividualTestFiles(double[] testSetClassVals, boolean throwExceptionOnFileParamsNotSetProperly) throws Exception { if (!writeIndividualsResults || !resultsFilesParametersInitialised) { if (throwExceptionOnFileParamsNotSetProperly) throw new Exception("to call writeIndividualTestFiles(), must have called setResultsFileLocationParameters(...) and setWriteIndividualsResultsFiles()"); else return; //do nothing } finaliseIndividualModuleTestResults(testSetClassVals); for (EnsembleModule module : modules) writeResultsFile(module.getModuleName(), module.getParameters(), module.testResults, "test"); } /** * @param throwExceptionOnFileParamsNotSetProperly added to make experimental code smoother, * i.e if false, can leave the call to writeIndividualTestFiles(...) in even if building from file, and this * function will just do nothing. 
else if actually intending to write test results files, pass true * for exceptions to be thrown in case of genuine missing parameter settings * @throws Exception */ public void writeEnsembleTrainTestFiles(double[] testSetClassVals, boolean throwExceptionOnFileParamsNotSetProperly) throws Exception { if (!resultsFilesParametersInitialised) { if (throwExceptionOnFileParamsNotSetProperly) throw new Exception("to call writeEnsembleTrainTestFiles(), must have called setResultsFileLocationParameters(...)"); else return; //do nothing } if (trainResults != null) //performed trainEstimator writeResultsFile(ensembleName, getParameters(), trainResults, "train"); this.testResults.finaliseResults(testSetClassVals); writeResultsFile(ensembleName, getParameters(), testResults, "test"); } public EnsembleModule[] getModules() { return modules; } public SamplingEvaluator getTrainEstimator() { return trainEstimator; } public void setTrainEstimator(SamplingEvaluator trainEstimator) { this.trainEstimator = trainEstimator; } public String[] getClassifierNames() { String[] classifierNames = new String[modules.length]; for (int m = 0; m < modules.length; m++) classifierNames[m] = modules[m].getModuleName(); return classifierNames; } public String getEnsembleName() { return ensembleName; } public void setEnsembleName(String ensembleName) { this.ensembleName = ensembleName; } public boolean getFillMissingDistsWithOneHotVectors() { return fillMissingDistsWithOneHotVectors; } public void setFillMissingDistsWithOneHotVectors(boolean fillMissingDistsWithOneHotVectors) { this.fillMissingDistsWithOneHotVectors = fillMissingDistsWithOneHotVectors; } public double[][] getPosteriorIndividualWeights() { double[][] weights = new double[modules.length][]; for (int m = 0; m < modules.length; ++m) weights[m] = modules[m].posteriorWeights; return weights; } public ModuleVotingScheme getVotingScheme() { return votingScheme; } public void setVotingScheme(ModuleVotingScheme votingScheme) { this.votingScheme 
= votingScheme; } public ModuleWeightingScheme getWeightingScheme() { return weightingScheme; } public void setWeightingScheme(ModuleWeightingScheme weightingScheme) { this.weightingScheme = weightingScheme; } public double[] getIndividualAccEstimates() { double [] accs = new double[modules.length]; for (int i = 0; i < modules.length; i++) accs[i] = modules[i].trainResults.getAcc(); return accs; } public double[] getPriorIndividualWeights() { double[] priors = new double[modules.length]; for (int i = 0; i < modules.length; i++) priors[i] = modules[i].priorWeight; return priors; } public void setPriorIndividualWeights(double[] priorWeights) throws Exception { if (priorWeights.length != modules.length) throw new Exception("Number of prior weights being set (" + priorWeights.length + ") not equal to the number of modules (" + modules.length + ")"); for (int i = 0; i < modules.length; i++) modules[i].priorWeight = priorWeights[i]; } private void setDefaultPriorWeights() { for (int i = 0; i < modules.length; i++) modules[i].priorWeight = 1.0; } public double[][] getIndividualEstimatePredictions() { double [][] preds = new double[modules.length][]; for (int i = 0; i < modules.length; i++) preds[i] = modules[i].trainResults.getPredClassValsAsArray(); return preds; } public Transformer getTransform(){ return this.transform; } public void setTransform(Transformer transform){ this.transform = transform; } @Override public ClassifierResults getTrainResults(){ return trainResults; } public ClassifierResults getTestResults(){ return testResults; } @Override public String getParameters(){ StringBuilder out = new StringBuilder(); out.append(weightingScheme.toString()).append(",").append(votingScheme.toString()).append(","); for (EnsembleModule module : modules) { out.append(module.getModuleName()).append("(").append(module.priorWeight); for (double weight : module.posteriorWeights) out.append("/").append(weight); out.append("),"); } for (EnsembleModule module : modules) 
out.append(module.getParameters()).append(",,"); return out.toString(); } // public void readParameters(String paramLine) { // String[] classifiers = paramLine.split(","); // // String[] classifierNames = new String[classifiers.length]; // double[] priorWeights = new double[classifiers.length]; // double[] postWeights = new double[classifiers.length]; // // for (int i = 0; i < classifiers.length; ++i) { // String[] parts = classifiers[i].split("("); // classifierNames[i] = parts[0]; // String[] weights = parts[1].split("/"); // priorWeights[i] = Integer.parseInt(weights[0]); // for (int j = 1; j < weights.length; ++j) // postWeights[j-1] = Integer.parseInt(weights[j]); // } // // } @Override public void buildClassifier(Instances data) throws Exception { printlnDebug("**ENSEMBLE TRAIN: " + ensembleName + "**"); //housekeeping if (resultsFilesParametersInitialised) { if (readResultsFilesDirectories.length > 1) if (readResultsFilesDirectories.length != modules.length) throw new Exception("Ensemble, " + this.getClass().getSimpleName() + ".buildClassifier: " + "more than one results path given, but number given does not align with the number of classifiers/modules."); if (writeResultsFilesDirectory == null) writeResultsFilesDirectory = readResultsFilesDirectories[0]; } // can classifier handle the data? 
getCapabilities().testWithFail(data); long startTime = System.nanoTime(); //transform data if specified if(this.transform==null){ this.trainInsts = data; }else{ printlnDebug(" Transform is being used: Transform = "+transform.getClass().getSimpleName()); this.trainInsts = transform.transform(data); printlnDebug(" Transform "+transform.getClass().getSimpleName()+" complete"); printlnDebug(" Transform "+transform.toString()); } //init this.numTrainInsts = trainInsts.numInstances(); this.numClasses = trainInsts.numClasses(); this.numAttributes = trainInsts.numAttributes(); //set up modules initialiseModules(); //if modules' results are being read in from file, ignore the i/o overhead //of loading the results, we'll sum the actual buildtimes of each module as //reported in the files if (readIndividualsResults) startTime = System.nanoTime(); //set up ensemble weightingScheme.defineWeightings(modules, numClasses); votingScheme.trainVotingScheme(modules, numClasses); buildTime = System.nanoTime() - startTime; if (readIndividualsResults) { //we need to sum the modules' reported build time as well as the weight //and voting definition time for (EnsembleModule module : modules) { if (needIndividualTrainPreds()) { if (module.trainResults.getBuildPlusEstimateTime() == -1 || module.trainResults.getBuildTimeInNanos() == -1){ //assumes estimate time is not included in the total build time long t = module.trainResults.getBuildTimeInNanos() == -1 ? module.testResults.getBuildTimeInNanos() : module.trainResults.getBuildTimeInNanos(); buildTime += t + module.trainResults.getErrorEstimateTimeInNanos(); } else{ buildTime += module.trainResults.getBuildPlusEstimateTimeInNanos(); } } else{ buildTime += module.trainResults.getBuildTimeInNanos() == -1 ? 
module.testResults.getBuildTimeInNanos() : module.trainResults.getBuildTimeInNanos(); } } } trainResults = new ClassifierResults(); trainResults.setTimeUnit(TimeUnit.NANOSECONDS); if(getEstimateOwnPerformance()) trainResults = estimateEnsemblePerformance(data); //combine modules to find overall ensemble trainpreds //HACK FOR CAWPE_EXTENSION PAPER: //since experiments expects us to make a train results object //and for us to record our build time, going to record it here instead of //editing experiments to record the buildTime at that level //buildTime does not include the ensemble's trainEstimator in any case, only the work required to be ready for testing //time unit has been set in estimateEnsemblePerformance(data); trainResults.turnOffZeroTimingsErrors(); trainResults.setBuildTime(buildTime); trainResults.setBuildPlusEstimateTime(buildTime + trainResults.getErrorEstimateTime()); trainResults.turnOnZeroTimingsErrors(); this.testInstCounter = 0; //prep for start of testing this.prevTestInstance = null; } @Override public double[] distributionForInstance(Instance instance) throws Exception{ Instance ins = instance; if(this.transform!=null){ Instances rawContainer = new Instances(instance.dataset(),0); rawContainer.add(instance); // transform.setInputFormat(rawContainer); // Instances converted = Filter.useFilter(rawContainer,transform); Instances converted = transform.transform(rawContainer); ins = converted.instance(0); } if (testResults == null || (testInstCounter == 0 && prevTestInstance == null)) {//definitely the first call, not e.g the first inst being classified for the second time printlnDebug("\n**TEST**"); testResults = new ClassifierResults(numClasses); testResults.setTimeUnit(TimeUnit.NANOSECONDS); testResults.setBuildTime(buildTime); } if (readIndividualsResults && testInstCounter >= numTestInsts) //if no test files loaded, numTestInsts == -1 throw new Exception("Received more test instances than expected, when loading test results files, found " + 
numTestInsts + " test cases");

        double[] dist;
        long voteStart = System.nanoTime();
        long predTime;
        if (readIndividualsResults) { //have results loaded from file
            dist = votingScheme.distributionForTestInstance(modules, testInstCounter);
            predTime = System.nanoTime() - voteStart; //time for ensemble to form vote
            for (EnsembleModule module : modules) //+time for each member's predictions
                predTime += module.testResults.getPredictionTime(testInstCounter);
        } else { //need to classify them normally
            dist = votingScheme.distributionForInstance(modules, ins);
            predTime = System.nanoTime() - voteStart;
        }

        testResults.turnOffZeroTimingsErrors();
        testResults.addPrediction(dist, findIndexOfMax(dist, rand), predTime, "");
        testResults.turnOnZeroTimingsErrors();

        //reference comparison on purpose: classifying the same Instance object again
        //(e.g. classifyInstance after distributionForInstance) must not advance the counter
        if (prevTestInstance != instance)
            ++testInstCounter;
        prevTestInstance = instance;

        return dist;
    }

    @Override
    public double classifyInstance(Instance instance) throws Exception {
        double[] dist = distributionForInstance(instance);
        return findIndexOfMax(dist, rand);
    }

    /**
     * @return the predictions of each individual module, i.e [0] = first module's vote, [1] = second...
     */
    public double[] classifyInstanceByConstituents(Instance instance) throws Exception {
        Instance ins = instance;
        if (this.transform != null) {
            Instances rawContainer = new Instances(instance.dataset(), 0);
            rawContainer.add(instance);
            Instances converted = transform.transform(rawContainer);
            ins = converted.instance(0);
        }

        double[] predsByClassifier = new double[modules.length];
        for (int m = 0; m < modules.length; m++)
            predsByClassifier[m] = modules[m].getClassifier().classifyInstance(ins);

        return predsByClassifier;
    }

    /**
     * @return the distributions of each individual module, i.e [0] = first module's dist, [1] = second...
     */
    public double[][] distributionForInstanceByConstituents(Instance instance) throws Exception {
        Instance ins = instance;
        if (this.transform != null) {
            Instances rawContainer = new Instances(instance.dataset(), 0);
            rawContainer.add(instance);
            Instances converted = transform.transform(rawContainer);
            ins = converted.instance(0);
        }

        double[][] distsByClassifier = new double[this.modules.length][];
        for (int m = 0; m < modules.length; m++)
            distsByClassifier[m] = modules[m].getClassifier().distributionForInstance(ins);

        return distsByClassifier;
    }

    @Override //MultiThreadable
    public void enableMultiThreading(int numThreads) {
        if (numThreads > 1) {
            this.numThreads = numThreads;
            this.multiThread = true;
        } else {
            this.numThreads = 1;
            this.multiThread = false;
        }
    }

    /** Builds a human-readable summary of the ensemble's set-up and (optionally) its predictions. */
    public String produceEnsembleReport(boolean printPreds, boolean builtFromFile) {
        StringBuilder sb = new StringBuilder();

        sb.append(ensembleName).append(" REPORT");
        sb.append("\nname: ").append(ensembleName);
        sb.append("\nmodules: ").append(modules[0].getModuleName());
        for (int m = 1; m < modules.length; m++)
            sb.append(",").append(modules[m].getModuleName());
        sb.append("\nweight scheme: ").append(weightingScheme);
        sb.append("\nvote scheme: ").append(votingScheme);
        sb.append("\ndataset: ").append(datasetName);
        sb.append("\nfold: ").append(seed);
        sb.append("\ntrain acc: ").append(trainResults.getAcc());
        sb.append("\ntest acc: ").append(builtFromFile ?
testResults.getAcc() : "NA");

        int precision = 4;
        int numWidth = precision + 2;
        int trainAccColWidth = 8;
        int priorWeightColWidth = 12;
        int postWeightColWidth = 12;

        //column widths grow with the class count so every per-class weight fits
        String moduleHeaderFormatString = "\n\n%20s | %" + (Math.max(trainAccColWidth, numWidth)) + "s | %" + (Math.max(priorWeightColWidth, numWidth)) + "s | %" + (Math.max(postWeightColWidth, this.numClasses * (numWidth + 2))) + "s";
        String moduleRowHeaderFormatString = "\n%20s | %" + trainAccColWidth + "." + precision + "f | %" + priorWeightColWidth + "." + precision + "f | %" + (Math.max(postWeightColWidth, this.numClasses * (precision + 2))) + "s";

        sb.append(String.format(moduleHeaderFormatString, "modules", "trainacc", "priorweights", "postweights"));
        for (EnsembleModule module : modules) {
            String postweights = String.format(" %." + precision + "f", module.posteriorWeights[0]);
            for (int c = 1; c < this.numClasses; c++)
                postweights += String.format(", %." + precision + "f", module.posteriorWeights[c]);

            sb.append(String.format(moduleRowHeaderFormatString, module.getModuleName(), module.trainResults.getAcc(), module.priorWeight, postweights));
        }

        if (printPreds) {
            sb.append("\n\nensemble train preds: ");
            sb.append("\ntrain acc: ").append(trainResults.getAcc());
            sb.append("\n");
            for (int i = 0; i < trainResults.numInstances(); i++)
                sb.append(produceEnsemblePredsLine(true, i)).append("\n");

            sb.append("\n\nensemble test preds: ");
            sb.append("\ntest acc: ").append(builtFromFile ? testResults.getAcc() : "NA");
            sb.append("\n");
            for (int i = 0; i < testResults.numInstances(); i++)
                sb.append(produceEnsemblePredsLine(false, i)).append("\n");
        }

        return sb.toString();
    }

    /**
     * trueClassVal,predClassVal,[empty],dist1,...,distC,#indpreddist1,...,indpreddistC,#module1pred,...,moduleMpred
     * split on "#"
     * [0] = normal results file format (true class, pred class, distforinst)
     * [1] = number of individual unweighted votes per class
     * [2] = the unweighted prediction of each module
     */
    private String produceEnsemblePredsLine(boolean train, int index) {
        StringBuilder sb = new StringBuilder();

        if (train) //pred
            sb.append(modules[0].trainResults.getTrueClassValue(index)).append(",").append(trainResults.getPredClassValue(index)).append(",");
        else
            sb.append(modules[0].testResults.getTrueClassValue(index)).append(",").append(testResults.getPredClassValue(index)).append(",");

        if (train) { //dist
            double[] pred = trainResults.getProbabilityDistribution(index);
            for (int j = 0; j < pred.length; j++)
                sb.append(",").append(pred[j]);
        } else {
            double[] pred = testResults.getProbabilityDistribution(index);
            for (int j = 0; j < pred.length; j++)
                sb.append(",").append(pred[j]);
        }
        sb.append(",");

        double[] predDist = new double[numClasses]; //indpreddist
        for (int m = 0; m < modules.length; m++) {
            if (train)
                ++predDist[(int) modules[m].trainResults.getPredClassValue(index)];
            else
                ++predDist[(int) modules[m].testResults.getPredClassValue(index)];
        }
        for (int c = 0; c < numClasses; c++)
            sb.append(",").append(predDist[c]);
        sb.append(",");

        for (int m = 0; m < modules.length; m++) {
            if (train)
                sb.append(",").append(modules[m].trainResults.getPredClassValue(index));
            else
                sb.append(",").append(modules[m].testResults.getPredClassValue(index));
        }

        return sb.toString();
    }

    /** Manual test harness: builds individuals from scratch and writes all results files. */
    protected static void testBuildingInds(int testID) throws Exception {
        System.out.println("testBuildingInds()");

        (new File("C:/Temp/EnsembleTests" + testID + "/")).mkdirs();

        int numFolds = 5;

        for (int fold = 0; fold < numFolds; fold++) {
            String dataset = "breast-cancer-wisc-prog";

            Instances all = DatasetLoading.loadDataNullable("C:/UCI Problems/" + dataset + "/" + dataset);
            Instances[] insts = InstanceTools.resampleInstances(all, fold, 0.5);
            Instances train = insts[0];
            Instances test = insts[1];

            CAWPE cawpe = new CAWPE();
            cawpe.setResultsFileLocationParameters("C:/Temp/EnsembleTests" + testID + "/", dataset, fold);
            cawpe.setWriteIndividualsTrainResultsFiles(true);
            cawpe.setEstimateOwnPerformance(true); //now defaults to true
            cawpe.setSeed(fold);

            cawpe.buildClassifier(train);

            //classify every test inst so the ensemble's testResults are populated
            double acc = .0;
            for (Instance instance : test) {
                if (instance.classValue() == cawpe.classifyInstance(instance))
                    acc++;
            }
            acc /= test.numInstances();

            cawpe.writeIndividualTestFiles(test.attributeToDoubleArray(test.classIndex()), true);
            cawpe.writeEnsembleTrainTestFiles(test.attributeToDoubleArray(test.classIndex()), true);

            System.out.println("TrainAcc=" + cawpe.getTrainResults().getAcc());
            System.out.println("TestAcc=" + cawpe.getTestResults().getAcc());
        }
    }

    /** Manual test harness: rebuilds the ensemble from the results files written by testBuildingInds. */
    protected static void testLoadingInds(int testID) throws Exception {
        //BUGFIX: previously printed "testBuildingInds()" — copy-paste slip from the method above
        System.out.println("testLoadingInds()");

        (new File("C:/Temp/EnsembleTests" + testID + "/")).mkdirs();

        int numFolds = 5;

        for (int fold = 0; fold < numFolds; fold++) {
            String dataset = "breast-cancer-wisc-prog";

            Instances all = DatasetLoading.loadDataNullable("C:/UCI Problems/" + dataset + "/" + dataset);
            Instances[] insts = InstanceTools.resampleInstances(all, fold, 0.5);
            Instances train = insts[0];
            Instances test = insts[1];

            CAWPE cawpe = new CAWPE();
            cawpe.setResultsFileLocationParameters("C:/Temp/EnsembleTests" + testID + "/", dataset, fold);
            cawpe.setBuildIndividualsFromResultsFiles(true);
            cawpe.setEstimateOwnPerformance(true); //now defaults to true
            cawpe.setSeed(fold);

            cawpe.buildClassifier(train);

            //classify every test inst so the ensemble's testResults are populated
            double acc = .0;
            for (Instance instance : test) {
                if (instance.classValue() == cawpe.classifyInstance(instance))
                    acc++;
            }
            acc /= test.numInstances();

            cawpe.finaliseEnsembleTestResults(test.attributeToDoubleArray(test.classIndex()));

            System.out.println("TrainAcc=" + cawpe.getTrainResults().getAcc());
            System.out.println("TestAcc=" + cawpe.getTestResults().getAcc());
        }
    }
}
54,016
41.266823
219
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/ensembles/CAWPE.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package machine_learning.classifiers.ensembles;

import experiments.ClassifierExperiments;
import experiments.CollateResults;
import experiments.ExperimentalArguments;
import evaluation.MultipleEstimatorEvaluation;
import machine_learning.classifiers.ensembles.weightings.TrainAcc;
import machine_learning.classifiers.ensembles.weightings.TrainAccByClass;
import tsml.transformers.SAX;
import tsml.transformers.Transformer;
import machine_learning.classifiers.ensembles.voting.MajorityVote;
import java.io.File;
import utilities.ClassifierTools;
import evaluation.evaluators.CrossValidationEvaluator;
import evaluation.evaluators.SingleTestSetEvaluator;
import evaluation.evaluators.StratifiedResamplesEvaluator;
import evaluation.storage.ClassifierResults;
import utilities.InstanceTools;
import weka.classifiers.Classifier;
import weka.classifiers.bayes.NaiveBayes;
import weka.classifiers.functions.SMO;
import weka.classifiers.functions.supportVector.PolyKernel;
import weka.classifiers.meta.RotationForest;
import weka.classifiers.trees.J48;
import weka.classifiers.trees.RandomForest;
import weka.core.EuclideanDistance;
import weka.core.Instances;
import experiments.data.DatasetLoading;
import machine_learning.classifiers.ensembles.voting.MajorityConfidence;
import weka.classifiers.functions.Logistic;
import weka.classifiers.functions.MultilayerPerceptron;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import machine_learning.classifiers.kNN;

/**
 * Can be constructed and will be ready for use from the default constructor like any other classifier.
 * Default settings are equivalent to the CAWPE in the paper.
 * See exampleCAWPEUsage() for more detailed options on defining different component sets,
 * ensemble schemes, and file handling
 *
 * For examples of file creation and results analysis for reproduction purposes, see
 * buildCAWPEPaper_AllResultsForFigure3()
 *
 * CLASSIFICATION SETTINGS:
 * Default setup is defined by setupDefaultEnsembleSettings(), i.e:
 *   Comps: SVML, MLP, NN, Logistic, C4.5
 *   Weight: TrainAcc(4) (train accuracies to the power 4)
 *   Vote: MajorityConfidence (summing probability distributions)
 *
 * For the original settings used in an older version of cote, call setupOriginalHESCASettings(), i.e:
 *   Comps: NN, SVML, SVMQ, C4.5, NB, bayesNet, RotF, RandF
 *   Weight: TrainAcc
 *   Vote: MajorityVote
 *
 * EXPERIMENTAL USAGE:
 * By default will build/trainEstimator members normally, and perform no file reading/writing.
 * To turn on file handling of any kind, call setResultsFileLocationParameters(...)
 *
 * 1) Can build ensemble and classify from results files of its members, call
 *    setBuildIndividualsFromResultsFiles(true)
 *
 * 2) If members built from scratch, can write the results files of the individuals with
 *    setWriteIndividualsTrainResultsFiles(true) and writeIndividualTestFiles(...) after testing is complete
 *
 * 3) And can write the ensemble train/testing files with writeEnsembleTrainTestFiles(...)
 *    after testing is complete
 *
 * There are a bunch of little intricacies if you want to do stuff other than a bog standard run
 * Best bet will be to email me for any specific usage questions.
 *
 * @author James Large (james.large@uea.ac.uk)
 */
public class CAWPE extends AbstractEnsemble implements TechnicalInformationHandler {

    @Override
    public TechnicalInformation getTechnicalInformation() {
        TechnicalInformation result = new TechnicalInformation(TechnicalInformation.Type.ARTICLE);
        result.setValue(TechnicalInformation.Field.AUTHOR, "J. Large, J. Lines and A. Bagnall");
        result.setValue(TechnicalInformation.Field.YEAR, "2019");
        result.setValue(TechnicalInformation.Field.MONTH, "June");
        result.setValue(TechnicalInformation.Field.TITLE, "A probabilistic classifier ensemble weighting scheme based on cross-validated accuracy estimates");
        result.setValue(TechnicalInformation.Field.JOURNAL, "Data Mining and Knowledge Discovery");
        result.setValue(TechnicalInformation.Field.URL, "https://link.springer.com/article/10.1007/s10618-019-00638-y");
        result.setValue(TechnicalInformation.Field.ISSN, "1573-756X");
        return result;
    }

    public CAWPE() {
        super();
    }

    /**
     * Uses the 'basic UCI' set up:
     * Comps: SVML, MLP, NN, Logistic, C4.5
     * Weight: TrainAcc(4) (train accuracies to the power 4)
     * Vote: MajorityConfidence (summing probability distributions)
     */
    @Override //AbstractEnsemble
    public final void setupDefaultEnsembleSettings() {
        this.ensembleName = "CAWPE";

        this.weightingScheme = new TrainAcc(4);
        this.votingScheme = new MajorityConfidence();
        this.transform = null;

        CrossValidationEvaluator cv = new CrossValidationEvaluator(seed, false, false, false, false);
        cv.setNumFolds(10);
        this.trainEstimator = cv;

        Classifier[] classifiers = new Classifier[5];
        String[] classifierNames = new String[5];

        SMO smo = new SMO();
        smo.turnChecksOff();
        smo.setBuildLogisticModels(true);
        PolyKernel kl = new PolyKernel();
        kl.setExponent(1);
        smo.setKernel(kl);
        smo.setRandomSeed(seed);
        classifiers[0] = smo;
        classifierNames[0] = "SVML";

        kNN k = new kNN(100);
        k.setCrossValidate(true);
        k.normalise(false);
        k.setDistanceFunction(new EuclideanDistance());
        classifiers[1] = k;
        classifierNames[1] =
"NN"; classifiers[2] = new J48(); classifierNames[2] = "C4.5"; classifiers[3] = new Logistic(); classifierNames[3] = "Logistic"; classifiers[4] = new MultilayerPerceptron(); classifierNames[4] = "MLP"; setClassifiers(classifiers, classifierNames, null); } /** * Uses the 'basic UCI' set up: * Comps: SVML, MLP, NN, Logistic, C4.5 * Weight: TrainAcc(4) (train accuracies to the power 4) * Vote: MajorityConfidence (summing probability distributions) */ public final void setupDefaultSettings_NoLogistic() { this.ensembleName = "CAWPE-NoLogistic"; this.weightingScheme = new TrainAcc(4); this.votingScheme = new MajorityConfidence(); CrossValidationEvaluator cv = new CrossValidationEvaluator(seed, false, false, false, false); cv.setNumFolds(10); this.trainEstimator = cv; Classifier[] classifiers = new Classifier[4]; String[] classifierNames = new String[4]; SMO smo = new SMO(); smo.turnChecksOff(); smo.setBuildLogisticModels(true); PolyKernel kl = new PolyKernel(); kl.setExponent(1); smo.setKernel(kl); smo.setRandomSeed(seed); classifiers[0] = smo; classifierNames[0] = "SVML"; kNN k=new kNN(100); k.setCrossValidate(true); k.normalise(false); k.setDistanceFunction(new EuclideanDistance()); classifiers[1] = k; classifierNames[1] = "NN"; classifiers[2] = new J48(); classifierNames[2] = "C4.5"; classifiers[3] = new MultilayerPerceptron(); classifierNames[3] = "MLP"; setClassifiers(classifiers, classifierNames, null); } public final void setupAdvancedSettings() { this.ensembleName = "CAWPE-A"; this.weightingScheme = new TrainAcc(4); this.votingScheme = new MajorityConfidence(); CrossValidationEvaluator cv = new CrossValidationEvaluator(seed, false, false, false, false); cv.setNumFolds(10); this.trainEstimator = cv; Classifier[] classifiers = new Classifier[3]; String[] classifierNames = new String[3]; SMO smo = new SMO(); smo.turnChecksOff(); smo.setBuildLogisticModels(true); PolyKernel kl = new PolyKernel(); kl.setExponent(2); smo.setKernel(kl); smo.setRandomSeed(seed); 
classifiers[0] = smo; classifierNames[0] = "SVMQ"; RandomForest rf= new RandomForest(); rf.setNumTrees(500); classifiers[1] = rf; classifierNames[1] = "RandF"; RotationForest rotf=new RotationForest(); rotf.setNumIterations(200); classifiers[2] = rotf; classifierNames[2] = "RotF"; setClassifiers(classifiers, classifierNames, null); } /** * Comps: NN, SVML, SVMQ, C4.5, NB, RotF, RandF, BN, * Weight: TrainAcc * Vote: MajorityVote * * As used originally in ST_HESCA, COTE. * NOTE the original also contained Bayes Net (BN). We have removed it because the classifier crashes * unpredictably when discretising features (due to lack of variance in the feature, but not easily detected and * dealt with * */ public final void setupOriginalHESCASettings() { this.ensembleName = "HESCA"; this.weightingScheme = new TrainAcc(); this.votingScheme = new MajorityVote(); CrossValidationEvaluator cv = new CrossValidationEvaluator(seed, false, false, false, false); cv.setNumFolds(10); this.trainEstimator = cv; int numClassifiers=7; Classifier[] classifiers = new Classifier[numClassifiers]; String[] classifierNames = new String[numClassifiers]; kNN k=new kNN(100); k.setCrossValidate(true); k.normalise(false); k.setDistanceFunction(new EuclideanDistance()); classifiers[0] = k; classifierNames[0] = "NN"; classifiers[1] = new NaiveBayes(); classifierNames[1] = "NB"; classifiers[2] = new J48(); classifierNames[2] = "C45"; SMO svml = new SMO(); svml.turnChecksOff(); PolyKernel kl = new PolyKernel(); kl.setExponent(1); svml.setKernel(kl); svml.setRandomSeed(seed); classifiers[3] = svml; classifierNames[3] = "SVML"; SMO svmq =new SMO(); //Assumes no missing, all real valued and a discrete class variable svmq.turnChecksOff(); PolyKernel kq = new PolyKernel(); kq.setExponent(2); svmq.setKernel(kq); svmq.setRandomSeed(seed); classifiers[4] =svmq; classifierNames[4] = "SVMQ"; RandomForest r=new RandomForest(); r.setNumTrees(500); r.setSeed(seed); classifiers[5] = r; classifierNames[5] = "RandF"; 
RotationForest rf=new RotationForest(); // (tail of setupOriginalHESCASettings) final component: RotF
        rf.setNumIterations(50);
        rf.setSeed(seed);
        classifiers[6] = rf;
        classifierNames[6] = "RotF";
//        classifiers[7] = new BayesNet();
//        classifierNames[7] = "bayesNet";
        setClassifiers(classifiers, classifierNames, null);
    }

    /**
     * Worked example of configuring and running CAWPE end to end: loading data,
     * setting member classifiers / weighting / voting schemes, seeding, and the
     * results-file reading/writing hooks.
     * NOTE(review): data paths are hard-coded to a local machine — will not run elsewhere as-is.
     */
    public static void exampleCAWPEUsage() throws Exception {
        String datasetName = "ItalyPowerDemand";
        Instances train = DatasetLoading.loadDataNullable("c:/tsc problems/"+datasetName+"/"+datasetName+"_TRAIN");
        Instances test = DatasetLoading.loadDataNullable("c:/tsc problems/"+datasetName+"/"+datasetName+"_TEST");
        //Uses predefined default settings. This is the CAWPE classifier built on 'simple' components in the paper, equivalent to setupDefaultEnsembleSettings()
        CAWPE cawpe = new CAWPE();
        //Setting a transform (not used in CAWPE paper, mostly for COTE/HiveCOTE or particular applications)
        Transformer transform = new SAX();
        //TODO: come back and fix this!
        //cawpe.setTransform(transform);
        cawpe.setTransform(null); //back to null for this example
        //Setting member classifiers
        Classifier[] classifiers = new Classifier[] { new kNN() };
        String [] names = new String[] { "NN" };
        String [] params = new String[] { "k=1" };
        cawpe.setClassifiers(classifiers, names, params); //see setClassifiers(...) javadoc
        //Setting ensemble schemes
        cawpe.setWeightingScheme(new TrainAccByClass()); //or set new methods
        cawpe.setVotingScheme(new MajorityVote()); //some voting schemes require dist for inst to be defined
        //Using predefined default settings. This is the CAWPE classifier in the paper, equivalent to default constructor
        cawpe.setupDefaultEnsembleSettings();
        int resampleID = 0;
        cawpe.setSeed(resampleID);
        //File handling
        cawpe.setResultsFileLocationParameters("CAWPETest/", datasetName, resampleID); //use this to set the location for any results file reading/writing
        cawpe.setBuildIndividualsFromResultsFiles(true); //turns on file reading, will read from location provided in setResultsFileLocationParameters(...)
        cawpe.setWriteIndividualsTrainResultsFiles(true); //include this to turn on file writing for individuals trainFold# files
        //can only have one of these (or neither) set to true at any one time (internally, setting one to true
        //will automatically set the other to false)

        //Then build/test as normal
        cawpe.buildClassifier(train);
        System.out.println(ClassifierTools.accuracy(test, cawpe));

        //Call these after testing is complete for fill writing of the individuals test files, and ensemble train AND test files.
        boolean throwExceptionOnFileParamsNotSetProperly = false;
        cawpe.writeIndividualTestFiles(test.attributeToDoubleArray(test.classIndex()), throwExceptionOnFileParamsNotSetProperly);
        cawpe.writeEnsembleTrainTestFiles(test.attributeToDoubleArray(test.classIndex()), throwExceptionOnFileParamsNotSetProperly);
    }

    /**
     * This will build all the base classifier results
     *
     * @param dataHeaders e.g { "UCI", "UCR" }
     * @param dataPaths e.g { "C:/Data/UCI/", "C:/Data/UCR/" }
     * @param datasetNames for each datapath, a list of the dataset names located there to be used [archive][dsetnames]
     * @param classifiers the names of classifiers that can all be found in ClassifierExperiments.setClassifier(...)
     * @param baseWritePath e.g { "C:/Results/" }
     */
    protected static void buildCAWPEPaper_BuildClassifierResultsFiles(String baseWritePath, String[] dataHeaders, String[] dataPaths, String[][] datasetNames,
                                                       String[] classifiers, int numFolds) throws Exception {
        // Iterates archive x classifier x dataset x fold, delegating each run to
        // ClassifierExperiments.main with CLI-style arguments.
        for (int archive = 0; archive < dataHeaders.length; archive++) {
            for (String classifier : classifiers) {
                if (!ClassifierExperiments.beQuiet)
                    System.out.println("\t" + classifier);
                for (String dset : datasetNames[archive]) {
                    if (!ClassifierExperiments.beQuiet)
                        System.out.println(dset);
                    for (int fold = 0; fold < numFolds; fold++) {
                        /*1: Problem path args[0]
                        2. Results path args[1]
                        3. booleanw Whether to generate train files (true/false)
                        4. Classifier =args[3];
                        5. String problem=args[4];
                        6.
int fold=Integer.parseInt(args[5])-1;
                        Optional:
                        7. boolean whether to checkpoint parameter search for applicable tuned classifiers (true/false)
                        8. integer for specific parameter search (0 indicates ignore this)
                        */
                        ClassifierExperiments.main(new String[] { "-dp="+dataPaths[archive], "-rp="+baseWritePath+dataHeaders[archive]+"/",
                                "-cn="+classifier, "-dn="+dset, "-f="+(fold+1), "-gtf=true"});
                    }
                }
            }
        }
    }

    /**
     * This method would build all the results files leading up to figure 3 of
     * https://link.springer.com/article/10.1007/s10618-019-00638-y,
     * the heterogeneous ensemble comparison on the basic classifiers.
     *
     * It would take a long time to run, almost all of which is comprised of
     * building the base classifiers.
     *
     * The experiments and results presented in the paper were distributed on the HPC cluster at UEA,
     * this method is to demonstrate the experimental procedure and to provide a base to copy/edit for
     * full results reproduction of everything in the paper.
     *
     * There are also cases that can't be entirely captured neatly in a method like this, despite
     * my best efforts. For example, while we can call matlab code from here to build diagrams for
     * the analysis, the implementation of the DNN requires that to be run separately. Likewise, while
     * a lot of the legwork of analysis is done programmatically, the deeper exploratory analysis
     * cannot really be done automatically.
     *
     * Still, the idea of getting as close a possible to being able to reproduce the entirety
     * of a paper's results and statistics in a single function call is nice, especially for a
     * paper as extensive and empirically-driven as CAWPE's.
     *
     * For inquiries into specific details of reproduction, best bet is to email us
     * james.large@uea.ac.uk
     * anthony.bagnall@uea.ac.uk
     */
    public static void buildCAWPEPaper_AllResultsForFigure3(String writePathBase) throws Exception {
        if (writePathBase == null)
            writePathBase = "C:/Temp/MCEUpdateTests/CAWPEReprodmem2/"; //default for unit tests, running on e.g. travis

        // Small three-dataset, three-fold configuration for unit-test-scale runs
        String[] dataHeaders = { "UCI", };
        String[] dataPaths = { "src/main/java/experiments/data/uci/" };
        String[][] datasets = { { "hayes-roth", "iris", "teaching" } };
        String writePathResults = writePathBase + "Results/";
        String writePathAnalysis = writePathBase + "Analysis/";
        int numFolds = 3;

//        //init, edit the paths for local running ofc
//        String[] dataHeaders = { "UCI", };
//        String[] dataPaths = { "C:/UCI Problems/", };
//        String[][] datasets = { { "hayes-roth", "pittsburg-bridges-T-OR-D", "teaching", "wine" } };
//        String writePathResults = writePathBase + "Results/";
//        String writePathAnalysis = writePathBase + "Analysis/";
//        int numFolds = 5;

//        String[] dataHeaders = { "UCI", };
//        String[] dataPaths = { "Z:/Data/UCIDelgado/", };
//        String[][] datasets = { DataSets.UCIContinuousFileNames, };
//        String writePathResults = writePathBase + "Results/";
//        String writePathAnalysis = writePathBase + "Analysis/";
//        int numFolds = 30;

        //build the base classifiers
        String[] baseClassifiers = { "NN", "C45", "MLP", "Logistic", "SVML" };
        buildCAWPEPaper_BuildClassifierResultsFiles(writePathResults, dataHeaders, dataPaths, datasets, baseClassifiers, numFolds);

        //build the ensembles
        String[] ensembleIDsInStorage = {
            "CAWPE_BasicClassifiers",
            "EnsembleSelection_BasicClassifiers",
            "SMLR_BasicClassifiers",
            "SMLRE_BasicClassifiers",
            "SMM5_BasicClassifiers",
            "PickBest_BasicClassifiers",
            "MajorityVote_BasicClassifiers",
            "WeightMajorityVote_BasicClassifiers",
            "RecallCombiner_BasicClassifiers",
            "NaiveBayesCombiner_BasicClassifiers"
        };
        String[] ensembleIDsOnFigures = { "CAWPE", "ES", "SMLR", "SMLRE", "SMM5", "PB", "MV", "WMV", "RC", "NBC" };
        String pkg = "machine_learning.classifiers.ensembles.";
        // Ensemble classes resolved reflectively; order must match ensembleIDsInStorage above
        Class[] ensembleClasses = {
            Class.forName(pkg + "CAWPE"),
            Class.forName(pkg + "EnsembleSelection"),
            Class.forName(pkg + "stackers.SMLR"),
            Class.forName(pkg + "stackers.SMLRE"),
            Class.forName(pkg + "stackers.SMM5"),
            Class.forName(pkg + "weightedvoters.CAWPE_PickBest"),
Class.forName(pkg + "weightedvoters.CAWPE_MajorityVote"),
            Class.forName(pkg + "weightedvoters.CAWPE_WeightedMajorityVote"),
            Class.forName(pkg + "weightedvoters.CAWPE_RecallCombiner"),
            Class.forName(pkg + "weightedvoters.CAWPE_NaiveBayesCombiner"),
        };

        // Build every heterogeneous ensemble on top of the already-written base classifier results
        for (int ensemble = 0; ensemble < ensembleIDsInStorage.length; ensemble++)
            buildCAWPEPaper_BuildEnsembleFromResultsFiles(writePathResults, dataHeaders, dataPaths, datasets, baseClassifiers, numFolds, ensembleIDsInStorage[ensemble], ensembleClasses[ensemble]);

        //build the results analysis sheets and figures
        for (int archive = 0; archive < dataHeaders.length; archive++) {
            String analysisName = dataHeaders[archive] + "CAWPEvsHeteroEnsembles_BasicClassifiers";
            buildCAWPEPaper_BuildResultsAnalysis(writePathResults+dataHeaders[archive]+"/", writePathAnalysis,
                    analysisName, ensembleIDsInStorage, ensembleIDsOnFigures, datasets[archive], numFolds);
        }
        //done!
    }

    /**
     * Runs the MultipleEstimatorEvaluation comparison over the stored ensemble results
     * and writes the analysis output (matlab diagram generation is disabled here).
     *
     * @param resultsReadPath directory the per-classifier results were written to
     * @param analysisWritePath directory to write the analysis into
     * @param analysisName name of the analysis run (used for output files)
     * @param classifiersInStorage result-folder names of the classifiers to compare
     * @param classifiersOnFigs display names for the figures, aligned with classifiersInStorage
     */
    protected static void buildCAWPEPaper_BuildResultsAnalysis(String resultsReadPath, String analysisWritePath, String analysisName, String[] classifiersInStorage, String[] classifiersOnFigs, String[] datasets, int numFolds) throws Exception {
        if (!ClassifierExperiments.beQuiet)
            System.out.println("buildCAWPEPaper_BuildResultsAnalysis");
        new MultipleEstimatorEvaluation(analysisWritePath, analysisName, numFolds).
            setTestResultsOnly(true).
//            setBuildMatlabDiagrams(true).
//            setUseAccuracyOnly().
            setBuildMatlabDiagrams(false).
            setDatasets(datasets).
            readInEstimators(classifiersInStorage, classifiersOnFigs, resultsReadPath).
            runComparison();
    }

    /**
     * Builds one heterogeneous ensemble (given by ensembleClass, instantiated
     * reflectively) on top of previously written base classifier results files,
     * for every dataset and fold, and runs it through the normal experiments
     * pipeline. Skips any fold whose test results file already exists.
     */
    protected static void buildCAWPEPaper_BuildEnsembleFromResultsFiles(String baseWritePath, String[] dataHeaders, String[] dataPaths, String[][] datasetNames,
                                                       String[] baseClassifiers, int numFolds, String ensembleID, Class ensembleClass) throws Exception {
        Instances train = null, test = null, all = null; //UCR has predefined train/test splits, UCI data just comes as a whole, so are loaded/resampled differently
        Instances[] data = null; //however it's loaded/resampled, will eventually end up here, { train, test }
        for (int archive = 0; archive < dataHeaders.length; archive++) {
            String writePath = baseWritePath + dataHeaders[archive] + "/";
            for (String dset : datasetNames[archive]) {
                if (!ClassifierExperiments.beQuiet)
                    System.out.println(dset);
                if (dataHeaders[archive].equals("UCI"))
                    all = DatasetLoading.loadDataNullable(dataPaths[archive] + dset + "/" + dset + ".arff");
                else if ((dataHeaders[archive].contains("UCR"))) {
                    train = DatasetLoading.loadDataNullable(dataPaths[archive] + dset + "/" + dset + "_TRAIN.arff");
                    test = DatasetLoading.loadDataNullable(dataPaths[archive] + dset + "/" + dset + "_TEST.arff");
                }
                for (int fold = 0; fold < numFolds; fold++) {
                    //building particular ensembles with different parameters is a bit
                    //more involved so we skip some of the automated stages (especically setClassifier(...) in the
                    //experiments class to build the particular format wanted.
                    //in this example code, i've jsut assumed that default parameters
                    //(aside from the base classifiers) are being used.
//this code could ofc be editted to build whatever particular classifiers
                    //you want, instead of using the janky reflection
                    String predictions = writePath+ensembleID+"/Predictions/"+dset+"/";
                    File f=new File(predictions);
                    if(!f.exists())
                        f.mkdirs();
                    //Check whether fold already exists, if so, dont do it, just quit
                    if(!CollateResults.validateSingleFoldFile(predictions+"/testFold"+fold+".csv")){
                        if (dataHeaders[archive].equals("UCI"))
                            data = InstanceTools.resampleInstances(all, fold, .5);
                        else if ((dataHeaders[archive].contains("UCR")))
                            data = InstanceTools.resampleTrainAndTestInstances(train, test, fold);
                        //cawpe is the base class from which all the heterogeneous ensembles are implemented, since this
                        //already has the base classifier file reading/writing built in etcetc.
                        CAWPE c = (CAWPE) ensembleClass.getConstructor().newInstance();
                        c.setEnsembleName(ensembleID);
                        c.setClassifiers(null, baseClassifiers, null);
                        c.setBuildIndividualsFromResultsFiles(true);
                        c.setResultsFileLocationParameters(writePath, dset, fold);
                        c.setSeed(fold);
                        c.setEstimateOwnPerformance(true);
                        //'custom' classifier built, now put it back in the normal experiments pipeline
                        ExperimentalArguments exp = new ExperimentalArguments();
                        exp.estimatorName = ensembleID;
                        exp.datasetName = dset;
                        exp.foldId = fold;
                        exp.generateErrorEstimateOnTrainSet = true;
                        exp.testFoldFileName = predictions+"/testFold"+fold+".csv";
                        exp.trainFoldFileName = predictions+"/trainFold"+fold+".csv";
//                        exp.performTimingBenchmark = true;
                        ClassifierExperiments.runExperiment(exp,data[0],data[1],c);
                    }
                }
            }
        }
    }

    /**
     * Smoke test: builds a default CAWPE on ItalyPowerDemand (seed 0), evaluates
     * on the test split, and prints accuracy, build time, per-module train
     * accuracies and the two historical reference accuracies for comparison.
     */
    public static void test_basic() throws Exception {
        System.out.println("test_basic()");
        int seed = 0;
        Instances[] data = DatasetLoading.sampleItalyPowerDemand(seed);
//        Instances[] data = DatasetLoading.sampleBeef(seed);

        StratifiedResamplesEvaluator trainEval = new StratifiedResamplesEvaluator();
        trainEval.setNumFolds(30);
        trainEval.setPropInstancesInTrain(0.5);
        trainEval.setSeed(seed);

        CAWPE c = new CAWPE();
        c.setSeed(seed);
//        c.setTrainEstimator(trainEval);
        long t1 = System.currentTimeMillis();
        c.buildClassifier(data[0]);
        t1 = System.currentTimeMillis() - t1;

        SingleTestSetEvaluator eval = new SingleTestSetEvaluator();
        eval.setSeed(seed);
        ClassifierResults res = eval.evaluate(c, data[1]);
        System.out.println("acc="+res.getAcc() + " buildtime="+t1+"ms");
        System.out.print("BaseClassifier train accs: ");
        for (EnsembleModule module : c.getModules())
            System.out.print(module.getModuleName() + ":" +module.trainResults.getAcc() + ", ");
        System.out.println("");
        // Reference accuracies from previous runs, printed for manual comparison
        System.out.println("IPD_CrossValidation: " + 0.9650145772594753);
        System.out.println("IPD_StratifiedResample: " + 0.9630709426627794);
    }

    /**
     * Same smoke test as test_basic(), but with multi-threaded base classifier
     * building enabled — results should match, only build time should differ.
     */
    public static void test_threaded() throws Exception {
        System.out.println("test_threaded()");
        int seed = 0;
        Instances[] data = DatasetLoading.sampleItalyPowerDemand(seed);
//        Instances[] data = DatasetLoading.sampleBeef(seed);

        StratifiedResamplesEvaluator trainEval = new StratifiedResamplesEvaluator();
        trainEval.setNumFolds(30);
        trainEval.setPropInstancesInTrain(0.5);
        trainEval.setSeed(seed);

        CAWPE c = new CAWPE();
        c.setSeed(seed);
//        c.setTrainEstimator(trainEval);
        c.enableMultiThreading();
        long t1 = System.currentTimeMillis();
        c.buildClassifier(data[0]);
        t1 = System.currentTimeMillis() - t1;

        SingleTestSetEvaluator eval = new SingleTestSetEvaluator();
        eval.setSeed(seed);
        ClassifierResults res = eval.evaluate(c, data[1]);
        System.out.println("acc="+res.getAcc() + " buildtime="+t1+"ms");
        System.out.print("BaseClassifier train accs: ");
        for (EnsembleModule module : c.getModules())
            System.out.print(module.getModuleName() + ":" +module.trainResults.getAcc() + ", ");
        System.out.println("");
        System.out.println("IPD_CrossValidation: " + 0.9650145772594753);
        System.out.println("IPD_StratifiedResample: " + 0.9630709426627794);
    }

    public static void main(String[] args) throws Exception {
//        exampleCAWPEUsage();
        buildCAWPEPaper_AllResultsForFigure3(null);
//        System.out.println(ClassifierTools.testUtils_getIPDAcc(new CAWPE()));
//        System.out.println(ClassifierTools.testUtils_confirmIPDReproduction(new CAWPE(), 0.9650145772594753, "2019_09_25"));
//        test_basic();
//        System.out.println("");
//        test_threaded();

        //run:
        //test_basic()
        //acc=0.9650145772594753 buildtime=1646ms
        //BaseClassifier train accs: SVML:0.9701492537313433, NN:0.9552238805970149, C4.5:0.9552238805970149, Logistic:0.9402985074626866, MLP:0.9701492537313433,
        //IPD_CrossValidation: 0.9650145772594753
        //IPD_StratifiedResample: 0.9630709426627794
        //
        //test_threaded()
        //acc=0.9650145772594753 buildtime=532ms
        //BaseClassifier train accs: SVML:0.9701492537313433, NN:0.9552238805970149, C4.5:0.9552238805970149, Logistic:0.9402985074626866, MLP:0.9701492537313433,
        //IPD_CrossValidation: 0.9650145772594753
        //IPD_StratifiedResample: 0.9630709426627794
        //BUILD SUCCESSFUL (total time: 2 seconds)

//        testBuildingInds(3);
//        testLoadingInds(2);
    }
}
31,190
42.746143
202
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/ensembles/ContractRotationForest.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ /* * ContractRotationForest.java. An adaptation of Rotation Forest, 2008 Juan Jose Rodriguez * Contract Version by @author Tony Bagnall, Michael Flynn, first implemented 2018, updated 2019 (checkpointable) * and 2020 (conform to structure) * * We have cloned the code from RotationForest rather than extend it because core changes occur in most methods, and * to decouple from Weka, which has removed random forest from the latest releases. 
*/
package machine_learning.classifiers.ensembles;

import java.io.File;
import java.io.FileInputStream;
import java.io.ObjectInputStream;
import java.io.Serializable;
import java.util.ArrayList;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.OptionHandler;
import weka.core.Randomizable;
import weka.core.Utils;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.Normalize;
import weka.filters.unsupervised.attribute.PrincipalComponents;
import weka.filters.unsupervised.attribute.RemoveUseless;
import weka.filters.unsupervised.instance.RemovePercentage;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import tsml.classifiers.EnhancedAbstractClassifier;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.core.DenseInstance;
import tsml.classifiers.Checkpointable;
import tsml.classifiers.TrainTimeContractable;

/**
 * Contract (train-time-limited) version of Rotation Forest. Builds as many
 * rotation trees as fit in the given time budget, adaptively sub-sampling
 * attributes or cases when a full build would overrun the contract.
 * Supports checkpointing to a serialised file.
 */
public class ContractRotationForest extends EnhancedAbstractClassifier
        implements TrainTimeContractable, Checkpointable, Serializable{

    // The base learner copied for every ensemble member (J48 by default)
    Classifier baseClassifier;
    ArrayList<Classifier> classifiers;
    /** for serialization */
    static final long serialVersionUID = -3255631880798499936L;
    /** The minimum size of a group */
    protected int minGroup = 3;
    /** The maximum size of a group */
    protected int maxGroup = 3;
    /** The percentage of instances to be removed */
    protected int removedPercentage = 50;
    /** The attributes of each group */
    ArrayList< int[][]> groups;
    /** The type of projection filter */
    protected Filter projectionFilter;
    /** The projection filters */
    protected ArrayList<Filter []> projectionFilters;
    /** Headers of the transformed dataset */
    protected ArrayList<Instances> headers;
    /** Headers of the reduced datasets */
    protected ArrayList<Instances []> reducedHeaders;
    /** Filter that remove useless attributes */
    protected RemoveUseless removeUseless = null;
    /** Filter that normalized the attributes */
    protected Normalize normalize = null;

    protected static double CHECKPOINTINTERVAL=2.0; //Minimum interval between checkpointing
    private boolean trainTimeContract = false;
    transient private long trainContractTimeNanos =0;
    double contractHours=0; //Defaults to no contract
    //Added features
    double estSingleTree;     // running estimate of the time (hours) to build one full tree
    int numTrees=0;           // trees built so far
    int minNumTrees=50;       // target minimum ensemble size within the contract
    int maxNumTrees=200;      // hard cap on ensemble size
    int maxNumAttributes;
    String checkpointPath=null;
    boolean checkpoint=false;
    TimingModel tm;           // predicts build time from n (cases) and m (attributes)
    double timeUsed;          // hours consumed so far
    double alpha=0.2;//Learning rate for timing update
    double perForBag = 0.5;   // proportion of the contract reserved for the OOB performance estimate

    /**
     * Constructor.
     */
    public ContractRotationForest() {
        super(CANNOT_ESTIMATE_OWN_PERFORMANCE);
        baseClassifier = new weka.classifiers.trees.J48();
        projectionFilter = defaultFilter();
        tm=new TimingModel();
        checkpointPath=null;
        timeUsed=0;
    }

    /**
     * Default projection method.
     */
    protected Filter defaultFilter() {
        PrincipalComponents filter = new PrincipalComponents();
        //filter.setNormalize(false);
        filter.setVarianceCovered(1.0);
        return filter;
    }

    /**
     * Sets the minimum size of a group.
     *
     * @param minGroup the minimum value.
     * of attributes.
     */
    public void setMinGroup( int minGroup ) throws IllegalArgumentException {
        if( minGroup <= 0 )
            throw new IllegalArgumentException( "MinGroup has to be positive." );
        this.minGroup = minGroup;
    }

    /**
     * Gets the minimum size of a group.
     *
     * @return the minimum value.
     */
    public int getMinGroup() {
        return minGroup;
    }

    public void setMaxNumTrees(int t) throws IllegalArgumentException {
        if( t <= 0 )
            throw new IllegalArgumentException( "maxNumTrees has to be positive." );
        maxNumTrees=t;
    }

    public void setMinNumTrees(int t) throws IllegalArgumentException {
        if( t <= 0 )
            throw new IllegalArgumentException( "minNumTrees has to be positive." );
        minNumTrees=t;
    }

    /**
     * Sets the maximum size of a group.
     *
     * @param maxGroup the maximum value.
     * of attributes.
     */
    public void setMaxGroup( int maxGroup ) throws IllegalArgumentException {
        if( maxGroup <= 0 )
            throw new IllegalArgumentException( "MaxGroup has to be positive." );
        this.maxGroup = maxGroup;
    }

    /**
     * Gets the maximum size of a group.
     *
     * @return the maximum value.
     */
    public int getMaxGroup() {
        return maxGroup;
    }

    /**
     * Sets the percentage of instance to be removed
     *
     * @param removedPercentage the percentage.
     */
    public void setRemovedPercentage( int removedPercentage ) throws IllegalArgumentException {
        if( removedPercentage < 0 )
            throw new IllegalArgumentException( "RemovedPercentage has to be >=0." );
        if( removedPercentage >= 100 )
            throw new IllegalArgumentException( "RemovedPercentage has to be <100." );
        this.removedPercentage = removedPercentage;
    }

    /**
     * Gets the percentage of instances to be removed
     *
     * @return the percentage.
     */
    public int getRemovedPercentage() {
        return removedPercentage;
    }

    /**
     * Sets the filter used to project the data.
     *
     * @param projectionFilter the filter.
     */
    public void setProjectionFilter( Filter projectionFilter ) {
        this.projectionFilter = projectionFilter;
    }

    /**
     * Gets the filter used to project the data.
     *
     * @return the filter.
     */
    public Filter getProjectionFilter() {
        return projectionFilter;
    }

    /**
     * Gets the filter specification string, which contains the class name of
     * the filter and any options to the filter
     *
     * @return the filter string.
     */
    /* Taken from FilteredClassifier */
    protected String getProjectionFilterSpec() {
        Filter c = getProjectionFilter();
        if (c instanceof OptionHandler) {
            return c.getClass().getName() + " "
                + Utils.joinOptions(((OptionHandler)c).getOptions());
        }
        return c.getClass().getName();
    }

    @Override
    public String toString() {
        return "toString not implemented for ContractRotationForest";
    }

    /**
     * builds the classifier.
     *
     * @param data the training data to be used for generating the
     * classifier.
     * @throws Exception if the classifier could not be built successfully
     */
    @Override
    public void buildClassifier(Instances data) throws Exception {
        // can classifier handle the data? These default capabilities
        // only allow real valued series and classification.
// To be adjusted
        getCapabilities().testWithFail(data);
        long startTime=System.nanoTime();
        trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
        //Set up the results file
        super.buildClassifier(data);
        String relationName=data.relationName();
        data = new Instances( data );
        File file = new File(checkpointPath + "RotF" + seed + ".ser");
        //if checkpointing and serialised files exist load said files
        // NOTE(review): checkpointPath may be null here, making the path string "nullRotF..." — file.exists() is then
        // simply false, but confirm this is the intended guard rather than an explicit null check
        if (checkpoint && file.exists()){
            //Configure from file
            printLineDebug("Loading from checkpoint file");
            loadFromFile(checkpointPath + "RotF" + seed + ".ser");
//            checkpointTimeElapsed -= System.nanoTime()-t1;
        }
        else{   //Initialise
            if (baseClassifier == null) {
                throw new Exception("A base classifier has not been specified!");
            }
//            m_Classifiers = AbstractClassifier.makeCopies(m_Classifier, m_NumIterations);
            checkMinMax(data);
            //Initialise everything to the max size, then do in batches.
            //At the end we reduce back to numTrees
            groups=new ArrayList<>();
            // These arrays keep the information of the transformed data set
            headers =new ArrayList<>();
            //Store the PCA transforms
            projectionFilters =new ArrayList<>();
            reducedHeaders = new ArrayList<>();
            classifiers=new ArrayList<>();
            numTrees = 0;
        }
        // Optionally estimate train performance first (bagging-based), consuming perForBag of the
        // contract, then reset all build state and rebuild the final ensemble with what remains
        if (getEstimateOwnPerformance()) {
            estimateOwnPerformance(data);
            this.setTrainTimeLimit(TimeUnit.NANOSECONDS, (long) ((trainContractTimeNanos * (1.0 / perForBag))));
            //Do we need to do this again?
            groups=new ArrayList<>();
            // These arrays keep the information of the transformed data set
            headers =new ArrayList<>();
            //Store the PCA transforms
            projectionFilters =new ArrayList<>();
            reducedHeaders = new ArrayList<>();
            classifiers=new ArrayList<>();
            numTrees = 0;
        }
        rand = new Random(seed);
        //This is from the RotationForest: remove zero variance and normalise attributes.
        //Do this before loading from file, so we can perform checks of dataset?
        removeUseless = new RemoveUseless();
        removeUseless.setInputFormat(data);
        data = Filter.useFilter(data, removeUseless);
        normalize = new Normalize();
        normalize.setInputFormat(data);
        data = Filter.useFilter(data, normalize);
        int numClasses = data.numClasses();
        // Split the instances according to their class.
        // Does not handle regression for clarity
        Instances [] instancesOfClass;
        instancesOfClass = new Instances[numClasses];
        for( int i = 0; i < instancesOfClass.length; i++ ) {
            instancesOfClass[ i ] = new Instances( data, 0 );
        }
        for(Instance instance:data) {
            if( instance.classIsMissing() )
                continue; //Ignore instances with missing class value
            else{
                int c = (int)instance.classValue();
                instancesOfClass[c].add( instance );
            }
        }
        int n=data.numInstances();
        int m=data.numAttributes()-1;
        double treeTime;
        //Re-estimate even if loading serialised, may be different hardware ....
        estSingleTree=tm.estimateSingleTreeHours(n,m);
        System.out.println(" debug = "+debug);
        printLineDebug("n ="+n+" m = "+m+" estSingleTree = "+estSingleTree);
        printLineDebug("Contract time ="+trainContractTimeNanos/1000000000+" seconds and contractHours "+contractHours);
        int maxAtts=m;
        //CASE 1: think we can build the minimum number of trees with full data.
        if(contractHours==0 || (estSingleTree*minNumTrees)<contractHours){
            printLineDebug("Think we are able to build at least 50 trees");
            boolean buildFullTree=true;
            int size;
            //Option to build in batches for smaller data, but not used at the moment
            int batchSize=1;//setBatchSize(estSingleTree); //Set larger for smaller data
//            printLineDebug("Batch size = "+batchSize);
            long startBuild=System.nanoTime();
            // Keep adding trees until the contract (if any) or the tree cap is hit
            while((contractHours==0 || timeUsed<contractHours) && numTrees<maxNumTrees){
                long singleTreeStartTime=System.nanoTime();
                if(buildFullTree)
                    size=m;
                else{
                    // Falling behind: sub-sample attributes — pick a random size in [maxAtts/2, maxAtts)
                    maxAtts=tm.estimateMaxAttributes(m,minNumTrees-numTrees,estSingleTree,contractHours);
                    size=rand.nextInt(maxAtts/2)+maxAtts/2;
                }
                if(batchSize+numTrees>maxNumTrees)
                    batchSize=maxNumTrees-numTrees;
                for(int i=0;i<batchSize;i++)
                    buildTreeAttSample(data,instancesOfClass,numTrees++,m);
                //Update time used
                long newTime=System.nanoTime();
                timeUsed=(newTime-startBuild)/(1000000000.0*60.0*60.0);
                treeTime=(newTime-singleTreeStartTime)/(1000000000.0*60.0*60.0);
                // Update single tree estimate
                estSingleTree=updateTreeTime(estSingleTree,treeTime,alpha,size,m);
                //Taking much longer than we thought!
                if(contractHours>0 && estSingleTree*minNumTrees>contractHours)
                    buildFullTree=false;
                else
                    buildFullTree=true;
                //Checkpoint here
                printLineDebug("Built tree number "+numTrees+" in "+timeUsed+" hours ");
                if(checkpointPath!=null){ //save the serialised version
                    try{
                        File f=new File(checkpointPath);
                        if(!f.isDirectory())
                            f.mkdirs();
                        saveToFile(checkpointPath+relationName+"ContractRotationForest.ser");
                        printLineDebug("CHECKPOINTED: Saved to "+checkpointPath+relationName+"ContractRotationForest.ser");
                    } catch(Exception e){
                        System.out.println("Serialisation to "+checkpointPath+"/"+relationName+"ContractRotationForest.ser FAILED");
                    }
                }
            }
        }
        //CASE 2 and 3: dont think we can build min number of trees
        else{
            printLineDebug("Dont think we can build 50 trees in the time allowed ");
            //If m > n: SAMPLE ATTRIBUTES
            if(m>n){
                //estimate maximum number of attributes allowed, x, to get minNumberOfTrees.
                maxAtts=m;
                long startBuild=System.currentTimeMillis();
                while(timeUsed<contractHours && numTrees<minNumTrees){
                    maxAtts=tm.estimateMaxAttributes(m,minNumTrees-numTrees,estSingleTree,contractHours);
                    int size=rand.nextInt(maxAtts/2)+maxAtts/2;
                    printLineDebug("Max estimated attributes ="+maxAtts);
                    printLineDebug(" using "+size+" attributes, building single tree at a time. Total time used ="+timeUsed);
                    long sTime=System.currentTimeMillis();
                    buildTreeAttSample(data,instancesOfClass,numTrees++,size);
                    //Update time used
                    long newTime=System.currentTimeMillis();
                    timeUsed=(newTime-startBuild)/(1000.0*60.0*60.0);
                    treeTime=(newTime-sTime)/(1000.0*60.0*60.0);
                    estSingleTree=updateTreeTime(estSingleTree,treeTime,alpha,size,m);
//                    (1-alpha)*estSingleTree+alpha*treeTime;
                    printLineDebug(" actual time used ="+timeUsed+" new est single tree = "+estSingleTree);
                    //Checkpoint here
                }
                //Use up any time left here on randomised trees
                while(timeUsed<contractHours && numTrees<maxNumTrees){
                    int size=tm.estimateMaxAttributes(m, 1, estSingleTree,contractHours-timeUsed);
//                    if(estSingleTree<timeUsed-contractHours || size>m)//Build a whole treee
//                        size=m;
                    maxAtts*=2;
                    if(maxAtts>size)
                        maxAtts=size;
                    printLineDebug("OVERTIME: using "+size+" attributes, building single tree at a time. Time used -"+timeUsed);
                    buildTreeAttSample(data,instancesOfClass,numTrees++,maxAtts);
                    //Update time used
                    long newTime=System.currentTimeMillis();
                    timeUsed=(newTime-startBuild)/(1000.0*60.0*60.0);
                    //Checkpoint here
                    printLineDebug("Built tree number "+numTrees+" in "+timeUsed+" hours ");
                }
            }
            else{ //n>m
                //estimate maximum number of cases we can use
                int maxCases=tm.estimateMaxCases(n,minNumTrees,estSingleTree,contractHours);
                printLineDebug("using max "+maxCases+" case, building single tree at a time");
                long startBuild=System.currentTimeMillis();
                while(timeUsed<contractHours && numTrees<minNumTrees){
                    int size=rand.nextInt(maxCases/2)+maxCases/2;
                    buildTreeCaseSample(data,instancesOfClass,numTrees++,size);
                    //Update time used
                    long newTime=System.currentTimeMillis();
                    timeUsed=(newTime-startBuild)/(1000.0*60.0*60.0);
                    //Checkpoint here
                }
                //Use up any time left here on randomised trees
                while(timeUsed<contractHours && numTrees<maxNumTrees){
                    int size=tm.estimateMaxCases(n, 1, estSingleTree,contractHours-timeUsed);
                    buildTreeCaseSample(data,instancesOfClass,numTrees++,size);
                    //Update time used
                    long newTime=System.currentTimeMillis();
                    timeUsed=(newTime-startBuild)/(1000.0*60.0*60.0);
                    //Checkpoint here
                    printLineDebug("Built tree number "+numTrees+" in "+timeUsed+" hours ");
                }
            }
        }
        trainResults.setBuildTime(System.nanoTime()-startTime);
        trainResults.setParas(getParameters());
//        printLineDebug("*************** Finished Contract RotF Build with " + numTrees + " Trees built in " + (System.nanoTime() - startTime) / 1000000000 + " Seconds ***************");
    }

    /**
     * Exponentially-smoothed update of the estimated full-tree build time (hours).
     * The observed time is scaled by m/numAtts to extrapolate a sub-sampled tree's
     * cost up to a full-attribute tree; a negative result leaves the estimate unchanged.
     */
    double updateTreeTime(double estSingleTree,double obsTreeTime,double alpha,int numAtts,int m){
        double t=(1-alpha)*estSingleTree;
        t+=alpha*(m/(double)numAtts)*obsTreeTime;
        if(t<0)
            return estSingleTree;
        return t;
    }

    /**
     * Draws numBags bootstrap bags over data: bags[i][j] counts how many times
     * case j is sampled into bag i (0 means j is out-of-bag for that tree).
     * Uses a fresh Random(seed) so bags are reproducible for a given seed.
     */
    private int[][] generateBags(int numBags, int bagProp, Instances data){
        int[][] bags = new int[numBags][data.size()];
        Random random = new Random(seed);
        for (int i = 0; i < numBags; i++) {
            for (int j = 0; j < data.size() * (bagProp/100.0); j++) {
                bags[i][random.nextInt(data.size())]++;
            }
        }
        return bags;
    }

    /**
     * Estimates train performance via out-of-bag evaluation, spending perForBag
     * of the train contract on bagged trees. (Body continues beyond this chunk.)
     */
    private void estimateOwnPerformance(Instances data) throws Exception {
        trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
        trainResults.setEstimatorName(getClassifierName());
        trainResults.setDatasetName(data.relationName());
        trainResults.setFoldID(seed);
        //int numTrees = 200;
        int bagProp = 100;
        int treeCount = 0;
        Classifier[] classifiers = new Classifier[maxNumTrees];
        int[] timesInTest = new int[data.size()];
        double[][][] distributions = new double[maxNumTrees][data.size()][(int) data.numClasses()];
        double[][] finalDistributions = new double[data.size()][(int) data.numClasses()];
        int[][] bags;
        ArrayList[] testIndexs = new ArrayList[maxNumTrees];
        double[] bagAccuracies = new double[maxNumTrees];
        // Reserve only perForBag of the contract for this estimation phase
        this.trainContractTimeNanos = (long) ((double) trainContractTimeNanos * perForBag);
        //Grimness starts here.
        rand = new Random(seed);
        //This is from the RotationForest: remove zero variance and normalise attributes.
        //Do this before loading from file, so we can perform checks of dataset?
removeUseless = new RemoveUseless(); removeUseless.setInputFormat(data); data = Filter.useFilter(data, removeUseless); normalize = new Normalize(); normalize.setInputFormat(data); data = Filter.useFilter(data, normalize); int numClasses = data.numClasses(); bags = generateBags(maxNumTrees, bagProp, data); // Split the instances according to their class. // Does not handle regression for clarity /*Instances [] instancesOfClass; instancesOfClass = new Instances[numClasses]; for( int i = 0; i < instancesOfClass.length; i++ ) { instancesOfClass[ i ] = new Instances( data, 0 ); } for(Instance instance:data) { if( instance.classIsMissing() ) continue; //Ignore instances with missing class value else{ int c = (int)instance.classValue(); instancesOfClass[c].add( instance ); } }*/ int n = data.numInstances(); int m = data.numAttributes() - 1; double treeTime; //Re-estimate even if loading serialised, may be different hardware .... estSingleTree = tm.estimateSingleTreeHours(n, m); printLineDebug("n =" + n + " m = " + m + " estSingleTree = " + estSingleTree); printLineDebug("Contract time =" + contractHours + " hours "); int maxAtts = m; //CASE 1: think we can build the minimum number of trees with full data. 
if (contractHours == 0 || (estSingleTree * minNumTrees) < contractHours) { if (debug) System.out.println("Think we are able to build at least 50 trees"); boolean buildFullTree = true; int size; //Option to build in batches for smaller data, but not used at the moment int batchSize = 1;//setBatchSize(estSingleTree); //Set larger for smaller data // if(debug) // System.out.println("Batch size = "+batchSize); long startBuild = System.currentTimeMillis(); while ((contractHours == 0 || timeUsed < contractHours) && numTrees < maxNumTrees) { long singleTreeStartTime = System.currentTimeMillis(); Instances trainHeader = new Instances(data, 0); Instances testHeader = new Instances(data, 0); ArrayList<Integer> indexs = new ArrayList<>(); for (int j = 0; j < bags[numTrees].length; j++) { if (bags[numTrees][j] == 0) { testHeader.add(data.get(j)); timesInTest[j]++; indexs.add(j); } for (int k = 0; k < bags[numTrees][j]; k++) { trainHeader.add(data.get(j)); } } testIndexs[numTrees] = indexs; Instances[] instancesOfClass; instancesOfClass = new Instances[numClasses]; for (int i = 0; i < instancesOfClass.length; i++) { instancesOfClass[i] = new Instances(trainHeader, 0); } for (Instance instance : trainHeader) { if (instance.classIsMissing()) continue; //Ignore instances with missing class value else { int c = (int) instance.classValue(); instancesOfClass[c].add(instance); } } if (buildFullTree) size = trainHeader.size(); else { maxAtts = tm.estimateMaxAttributes(trainHeader.size(), minNumTrees - numTrees, estSingleTree, contractHours); size = rand.nextInt(maxAtts / 2) + maxAtts / 2; } if (batchSize + numTrees > maxNumTrees) batchSize = maxNumTrees - numTrees; for (int i = 0; i < batchSize; i++) buildTreeAttSample(trainHeader, instancesOfClass, numTrees++, m); //test testing(testHeader, distributions, numTrees, bagAccuracies, indexs); trainHeader.clear(); testHeader.clear(); //Update time used long newTime = System.currentTimeMillis(); timeUsed = (newTime - startBuild) / (1000.0 * 
60.0 * 60.0); treeTime = (newTime - singleTreeStartTime) / (1000.0 * 60.0 * 60.0); // Update single tree estimate estSingleTree = updateTreeTime(estSingleTree, treeTime, alpha, size, m); //Taking much longer than we thought! if (contractHours > 0 && estSingleTree * minNumTrees > contractHours) buildFullTree = false; else buildFullTree = true; //Checkpoint here if (debug) System.out.println("Built tree number " + numTrees + " in " + timeUsed + " hours "); } } //CASE 2 and 3: dont think we can build min number of trees else { if (debug) System.out.println("Dont think we can build 50 trees in the time allowed "); //If m > n: SAMPLE ATTRIBUTES if (m > n) { //estimate maximum number of attributes allowed, x, to get minNumberOfTrees. maxAtts = m; long startBuild = System.currentTimeMillis(); while (timeUsed < contractHours && numTrees < minNumTrees) { Instances trainHeader = new Instances(data, 0); Instances testHeader = new Instances(data, 0); ArrayList<Integer> indexs = new ArrayList<>(); for (int j = 0; j < bags[numTrees].length; j++) { if (bags[numTrees][j] == 0) { testHeader.add(data.get(j)); timesInTest[j]++; indexs.add(j); } for (int k = 0; k < bags[numTrees][j]; k++) { trainHeader.add(data.get(j)); } } testIndexs[numTrees] = indexs; Instances[] instancesOfClass; instancesOfClass = new Instances[numClasses]; for (int i = 0; i < instancesOfClass.length; i++) { instancesOfClass[i] = new Instances(trainHeader, 0); } for (Instance instance : trainHeader) { if (instance.classIsMissing()) continue; //Ignore instances with missing class value else { int c = (int) instance.classValue(); instancesOfClass[c].add(instance); } } maxAtts = tm.estimateMaxAttributes(trainHeader.size(), minNumTrees - numTrees, estSingleTree, contractHours); int size = rand.nextInt(maxAtts / 2) + maxAtts / 2; if (debug) { System.out.print("Max estimated attributes =" + maxAtts); System.out.println(" using " + size + " attributes, building single tree at a time. 
Total time used =" + timeUsed); } long sTime = System.currentTimeMillis(); buildTreeAttSample(trainHeader, instancesOfClass, numTrees++, size); //test testing(testHeader, distributions, numTrees, bagAccuracies, indexs); trainHeader.clear(); testHeader.clear(); //Update time used long newTime = System.currentTimeMillis(); timeUsed = (newTime - startBuild) / (1000.0 * 60.0 * 60.0); treeTime = (newTime - sTime) / (1000.0 * 60.0 * 60.0); estSingleTree = updateTreeTime(estSingleTree, treeTime, alpha, size, m); // (1-alpha)*estSingleTree+alpha*treeTime; if (debug) System.out.println(" actual time used =" + timeUsed + " new est single tree = " + estSingleTree); //Checkpoint here } //Use up any time left here on randomised trees while (timeUsed < contractHours && numTrees < maxNumTrees) { Instances trainHeader = new Instances(data, 0); Instances testHeader = new Instances(data, 0); ArrayList<Integer> indexs = new ArrayList<>(); for (int j = 0; j < bags[numTrees].length; j++) { if (bags[numTrees][j] == 0) { testHeader.add(data.get(j)); timesInTest[j]++; indexs.add(j); } for (int k = 0; k < bags[numTrees][j]; k++) { trainHeader.add(data.get(j)); } } testIndexs[numTrees] = indexs; Instances[] instancesOfClass; instancesOfClass = new Instances[numClasses]; for (int i = 0; i < instancesOfClass.length; i++) { instancesOfClass[i] = new Instances(trainHeader, 0); } for (Instance instance : trainHeader) { if (instance.classIsMissing()) continue; //Ignore instances with missing class value else { int c = (int) instance.classValue(); instancesOfClass[c].add(instance); } } int size = tm.estimateMaxAttributes(trainHeader.size(), 1, estSingleTree, contractHours - timeUsed); // if(estSingleTree<timeUsed-contractHours || size>m)//Build a whole treee // size=m; maxAtts *= 2; if (maxAtts > size) maxAtts = size; if (debug) System.out.println("OVERTIME: using " + size + " attributes, building single tree at a time. 
Time used -" + timeUsed); buildTreeAttSample(trainHeader, instancesOfClass, numTrees++, maxAtts); //test testing(testHeader, distributions, numTrees, bagAccuracies, indexs); trainHeader.clear(); testHeader.clear(); //Update time used long newTime = System.currentTimeMillis(); timeUsed = (newTime - startBuild) / (1000.0 * 60.0 * 60.0); //Checkpoint here if (debug) System.out.println("Built tree number " + numTrees + " in " + timeUsed + " hours "); } } else { //n>m //estimate maximum number of cases we can use int maxCases = tm.estimateMaxCases(n, minNumTrees, estSingleTree, contractHours); if (debug) System.out.println("using max " + maxCases + " case, building single tree at a time"); long startBuild = System.currentTimeMillis(); while (timeUsed < contractHours && numTrees < minNumTrees) { Instances trainHeader = new Instances(data, 0); Instances testHeader = new Instances(data, 0); ArrayList<Integer> indexs = new ArrayList<>(); for (int j = 0; j < bags[numTrees].length; j++) { if (bags[numTrees][j] == 0) { testHeader.add(data.get(j)); timesInTest[j]++; indexs.add(j); } for (int k = 0; k < bags[numTrees][j]; k++) { trainHeader.add(data.get(j)); } } testIndexs[numTrees] = indexs; Instances[] instancesOfClass; instancesOfClass = new Instances[numClasses]; for (int i = 0; i < instancesOfClass.length; i++) { instancesOfClass[i] = new Instances(trainHeader, 0); } for (Instance instance : trainHeader) { if (instance.classIsMissing()) continue; //Ignore instances with missing class value else { int c = (int) instance.classValue(); instancesOfClass[c].add(instance); } } int size = rand.nextInt(maxCases / 2) + maxCases / 2; buildTreeCaseSample(trainHeader, instancesOfClass, numTrees++, size); //test testing(testHeader, distributions, numTrees, bagAccuracies, indexs); trainHeader.clear(); testHeader.clear(); //Update time used long newTime = System.currentTimeMillis(); timeUsed = (newTime - startBuild) / (1000.0 * 60.0 * 60.0); //Checkpoint here } //Use up any time left here 
on randomised trees while (timeUsed < contractHours && numTrees < maxNumTrees) { Instances trainHeader = new Instances(data, 0); Instances testHeader = new Instances(data, 0); ArrayList<Integer> indexs = new ArrayList<>(); for (int j = 0; j < bags[numTrees].length; j++) { if (bags[numTrees][j] == 0) { testHeader.add(data.get(j)); timesInTest[j]++; indexs.add(j); } for (int k = 0; k < bags[numTrees][j]; k++) { trainHeader.add(data.get(j)); } } testIndexs[numTrees] = indexs; Instances[] instancesOfClass; instancesOfClass = new Instances[numClasses]; for (int i = 0; i < instancesOfClass.length; i++) { instancesOfClass[i] = new Instances(trainHeader, 0); } for (Instance instance : trainHeader) { if (instance.classIsMissing()) continue; //Ignore instances with missing class value else { int c = (int) instance.classValue(); instancesOfClass[c].add(instance); } } int size = tm.estimateMaxCases(n, 1, estSingleTree, contractHours - timeUsed); buildTreeCaseSample(trainHeader, instancesOfClass, numTrees++, size); //test testing(testHeader, distributions, numTrees, bagAccuracies, indexs); trainHeader.clear(); testHeader.clear(); //Update time used long newTime = System.currentTimeMillis(); timeUsed = (newTime - startBuild) / (1000.0 * 60.0 * 60.0); //Checkpoint here if (debug) System.out.println("Built tree number " + numTrees + " in " + timeUsed + " hours "); } } } for (int i = 0; i < bags.length; i++) { for (int j = 0; j < bags[i].length; j++) { if (bags[i][j] == 0) { for (int k = 0; k < finalDistributions[j].length; k++) { finalDistributions[j][k] += distributions[i][j][k]; } } } } for (int i = 0; i < finalDistributions.length; i++) { if (timesInTest[i] > 1) { for (int j = 0; j < finalDistributions[i].length; j++) { finalDistributions[i][j] /= timesInTest[i]; } } } //Add to trainResults. 
double acc = 0.0; for (int i = 0; i < finalDistributions.length; i++) { double predClass = findIndexOfMax(finalDistributions[i], rand); trainResults.addPrediction(data.get(i).classValue(), finalDistributions[i], predClass, 0, ""); } } private void testing (Instances testHeader, double[][][] distributions, int treeCount, double[] bagAccuracies, ArrayList<Integer> indexs) throws Exception { treeCount -= 1; for (int j = 0; j < testHeader.size(); j++) { Instance test = convertInstance(testHeader.get(j), treeCount); try { distributions[treeCount][indexs.get(j)] = classifiers.get(treeCount).distributionForInstance(test); if (classifiers.get(treeCount).classifyInstance(test) == testHeader.get(j).classValue()) { bagAccuracies[treeCount]++; } } catch (Exception e) { e.printStackTrace(); } } bagAccuracies[treeCount] /= testHeader.size(); } /** Build a rotation forest tree on a random subsample of the attributes * * @param data * @param instancesOfClass * @param i * @param numAtts * @throws Exception */ public void buildTreeAttSample(Instances data, Instances [] instancesOfClass,int i, int numAtts) throws Exception{ int[][] g=generateGroupFromSize(data, rand,numAtts); Filter[] projection=Filter.makeCopies(projectionFilter, g.length ); projectionFilters.add(projection); groups.add(g); Instances[] reducedHeaders = new Instances[ g.length ]; this.reducedHeaders.add(reducedHeaders); ArrayList<Attribute> transformedAttributes = new ArrayList<>( data.numAttributes() ); // Construction of the dataset for each group of attributes for( int j = 0; j < g.length; j++ ) { ArrayList<Attribute> fv = new ArrayList<>( g[j].length + 1 ); for( int k = 0; k < g[j].length; k++ ) { String newName = data.attribute( g[j][k] ).name() + "_" + k; fv.add(data.attribute( g[j][k] ).copy(newName) ); } fv.add( (Attribute)data.classAttribute( ).copy() ); Instances dataSubSet = new Instances( "rotated-" + i + "-" + j + "-", fv, 0); dataSubSet.setClassIndex( dataSubSet.numAttributes() - 1 ); // Select 
instances for the dataset reducedHeaders[j] = new Instances( dataSubSet, 0 ); boolean [] selectedClasses = selectClasses( instancesOfClass.length, rand ); for( int c = 0; c < selectedClasses.length; c++ ) { if( !selectedClasses[c] ) continue; for(Instance instance:instancesOfClass[c]) { Instance newInstance = new DenseInstance(dataSubSet.numAttributes()); newInstance.setDataset( dataSubSet ); for( int k = 0; k < g[j].length; k++ ) { newInstance.setValue( k, instance.value( g[j][k] ) ); } newInstance.setClassValue( instance.classValue( ) ); dataSubSet.add( newInstance ); } } dataSubSet.randomize(rand); // Remove a percentage of the instances Instances originalDataSubSet = dataSubSet; dataSubSet.randomize(rand); RemovePercentage rp = new RemovePercentage(); rp.setPercentage(removedPercentage ); rp.setInputFormat( dataSubSet ); dataSubSet = Filter.useFilter( dataSubSet, rp ); if( dataSubSet.numInstances() < 2 ) { dataSubSet = originalDataSubSet; } // Project the data projection[j].setInputFormat( dataSubSet ); Instances projectedData = null; do { try { projectedData = Filter.useFilter( dataSubSet, projection[j] ); } catch ( Exception e ) { // The data could not be projected, we add some random instances addRandomInstances( dataSubSet, 10, rand ); } } while( projectedData == null ); // Include the projected attributes in the attributes of the // transformed dataset for( int a = 0; a < projectedData.numAttributes() - 1; a++ ) { String newName = projectedData.attribute(a).name() + "_" + j; transformedAttributes.add( projectedData.attribute(a).copy(newName)); } } transformedAttributes.add((Attribute)data.classAttribute().copy() ); Instances buildClas = new Instances( "rotated-" + i + "-", transformedAttributes, 0 ); buildClas.setClassIndex( buildClas.numAttributes() - 1 ); headers.add(new Instances( buildClas, 0 )); // Project all the training data for(Instance instance:data) { Instance newInstance = convertInstance( instance, i ); buildClas.add( newInstance ); } 
Classifier c= AbstractClassifier.makeCopy(baseClassifier); // Build the base classifier if (c instanceof Randomizable) { ((Randomizable) c).setSeed(rand.nextInt()); } c.buildClassifier( buildClas ); classifiers.add(c); } /** Build a rotation forest tree on a random subsample of the instances * * @param data * @param instancesOfClass * @param i * @param numCases * @throws Exception */ public void buildTreeCaseSample(Instances data, Instances [] instancesOfClass,int i, int numCases) throws Exception{ int[][] g=generateGroupFromSize(data, rand,data.numAttributes()-1); Filter[] projection=Filter.makeCopies(projectionFilter, g.length ); projectionFilters.add(projection); groups.add(g); Instances[] reducedHeaders = new Instances[ g.length ]; this.reducedHeaders.add(reducedHeaders); data=new Instances(data); int m=data.numInstances(); for(int k=0;k<m-numCases;k++) data.remove(rand.nextInt(data.numInstances())); ArrayList<Attribute> transformedAttributes = new ArrayList<>( data.numAttributes() ); // Construction of the dataset for each group of attributes for( int j = 0; j < g.length; j++ ) { ArrayList<Attribute> fv = new ArrayList<>( g[j].length + 1 ); for( int k = 0; k < g[j].length; k++ ) { String newName = data.attribute( g[j][k] ).name() + "_" + k; fv.add( data.attribute( g[j][k] ).copy(newName) ); } fv.add((Attribute)data.classAttribute( ).copy() ); Instances dataSubSet = new Instances( "rotated-" + i + "-" + j + "-", fv, 0); dataSubSet.setClassIndex( dataSubSet.numAttributes() - 1 ); // Select instances for the dataset reducedHeaders[j] = new Instances( dataSubSet, 0 ); boolean [] selectedClasses = selectClasses( instancesOfClass.length, rand ); for( int c = 0; c < selectedClasses.length; c++ ) { if( !selectedClasses[c] ) continue; for(Instance instance:instancesOfClass[c]) { Instance newInstance = new DenseInstance(dataSubSet.numAttributes()); newInstance.setDataset( dataSubSet ); for( int k = 0; k < g[j].length; k++ ) { newInstance.setValue( k, instance.value( 
g[j][k] ) ); } newInstance.setClassValue( instance.classValue( ) ); dataSubSet.add( newInstance ); } } dataSubSet.randomize(rand); // Remove a percentage of the instances Instances originalDataSubSet = dataSubSet; dataSubSet.randomize(rand); RemovePercentage rp = new RemovePercentage(); rp.setPercentage(removedPercentage ); rp.setInputFormat( dataSubSet ); dataSubSet = Filter.useFilter( dataSubSet, rp ); if( dataSubSet.numInstances() < 2 ) { dataSubSet = originalDataSubSet; } // Project the data projection[j].setInputFormat( dataSubSet ); Instances projectedData = null; do { try { projectedData = Filter.useFilter( dataSubSet, projection[j] ); } catch ( Exception e ) { // The data could not be projected, we add some random instances addRandomInstances( dataSubSet, 10, rand ); } } while( projectedData == null ); // Include the projected attributes in the attributes of the // transformed dataset for( int a = 0; a < projectedData.numAttributes() - 1; a++ ) { String newName = projectedData.attribute(a).name() + "_" + j; transformedAttributes.add( projectedData.attribute(a).copy(newName)); } } transformedAttributes.add((Attribute)data.classAttribute().copy() ); Instances buildClas = new Instances( "rotated-" + i + "-", transformedAttributes, 0 ); buildClas.setClassIndex( buildClas.numAttributes() - 1 ); headers.add(new Instances( buildClas, 0 )); // Project all the training data for(Instance instance:data) { Instance newInstance = convertInstance( instance, i ); buildClas.add( newInstance ); } Classifier c= AbstractClassifier.makeCopy(baseClassifier); // Build the base classifier if (c instanceof Randomizable) { ((Randomizable) c).setSeed(rand.nextInt()); } c.buildClassifier( buildClas ); classifiers.add(c); } private int setBatchSize(double singleTreeHours){ if(singleTreeHours> CHECKPOINTINTERVAL) return 1; int hrs=(int)(CHECKPOINTINTERVAL/singleTreeHours); return hrs; } /** * Adds random instances to the dataset. 
* * @param dataset the dataset * @param numInstances the number of instances * @param random a random number generator */ protected void addRandomInstances( Instances dataset, int numInstances, Random random ) { int n = dataset.numAttributes(); double [] v = new double[ n ]; for( int i = 0; i < numInstances; i++ ) { for( int j = 0; j < n; j++ ) { Attribute att = dataset.attribute( j ); if( att.isNumeric() ) { v[ j ] = random.nextDouble(); } else if ( att.isNominal() ) { v[ j ] = random.nextInt( att.numValues() ); } } dataset.add( new DenseInstance( 1, v ) ); } } /** * Checks minGroup and maxGroup * * @param data the dataset */ protected void checkMinMax(Instances data) { if( minGroup > maxGroup ) { int tmp = maxGroup; maxGroup = minGroup; minGroup = tmp; } int n = data.numAttributes(); if( maxGroup >= n ) maxGroup = n - 1; if( minGroup >= n ) minGroup = n - 1; } /** * Selects a non-empty subset of the classes * * @param numClasses the number of classes * @param random the random number generator. * @return a random subset of classes */ protected boolean [] selectClasses( int numClasses, Random random ) { int numSelected = 0; boolean selected[] = new boolean[ numClasses ]; for( int i = 0; i < selected.length; i++ ) { if(random.nextBoolean()) { selected[i] = true; numSelected++; } } if( numSelected == 0 ) { selected[random.nextInt( selected.length )] = true; } return selected; } /** * generates the groups of attributes, given their minimum and maximum * sizes. * * @param data the training data to be used for generating the * groups. * @param random the random number generator. 
*/ protected int[][] generateGroupFromSize(Instances data, Random random, int maxAtts) { int[][] groups; int [] permutation = attributesPermutation(data.numAttributes(), data.classIndex(), random, maxAtts); // The number of groups that have a given size int [] numGroupsOfSize = new int[maxGroup - minGroup + 1]; int numAttributes = 0; int numGroups; // Select the size of each group for( numGroups = 0; numAttributes < permutation.length; numGroups++ ) { int n = random.nextInt( numGroupsOfSize.length ); numGroupsOfSize[n]++; numAttributes += minGroup + n; } groups = new int[numGroups][]; int currentAttribute = 0; int currentSize = 0; for( int j = 0; j < numGroups; j++ ) { while( numGroupsOfSize[ currentSize ] == 0 ) currentSize++; numGroupsOfSize[ currentSize ]--; int n = minGroup + currentSize; groups[j] = new int[n]; for( int k = 0; k < n; k++ ) { if( currentAttribute < permutation.length ) groups[j][k] = permutation[ currentAttribute ]; else // For the last group, it can be necessary to reuse some attributes groups[j][k] = permutation[ random.nextInt( permutation.length ) ]; currentAttribute++; } } return groups; } final protected int [] attributesPermutation(int numAttributes, int classAttribute, Random random, int maxNumAttributes) { int [] permutation = new int[numAttributes-1]; int i = 0; //This just ignores the class attribute for(; i < classAttribute; i++){ permutation[i] = i; } for(; i < permutation.length; i++){ permutation[i] = i + 1; } permute( permutation, random ); if(numAttributes>maxNumAttributes){ //TRUNCTATE THE PERMATION TO CONSIDER maxNumAttributes. // we could do this more efficiently, but this is the simplest way. int[] temp = new int[maxNumAttributes]; System.arraycopy(permutation, 0, temp, 0, maxNumAttributes); permutation=temp; } return permutation; } /** * permutes the elements of a given array. * * @param v the array to permute * @param random the random number generator. 
*/ protected void permute( int v[], Random random ) { for(int i = v.length - 1; i > 0; i-- ) { int j = random.nextInt( i + 1 ); if( i != j ) { int tmp = v[i]; v[i] = v[j]; v[j] = tmp; } } } /** * prints the groups. */ protected void printGroups( ) { for( int i = 0; i < groups.size(); i++ ) { for( int j = 0; j < groups.get(i).length; j++ ) { System.err.print( "( " ); for( int k = 0; k < groups.get(i)[j].length; k++ ) { System.err.print(groups.get(i)[j][k] ); System.err.print( " " ); } System.err.print( ") " ); } System.err.println( ); } } /** * Transforms an instance for the i-th classifier. * * @param instance the instance to be transformed * @param i the base classifier number * @return the transformed instance * @throws Exception if the instance can't be converted successfully */ protected Instance convertInstance( Instance instance, int i ) throws Exception { Instance newInstance = new DenseInstance( headers.get(i).numAttributes( ) ); newInstance.setWeight(instance.weight()); newInstance.setDataset(headers.get(i)); int currentAttribute = 0; // Project the data for each group int[][] g=groups.get(i); for( int j = 0; j < g.length; j++ ) { Instance auxInstance = new DenseInstance(g[j].length + 1 ); int k; for( k = 0; k < g[j].length; k++ ) { auxInstance.setValue( k, instance.value( g[j][k] ) ); } auxInstance.setValue( k, instance.classValue( ) ); auxInstance.setDataset(reducedHeaders.get(i)[ j ] ); Filter[] projection=projectionFilters.get(i); projection[j].input( auxInstance ); auxInstance = projection[j].output( ); projection[j].batchFinished(); for( int a = 0; a < auxInstance.numAttributes() - 1; a++ ) { newInstance.setValue( currentAttribute++, auxInstance.value( a ) ); } } newInstance.setClassValue( instance.classValue() ); return newInstance; } /** * Calculates the class membership probabilities for the given test * instance. 
* * @param instance the instance to be classified * @return preedicted class probability distribution * @throws Exception if distribution can't be computed successfully */ @Override public double[] distributionForInstance(Instance instance) throws Exception { removeUseless.input(instance); instance =removeUseless.output(); removeUseless.batchFinished(); normalize.input(instance); instance =normalize.output(); normalize.batchFinished(); double [] sums = new double [instance.numClasses()], newProbs; for (int i = 0; i < classifiers.size(); i++) { Instance convertedInstance = convertInstance(instance, i); if (instance.classAttribute().isNumeric() == true) { sums[0] += classifiers.get(i).classifyInstance(convertedInstance); } else { newProbs = classifiers.get(i).distributionForInstance(convertedInstance); for (int j = 0; j < newProbs.length; j++) sums[j] += newProbs[j]; } } if (instance.classAttribute().isNumeric() == true) { sums[0] /= (double)classifiers.size(); return sums; } else if (Utils.eq(Utils.sum(sums), 0)) { return sums; } else { Utils.normalize(sums); return sums; } } @Override public String getParameters() { String result="BuildTime,"+trainResults.getBuildTime()+",RemovePercent,"+this.getRemovedPercentage()+",NumFeatures,"+this.getMaxGroup(); result+=",numTrees,"+numTrees; return result; } @Override //Checkpointable public boolean setCheckpointPath(String path) { boolean validPath=Checkpointable.super.createDirectories(path); if(validPath){ checkpointPath = path; checkpoint = true; } return validPath; } @Override public void copyFromSerObject(Object obj) throws Exception { if(!(obj instanceof ContractRotationForest)) throw new Exception("The SER file is not an instance of ContractRotationForest"); //To change body of generated methods, choose Tools | Templates. 
ContractRotationForest saved= ((ContractRotationForest)obj); //Copy RotationForest attributes baseClassifier=saved.baseClassifier; classifiers=saved.classifiers; minGroup = saved.minGroup; maxGroup = saved.maxGroup; removedPercentage = saved.removedPercentage; groups = saved.groups; projectionFilter = saved.projectionFilter; projectionFilters = saved.projectionFilters; headers = saved.headers; reducedHeaders = saved.reducedHeaders; removeUseless = saved.removeUseless; normalize = saved.normalize; //Copy ContractRotationForest attributes. Not su this.contractHours=saved.contractHours; trainResults=saved.trainResults; minNumTrees=saved.minNumTrees; maxNumTrees=saved.maxNumTrees; maxNumAttributes=saved.maxNumAttributes; checkpointPath=saved.checkpointPath; debug=saved.debug; tm=saved.tm; timeUsed=saved.timeUsed; numTrees=saved.numTrees; } /** * abstract methods from TrainTimeContractable interface * @param amount */ @Override public void setTrainTimeLimit(long amount) { printLineDebug(" Setting ContractRotationForest contract to be "+amount); if(amount>0) { trainContractTimeNanos = amount; trainTimeContract = true; contractHours=trainContractTimeNanos/1000000000/60.0/60.0; } else trainTimeContract = false; } @Override public boolean withinTrainContract(long start) { return start<trainContractTimeNanos; } /** * Main method for testing this class. 
* * @param argv the options */ public static void main(String [] argv) throws Exception { ContractRotationForest cf =new ContractRotationForest(); Class cls=cf.getClass(); System.out.println("Class canonical name ="+cls.getCanonicalName()+" class simple name "+cls.getSimpleName()+" class full name ="+cls.getName()); String path="C:/temp/ItalyPowerDemandContractRotationForest.ser"; FileInputStream fis = new FileInputStream(path); ObjectInputStream in = new ObjectInputStream(fis); Object crf =in.readObject(); in.close(); TimingModel tm=cf.new TimingModel(); } private class TimingModel implements Serializable{ double b0,b1,b2,b3,b4; double predictionInterval=3.67; double baseNumberOfTrees=200; //Time taken to do a standard operation on the model build computer static final double BASEFACTOR=1; double normalisingFactor; public TimingModel(){ //Default model an+bm+cmn b0=0.679693678; b1=0.000132076; //n b2=0.000245885;//m b3=1.23057E-06;//mn normalisingFactor=normalise(); } double estimateSingleTreeHours(int n, int m){ //Estimate time double t=b0+b1*n+b2*m+b3*n*m+predictionInterval; t*=normalisingFactor/BASEFACTOR; t/=baseNumberOfTrees; //Normalise for this computer return t; //This is a fraction of an hour! so .1 ==6 minutes } final double normalise(){ return 1.0; } //estimate of the number of possible attributes to build numTrees given a contract time int estimateMaxAttributes(int m, int numTrees, double singleTreeTime, double contractTime){ double estM=(m*contractTime)/(numTrees*singleTreeTime); if(estM<3) estM=3; else if(estM>m) estM=m; return (int)(estM); } int estimateMaxCases(int n, int numTrees, double singleTreeTime, double contractTime){ double estN=(n*contractTime)/(numTrees*singleTreeTime); if(estN<3) estN=3; else if(estN>n) estN=n; return (int)(estN); } } }
60,237
38.998672
188
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/ensembles/EnhancedRotationForest.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

/*
 * Restructure of ContractRotationForest.java to making bagging easier. An adaptation of Rotation Forest, 2008 Juan Jose Rodriguez
 * Contract Version by @author Tony Bagnall, Michael Flynn, first implemented 2018, updated 2019 (checkpointable)
 * and 2020 (conform to structure)
 *
 *
 * We have cloned the code from RotationForest rather than extend it because core changes occur in most methods, and
 * to decouple from Weka, which has removed random forest from the latest releases.
 *
 */
package machine_learning.classifiers.ensembles;

import evaluation.evaluators.CrossValidationEvaluator;
import tsml.classifiers.Checkpointable;
import tsml.classifiers.EnhancedAbstractClassifier;
import tsml.classifiers.TrainTimeContractable;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.core.*;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.Normalize;
import weka.filters.unsupervised.attribute.PrincipalComponents;
import weka.filters.unsupervised.attribute.RemoveUseless;
import weka.filters.unsupervised.instance.RemovePercentage;

import java.io.File;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Random;
import java.util.concurrent.TimeUnit;

/**
 * Rotation Forest ensemble with train-time contracting, checkpointing and optional
 * bagging (for OOB train estimates).
 *
 * <p>Each base tree is built on a PCA-rotated copy of the data: the attributes are
 * randomly partitioned into groups, each group is projected with a
 * {@link PrincipalComponents} filter fitted on a class-and-instance subsample, and the
 * projected groups are concatenated back into a full-width transformed dataset
 * (see {@link #buildTree} and {@link #convertInstance}).
 *
 * <p>Thread-safety: not thread-safe; all build state is held in mutable instance fields.
 */
public class EnhancedRotationForest extends EnhancedAbstractClassifier
        implements TrainTimeContractable, Checkpointable, Serializable{

    // Template for each tree; a fresh copy is made per tree in buildTree.
    Classifier baseClassifier;
    // The trees built so far; grows inside the do/while loop in buildClassifier.
    ArrayList<Classifier> classifiers;
    /** for serialization */
    static final long serialVersionUID = -3255631880798499936L;
    /** The minimum size of a group */
    protected int minGroup = 3;
    /** The maximum size of a group */
    protected int maxGroup = 3;
    /** The percentage of instances to be removed */
    protected int removedPercentage = 50;
    /** Probability that each class's instances are included when fitting a group's projection (see selectClasses). */
    protected double probPerClass =0.5;
    // groups.get(i)[j] is the attribute-index group j used by tree i.
    ArrayList< int[][]> groups;
    /** The type of projection filter */
    protected Filter projectionFilter;
    /** The projection filters */
    protected ArrayList<Filter []> projectionFilters;
    /** Headers of the transformed dataset */
    protected ArrayList<Instances> headers;
    /** Headers of the reduced datasets */
    protected ArrayList<Instances []> reducedHeaders;
    /** Filter that remove useless attributes */
    protected RemoveUseless removeUseless = null;
    /** Filter that normalized the attributes */
    protected Normalize normalize = null;

    // True once setTrainTimeLimit is called with a positive amount.
    private boolean trainTimeContract = false;
    // Contract budgets in nanoseconds. transient: deliberately not checkpointed,
    // so a resumed build takes its contract from the current configuration.
    transient private long trainContractTimeNanos =0;
    transient private long trainEstimateContractTimeNanos =0;
    //Added features
    private double estSingleTree;   // NOTE(review): never written or read in this class — candidate for removal
    //Stores the actual number of trees after the build, may vary with contract
    private int numTrees=0;
    private int minNumTrees=200;
    private int maxNumTrees=200;    // NOTE(review): not referenced by the build loop's stopping condition — TODO confirm intent
    int maxNumAttributes;
    String checkpointPath=null;
    boolean checkpoint=false;
    double timeUsed;
    Instances trainData;

    /** Flags and data required if Bagging **/
    private boolean bagging = false;
    private int[] oobCounts;                 // per-instance count of trees for which the instance was out-of-bag
    private double[][] trainDistributions;   // accumulated (then normalised) OOB class distributions

    /** data information **/
    private int seriesLength;   // numAttributes - 1 of the filtered train data
    private int numInstances;

    /**
     * Constructor. Defaults: J48 base classifier, OOB train estimates, full-variance PCA projection.
     */
    public EnhancedRotationForest() {
        super(CAN_ESTIMATE_OWN_PERFORMANCE);
        trainEstimateMethod = TrainEstimateMethod.OOB;
        baseClassifier = new weka.classifiers.trees.J48();
        projectionFilter = defaultFilter();
        checkpointPath=null;
        timeUsed=0;
    }

    /**
     * Default projection method: PCA retaining 100% of the variance.
     */
    protected Filter defaultFilter() {
        PrincipalComponents filter = new PrincipalComponents();
        //filter.setNormalize(false);
        filter.setVarianceCovered(1.0);
        return filter;
    }

    /** @return true if a train-time contract has been set */
    public boolean isContracted(){ return trainTimeContract;}

    /**
     * Sets the minimum size of a group.
     *
     * @param minGroup the minimum number of attributes per group; must be positive.
     */
    public void setMinGroup( int minGroup ) throws IllegalArgumentException {
        if( minGroup <= 0 )
            throw new IllegalArgumentException( "MinGroup has to be positive." );
        this.minGroup = minGroup;
    }

    /**
     * Gets the minimum size of a group.
     *
     * @return the minimum value.
     */
    public int getMinGroup() {
        return minGroup;
    }

    /** @param t maximum number of trees; must be positive */
    public void setMaxNumTrees(int t) throws IllegalArgumentException {
        if( t <= 0 )
            throw new IllegalArgumentException( "maxNumTrees has to be positive." );
        maxNumTrees=t;
    }

    /** @param t minimum number of trees (the build loop's tree-count target); must be positive */
    public void setMinNumTrees(int t) throws IllegalArgumentException {
        if( t <= 0 )
            throw new IllegalArgumentException( "minNumTrees has to be positive." );
        minNumTrees=t;
    }

    /**
     * Sets the maximum size of a group.
     *
     * @param maxGroup the maximum number of attributes per group; must be positive.
     */
    public void setMaxGroup( int maxGroup ) throws IllegalArgumentException {
        if( maxGroup <= 0 )
            throw new IllegalArgumentException( "MaxGroup has to be positive." );
        this.maxGroup = maxGroup;
    }

    /**
     * Gets the maximum size of a group.
     *
     * @return the maximum value.
     */
    public int getMaxGroup() {
        return maxGroup;
    }

    /**
     * Sets the percentage of instance to be removed
     *
     * @param removedPercentage the percentage, in [0, 100).
     */
    public void setRemovedPercentage( int removedPercentage ) throws IllegalArgumentException {
        if( removedPercentage < 0 )
            throw new IllegalArgumentException( "RemovedPercentage has to be >=0." );
        if( removedPercentage >= 100 )
            throw new IllegalArgumentException( "RemovedPercentage has to be <100." );
        this.removedPercentage = removedPercentage;
    }

    /**
     * Sets the per-class inclusion probability used when sub-sampling classes for each
     * group's projection (see selectClasses).
     *
     * @param p probability in (0, 1].
     */
    public void setProbabilityClassSelection( double p ) throws IllegalArgumentException {
        if( p <= 0 )
            throw new IllegalArgumentException( "Probability of class selection has to be >0." );
        if( p > 1 )
            throw new IllegalArgumentException( "Probability of class selection has to be <=1." );
        this.probPerClass = p;
    }

    /**
     * Gets the percentage of instances to be removed
     *
     * @return the percentage.
     */
    public int getRemovedPercentage() {
        return removedPercentage;
    }

    /**
     * Sets the filter used to project the data.
     *
     * @param projectionFilter the filter.
     */
    public void setProjectionFilter( Filter projectionFilter ) {
        this.projectionFilter = projectionFilter;
    }

    /**
     * Gets the filter used to project the data.
     *
     * @return the filter.
     */
    public Filter getProjectionFilter() {
        return projectionFilter;
    }

    /**
     * Gets the filter specification string, which contains the class name of
     * the filter and any options to the filter
     *
     * @return the filter string.
     */
    /* Taken from FilteredClassifier */
    protected String getProjectionFilterSpec() {
        Filter c = getProjectionFilter();
        if (c instanceof OptionHandler) {
            return c.getClass().getName() + " "
                    + Utils.joinOptions(((OptionHandler)c).getOptions());
        }
        return c.getClass().getName();
    }

    @Override
    public String toString() {
        return "toString not implemented for ContractRotationForest";
    }

    /**
     * builds the classifier.
     *
     * <p>Flow: optionally resume from a checkpoint; clean the data (RemoveUseless +
     * Normalize, both kept for use at predict time); if contracted with a separate
     * train estimate, split the time budget between build and estimation; then build
     * trees in a do/while loop until either minNumTrees is reached or the contract
     * expires; finally (optionally) estimate own performance on the remaining budget.
     *
     * @param data the training data to be used for generating the
     * classifier.
     * @throws Exception if the classifier could not be built successfully
     */
    @Override
    public void buildClassifier(Instances data) throws Exception {
        // can classifier handle the data? These default capabilities
        // only allow real valued series and classification. To be adjusted
        getCapabilities().testWithFail(data);
        long startTime=System.nanoTime();
        //Set up the results file
        super.buildClassifier(data);
        data = new Instances( data );   // defensive copy: the filters below replace it anyway
        trainData = data;
        File file = new File(checkpointPath + "RotF" + seed + ".ser");
        //if checkpointing and serialised files exist load said files
        if (checkpoint && file.exists()){
            //Configure from file
            printLineDebug("Loading from checkpoint file");
            loadFromFile(checkpointPath + "RotF" + seed + ".ser");
            // checkpointTimeElapsed -= System.nanoTime()-t1;
        }
        else{ //Initialise
            if (baseClassifier == null) {
                throw new Exception("A base classifier has not been specified!");
            }//Initialise:
            groups=new ArrayList<>();
            // These arrays keep the information of the transformed data set
            headers =new ArrayList<>();
            //Store the PCA transforms
            projectionFilters =new ArrayList<>();
            reducedHeaders = new ArrayList<>();
            classifiers=new ArrayList<>();
            numTrees = 0;
        }
        rand = new Random(seed);
        //This is from the RotationForest: remove zero variance and normalise attributes.
        //Do this before loading from file, so we can perform checks of dataset?
        removeUseless = new RemoveUseless();
        removeUseless.setInputFormat(data);
        data = Filter.useFilter(data, removeUseless);
        normalize = new Normalize();
        normalize.setInputFormat(data);
        data = Filter.useFilter(data, normalize);
        seriesLength = data.numAttributes()-1;
        numInstances = data.numInstances();
        numClasses = data.numClasses();
        //Set up for Bagging if required
        if(bagging && getEstimateOwnPerformance()) {
            trainDistributions = new double[numInstances][numClasses];
            oobCounts = new int[numInstances];
        }
        //Can do this just once if not bagging
        Instances [] instancesOfClass;
        instancesOfClass = new Instances[numClasses];
        for( int i = 0; i < instancesOfClass.length; i++ ) {
            instancesOfClass[i] = new Instances( data, 0 );
        }
        for(Instance instance:data) {
            if( instance.classIsMissing() )
                continue; //Ignore instances with missing class value
            else{
                int c = (int)instance.classValue();
                instancesOfClass[c].add( instance );
            }
        }
        if(isContracted()&& getEstimateOwnPerformance() && !bagging){
            //Split the contract to train and estimate time
            //Split in half if OOB
            switch(trainEstimateMethod){
                case NONE: case TRAIN:
                    //do nothing, the overhead is none or minimal
                    break;
                case OOB: //Split in half
                    trainContractTimeNanos /=2;
                    printLineDebug(" TRAIN TIME SPLIT IN HALF FOR ESTIMATION In EnhancedRotationForest = "+trainContractTimeNanos/1000000000+" secs ");
                    trainEstimateContractTimeNanos = trainContractTimeNanos;
                    break;
                case CV: //Split 1/4 full and 3/4 Train
                    trainEstimateContractTimeNanos = 3*trainContractTimeNanos/4;
                    trainContractTimeNanos /=4;
                    break;
            }
        }
        long singleTreeTime;
        long currentTime=System.nanoTime()-startTime;
        do{//Always build at least one tree
            //Formed bag data set if bagging
            singleTreeTime=System.nanoTime();
            Instances trainD=data;
            boolean[] inBag=null;
            if(bagging){
                //Resample data with replacement
                // long t1 = System.nanoTime();
                inBag = new boolean[data.numInstances()];
                trainD = data.resampleWithWeights(rand, inBag);
                // Per-bag class partition must be rebuilt from the bag sample.
                instancesOfClass = new Instances[numClasses];
                for( int i = 0; i < instancesOfClass.length; i++ ) {
                    instancesOfClass[i] = new Instances( trainD, 0 );
                }
                for(Instance instance:trainD) {
                    int c = (int)instance.classValue();
                    instancesOfClass[c].add( instance );
                }
            }
            //TO DO: Alter the num attributes or cases for very big data
            int numAtts=trainD.numAttributes()-1;
            printLineDebug(" Building tree "+(numTrees+1)+" with "+numAtts+" attributes current total build time = "+currentTime/1000000000+" seconds contract time = "+trainContractTimeNanos/1000000000);
            Classifier c= buildTree(trainD,instancesOfClass,numTrees, numAtts);
            classifiers.add(c);
            if(bagging) { // Get bagged distributions
                for(int i=0;i<data.numInstances();i++){
                    if(!inBag[i]){
                        oobCounts[i]++;
                        try {
                            Instance convertedInstance = convertInstance(data.instance(i), numTrees);
                            double[] dist = c.distributionForInstance(convertedInstance);
                            for(int j=0;j<dist.length;j++)
                                trainDistributions[i][j]+=dist[j];
                        }catch(Exception e){
                            // NOTE(review): terminating the JVM from library code is hostile to
                            // callers — consider rethrowing the exception instead of System.exit.
                            System.out.println(" Exception thrown for instance "+i+" using the following tree = "+c);
                            // System.out.println(" Train data = "+trainData+ " original data = "+data);
                            System.out.println(" Instance it crashes on ="+data.instance(i));
                            for(int k=i+1;k<data.numInstances();k++){
                                System.out.println(" instance k "+k+" prediction = "+c.classifyInstance(data.instance(k)));
                            }
                            System.exit(1);
                        }
                    }
                }
            }
            numTrees++;
            //If the first one takes too long, adjust length parameter
            //Not used yet
            long endTreeTime=System.nanoTime();
            singleTreeTime=endTreeTime-singleTreeTime;
            currentTime=System.nanoTime()-startTime;
        }while((!trainTimeContract || withinTrainContract(currentTime)) && classifiers.size() < minNumTrees);
        // NOTE(review): the loop stops at minNumTrees even when contract time remains,
        // and maxNumTrees is never consulted — TODO confirm this is intended.
        //Build the classifier
        trainResults.setBuildTime(System.nanoTime()-startTime);
        trainResults.setParas(getParameters());
        printLineDebug(" Finished train build ");
        if (getEstimateOwnPerformance()) {
            // NOTE(review): divisor 1000000001 below looks like a typo for 1000000000
            // (nanoseconds -> seconds); affects the debug message only.
            printLineDebug(" Estimating own performance with contract estimate train build = "+trainEstimateContractTimeNanos/1000000001);
            long est1 = System.nanoTime();
            estimateOwnPerformance(data);
            long est2 = System.nanoTime();
            if (bagging)
                trainResults.setErrorEstimateTime(est2 - est1 + trainResults.getErrorEstimateTime());
            else
                trainResults.setErrorEstimateTime(est2 - est1);
            trainResults.setBuildPlusEstimateTime(trainResults.getBuildTime() + trainResults.getErrorEstimateTime());
        }
        trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
        trainResults.setParas(getParameters());
        // printLineDebug("*************** Finished Enhanced RotF Build with " + numTrees + " Trees built in " + (System.nanoTime() - startTime) / 1000000000 + " Seconds ***************");
    }

    /**
     * Populates trainResults with a train-set performance estimate, chosen by
     * trainEstimateMethod: OOB counts from this (bagged) model, raw train predictions,
     * a cross-validation of a fresh clone, or a separate bagged clone for OOB.
     *
     * @param data the (already filtered) training data
     * @throws Exception propagated from evaluation or prediction
     */
    private void estimateOwnPerformance(Instances data) throws Exception {
        if(trainDistributions==null)//This can happen if option is NONE or TRAIN
            trainDistributions = new double[numInstances][numClasses];
        if (bagging) {
            // Bagging used to build the final model, so use bag data, counts normalised to probabilities
            printLineDebug(" Ussing OOB estimates from a model already constructed");
            double[] preds = new double[data.numInstances()];
            double[] actuals = new double[data.numInstances()];
            long[] predTimes = new long[data.numInstances()];//Dummy variable, need something
            for (int j = 0; j < data.numInstances(); j++) {
                long predTime = System.nanoTime();
                if (oobCounts[j] > 0)
                    for (int k = 0; k < trainDistributions[j].length; k++)
                        trainDistributions[j][k] /= oobCounts[j];
                preds[j] = findIndexOfMax(trainDistributions[j], rand);
                actuals[j] = data.instance(j).classValue();
                predTimes[j] = System.nanoTime() - predTime;
            }
            trainResults.addAllPredictions(actuals, preds, trainDistributions, predTimes, null);
            trainResults.setEstimatorName("RotFBagging");
            trainResults.setDatasetName(data.relationName());
            trainResults.setSplit("train");
            trainResults.setFoldID(seed);
            trainResults.finaliseResults(actuals);
            trainResults.setErrorEstimateMethod("OOB");
        }
        else if(trainEstimateMethod == TrainEstimateMethod.TRAIN|| trainEstimateMethod == TrainEstimateMethod.NONE){
            //Just use the final model's (optimistically biased) predictions on the train data.
            printLineDebug("Finding the train set estimates from the full train model ");
            double[] preds = new double[data.numInstances()];
            double[] actuals = new double[data.numInstances()];
            long[] predTimes = new long[data.numInstances()];//Dummy variable, need something
            for (int j = 0; j < data.numInstances(); j++) {
                long predTime = System.nanoTime();
                trainDistributions[j]=distributionForInstance(trainData.instance(j));
                preds[j] = findIndexOfMax(trainDistributions[j], rand);
                actuals[j] = data.instance(j).classValue();
                predTimes[j] = System.nanoTime() - predTime;
            }
            trainResults.addAllPredictions(actuals, preds, trainDistributions, predTimes, null);
            trainResults.setEstimatorName("RotFTrain");
            trainResults.setDatasetName(data.relationName());
            trainResults.setSplit("train");
            trainResults.setFoldID(seed);
            trainResults.finaliseResults(actuals);
            trainResults.setErrorEstimateMethod("TRAIN");
        }
        //The other options involve building ne model(s) on the train data. TO DO SORT OUT CONTRACT
        else if (trainEstimateMethod == TrainEstimateMethod.CV ) {
            // Defaults to 10 or numInstances, whichever is smaller.
            int numFolds = setNumberOfFolds(data);
            CrossValidationEvaluator cv = new CrossValidationEvaluator();
            if (seedClassifier)
                cv.setSeed(seed * 5);
            cv.setNumFolds(numFolds);
            EnhancedRotationForest rotf = new EnhancedRotationForest();
            rotf.copyParameters(this);
            rotf.setDebug(this.debug);
            if (seedClassifier)
                rotf.setSeed(seed * 100);
            rotf.setEstimateOwnPerformance(false);
            rotf.setTrainTimeLimit(trainEstimateContractTimeNanos/10);
            // if (trainTimeContract)//Need to split the contract time, will give time/(numFolds+2) to each fio
            //     rotf.setTrainTimeLimit(buildtrainContractTimeNanos / numFolds);
            printLineDebug(" Doing CV evaluation estimate performance with " + rotf.getTrainContractTimeNanos() / 1000000000 + " secs per fold.");
            long buildTime = trainResults.getBuildTime();
            trainResults = cv.evaluate(rotf, data);
            trainResults.setBuildTime(buildTime);
            trainResults.setEstimatorName("RotFCV");
            trainResults.setErrorEstimateMethod("CV_" + numFolds);
        }
        else if (trainEstimateMethod == TrainEstimateMethod.OOB) {
            // Build a single new TSF using Bagging, and extract the estimate from this
            printLineDebug(" Estimating with a separate OOB classifier");
            EnhancedRotationForest rotf = new EnhancedRotationForest();
            // rotf.setTrainTimeLimit();
            rotf.copyParameters(this);
            rotf.setDebug(this.debug);
            rotf.setSeed(seed*33);
            rotf.setEstimateOwnPerformance(true);
            rotf.setTrainTimeLimit(trainEstimateContractTimeNanos);
            rotf.setBagging(true);
            // rotf.setRemovedPercentage(10);
            // tsf.setTrainTimeLimit(finalBuildtrainContractTimeNanos);
            rotf.buildClassifier(data);
            long buildTime = trainResults.getBuildTime();
            trainResults = rotf.trainResults;
            trainResults.setBuildTime(buildTime);
            trainResults.setEstimatorName("RotFOOB");
            trainResults.setErrorEstimateMethod("OOB");
        }
    }

    /** Build a rotation forest tree, possibly not using all the attributes to speed things up.
     *
     * <p>Side effects: appends this tree's groups, projection filters and headers to the
     * class-level lists, so convertInstance(instance, i) can replay the transform later.
     *
     * @param data the (possibly bagged) training data
     * @param instancesOfClass data partitioned by class value
     * @param i index of the tree being built (used to key the stored transform state)
     * @param numAtts maximum number of attributes to consider (permutation is truncated to this)
     * @return the trained base classifier for the rotated data
     * @throws Exception if filtering or base-classifier training fails
     */
    public Classifier buildTree(Instances data, Instances [] instancesOfClass, int i, int numAtts) throws Exception{
        int[][] g=generateGroupFromSize(data, rand,numAtts);
        Filter[] projection=Filter.makeCopies(projectionFilter, g.length );
        projectionFilters.add(projection);
        groups.add(g);
        Instances[] reducedHeaders = new Instances[ g.length ];
        this.reducedHeaders.add(reducedHeaders);
        ArrayList<Attribute> transformedAttributes = new ArrayList<>( data.numAttributes() );
        // Construction of the dataset for each group of attributes
        for( int j = 0; j < g.length; j++ ) {
            ArrayList<Attribute> fv = new ArrayList<>( g[j].length + 1 );
            for( int k = 0; k < g[j].length; k++ ) {
                String newName = data.attribute( g[j][k] ).name() + "_" + k;
                fv.add(data.attribute( g[j][k] ).copy(newName) );
            }
            fv.add( (Attribute)data.classAttribute( ).copy() );
            Instances dataSubSet = new Instances( "rotated-" + i + "-" + j + "-", fv, 0);
            dataSubSet.setClassIndex( dataSubSet.numAttributes() - 1 );
            // Select instances for the dataset
            reducedHeaders[j] = new Instances( dataSubSet, 0 );
            boolean [] selectedClasses = selectClasses( instancesOfClass.length, rand );
            for( int c = 0; c < selectedClasses.length; c++ ) {
                if( !selectedClasses[c] )
                    continue;
                for(Instance instance:instancesOfClass[c]) {
                    Instance newInstance = new DenseInstance(dataSubSet.numAttributes());
                    newInstance.setDataset( dataSubSet );
                    for( int k = 0; k < g[j].length; k++ ) {
                        newInstance.setValue( k, instance.value( g[j][k] ) );
                    }
                    newInstance.setClassValue( instance.classValue( ) );
                    dataSubSet.add( newInstance );
                }
            }
            dataSubSet.randomize(rand);
            // Remove a percentage of the instances
            Instances originalDataSubSet = dataSubSet;
            // NOTE(review): randomize is called twice on dataSubSet (above and here);
            // harmless but presumably one call was intended — TODO confirm.
            dataSubSet.randomize(rand);
            RemovePercentage rp = new RemovePercentage();
            // NOTE(review): passing 100-removedPercentage means removedPercentage is the
            // proportion *kept*, despite the field's name — TODO confirm against RotationForest.
            rp.setPercentage(100-removedPercentage );
            rp.setInputFormat( dataSubSet );
            dataSubSet = Filter.useFilter( dataSubSet, rp );
            if( dataSubSet.numInstances() < 2 ) {
                dataSubSet = originalDataSubSet;
            }
            // Project the data
            projection[j].setInputFormat( dataSubSet );
            Instances projectedData = null;
            do {
                try {
                    projectedData = Filter.useFilter( dataSubSet, projection[j] );
                } catch ( Exception e ) {
                    // The data could not be projected, we add some random instances
                    addRandomInstances( dataSubSet, 10, rand );
                }
            } while( projectedData == null );
            // Include the projected attributes in the attributes of the
            // transformed dataset
            for( int a = 0; a < projectedData.numAttributes() - 1; a++ ) {
                String newName = projectedData.attribute(a).name() + "_" + j;
                transformedAttributes.add( projectedData.attribute(a).copy(newName));
            }
        }
        transformedAttributes.add((Attribute)data.classAttribute().copy() );
        Instances buildClas = new Instances( "rotated-" + i + "-", transformedAttributes, 0 );
        buildClas.setClassIndex( buildClas.numAttributes() - 1 );
        headers.add(new Instances( buildClas, 0 ));
        // Project all the training data
        for(Instance instance:data) {
            Instance newInstance = convertInstance( instance, i );
            buildClas.add( newInstance );
        }
        Classifier c= AbstractClassifier.makeCopy(baseClassifier);
        // Build the base classifier
        if (c instanceof Randomizable) {
            ((Randomizable) c).setSeed(rand.nextInt());
        }
        c.buildClassifier( buildClas );
        return c;
    }

    // Copies the tuning parameters (not the built model) from another instance;
    // used when spawning clones for CV/OOB estimation.
    // NOTE(review): minGroup/maxGroup/removedPercentage are assigned multiple times
    // below — redundant but harmless; candidate for cleanup.
    private void copyParameters(EnhancedRotationForest other) {
        this.minNumTrees = other.minNumTrees;
        this.maxNumTrees = other.maxNumTrees;
        this.baseClassifier = other.baseClassifier;
        this.minGroup = other.minGroup;
        this.maxGroup = other.maxGroup;
        this.removedPercentage = other.removedPercentage;
        this.maxGroup = other.maxGroup;
        this.minGroup = other.minGroup;
        this.maxGroup = other.maxGroup;
        this.removedPercentage=other.removedPercentage;
    }

    /**
     * Adds random instances to the dataset.
     *
     * @param dataset the dataset
     * @param numInstances the number of instances
     * @param random a random number generator
     */
    protected void addRandomInstances( Instances dataset, int numInstances,
                                       Random random ) {
        int n = dataset.numAttributes();
        double [] v = new double[ n ];
        for( int i = 0; i < numInstances; i++ ) {
            for( int j = 0; j < n; j++ ) {
                Attribute att = dataset.attribute( j );
                if( att.isNumeric() ) {
                    v[ j ] = random.nextDouble();
                }
                else if ( att.isNominal() ) {
                    v[ j ] = random.nextInt( att.numValues() );
                }
            }
            dataset.add( new DenseInstance( 1, v ) );
        }
    }

    /**
     * Checks minGroup and maxGroup: swaps them if inverted and caps both at
     * numAttributes - 1.
     *
     * @param data the dataset
     */
    protected void checkMinMax(Instances data) {
        if( minGroup > maxGroup ) {
            int tmp = maxGroup;
            maxGroup = minGroup;
            minGroup = tmp;
        }
        int n = data.numAttributes();
        if( maxGroup >= n )
            maxGroup = n - 1;
        if( minGroup >= n )
            minGroup = n - 1;
    }

    /**
     * Selects a non-empty subset of the classes, each included independently with
     * probability probPerClass; if none is picked, one class is chosen at random.
     *
     * @param numClasses the number of classes
     * @param random the random number generator.
     * @return a random subset of classes
     */
    protected boolean [] selectClasses( int numClasses, Random random ) {
        int numSelected = 0;
        boolean selected[] = new boolean[ numClasses ];
        for( int i = 0; i < selected.length; i++ ) {
            if(random.nextDouble()<probPerClass) {
                selected[i] = true;
                numSelected++;
            }
        }
        if( numSelected == 0 ) {
            selected[random.nextInt( selected.length )] = true;
        }
        return selected;
    }

    /**
     * generates the groups of attributes, given their minimum and maximum
     * sizes.
     *
     * @param data the training data to be used for generating the
     * groups.
     * @param random the random number generator.
     * @param maxAtts cap on the number of attributes considered (permutation truncated to this)
     */
    protected int[][] generateGroupFromSize(Instances data, Random random, int maxAtts) {
        int[][] groups;
        int [] permutation = attributesPermutation(data.numAttributes(), data.classIndex(), random, maxAtts);
        // The number of groups that have a given size
        int [] numGroupsOfSize = new int[maxGroup - minGroup + 1];
        int numAttributes = 0;
        int numGroups;
        // Select the size of each group
        for( numGroups = 0; numAttributes < permutation.length; numGroups++ ) {
            int n = random.nextInt( numGroupsOfSize.length );
            numGroupsOfSize[n]++;
            numAttributes += minGroup + n;
        }
        groups = new int[numGroups][];
        int currentAttribute = 0;
        int currentSize = 0;
        for( int j = 0; j < numGroups; j++ ) {
            while( numGroupsOfSize[ currentSize ] == 0 )
                currentSize++;
            numGroupsOfSize[ currentSize ]--;
            int n = minGroup + currentSize;
            groups[j] = new int[n];
            for( int k = 0; k < n; k++ ) {
                if( currentAttribute < permutation.length )
                    groups[j][k] = permutation[ currentAttribute ];
                else
                    // For the last group, it can be necessary to reuse some attributes
                    groups[j][k] = permutation[ random.nextInt( permutation.length ) ];
                currentAttribute++;
            }
        }
        return groups;
    }

    /**
     * Returns a random permutation of the attribute indices, skipping the class
     * attribute and truncated to at most maxNumAttributes entries.
     *
     * @param numAttributes total attribute count (including the class attribute)
     * @param classAttribute index of the class attribute, which is excluded
     * @param random the random number generator
     * @param maxNumAttributes cap on the permutation length
     */
    final protected int [] attributesPermutation(int numAttributes, int classAttribute,
                                                 Random random, int maxNumAttributes) {
        int [] permutation = new int[numAttributes-1];
        int i = 0;
        //This just ignores the class attribute
        for(; i < classAttribute; i++){
            permutation[i] = i;
        }
        for(; i < permutation.length; i++){
            permutation[i] = i + 1;
        }
        permute( permutation, random );
        if(numAttributes>maxNumAttributes){
            //TRUNCATE THE PERMUTATION TO CONSIDER maxNumAttributes.
            // we could do this more efficiently, but this is the simplest way.
            int[] temp = new int[maxNumAttributes];
            System.arraycopy(permutation, 0, temp, 0, maxNumAttributes);
            permutation=temp;
        }
        return permutation;
    }

    /**
     * permutes the elements of a given array (Fisher-Yates shuffle).
     *
     * @param v the array to permute
     * @param random the random number generator.
     */
    protected void permute( int v[], Random random ) {
        for(int i = v.length - 1; i > 0; i-- ) {
            int j = random.nextInt( i + 1 );
            if( i != j ) {
                int tmp = v[i];
                v[i] = v[j];
                v[j] = tmp;
            }
        }
    }

    /**
     * prints the groups (debug aid; writes to stderr).
     */
    protected void printGroups( ) {
        for( int i = 0; i < groups.size(); i++ ) {
            for( int j = 0; j < groups.get(i).length; j++ ) {
                System.err.print( "( " );
                for( int k = 0; k < groups.get(i)[j].length; k++ ) {
                    System.err.print(groups.get(i)[j][k] );
                    System.err.print( " " );
                }
                System.err.print( ") " );
            }
            System.err.println( );
        }
    }

    /**
     * Transforms an instance for the i-th classifier: each attribute group is run
     * through the stored projection filter for tree i and the projected values are
     * concatenated into one instance matching headers.get(i).
     *
     * @param instance the instance to be transformed
     * @param i the base classifier number
     * @return the transformed instance
     * @throws Exception if the instance can't be converted successfully
     */
    protected Instance convertInstance( Instance instance, int i ) throws Exception {
        Instance newInstance = new DenseInstance( headers.get(i).numAttributes( ) );
        newInstance.setWeight(instance.weight());
        newInstance.setDataset(headers.get(i));
        int currentAttribute = 0;
        // Project the data for each group
        int[][] g=groups.get(i);
        for( int j = 0; j < g.length; j++ ) {
            Instance auxInstance = new DenseInstance(g[j].length + 1 );
            int k;
            for( k = 0; k < g[j].length; k++ ) {
                auxInstance.setValue( k, instance.value( g[j][k] ) );
            }
            auxInstance.setValue( k, instance.classValue( ) );
            auxInstance.setDataset(reducedHeaders.get(i)[ j ] );
            Filter[] projection=projectionFilters.get(i);
            projection[j].input( auxInstance );
            auxInstance = projection[j].output( );
            projection[j].batchFinished();
            for( int a = 0; a < auxInstance.numAttributes() - 1; a++ ) {
                newInstance.setValue( currentAttribute++, auxInstance.value( a ) );
            }
        }
        newInstance.setClassValue( instance.classValue() );
        return newInstance;
    }

    /**
     * Calculates the class membership probabilities for the given test
     * instance. Applies the same RemoveUseless + Normalize filters fitted at train
     * time, then averages the base classifiers' distributions (or predictions, for a
     * numeric class).
     *
     * @param instance the instance to be classified
     * @return predicted class probability distribution
     * @throws Exception if distribution can't be computed successfully
     */
    @Override
    public double[] distributionForInstance(Instance instance) throws Exception {
        removeUseless.input(instance);
        instance =removeUseless.output();
        removeUseless.batchFinished();
        normalize.input(instance);
        instance =normalize.output();
        normalize.batchFinished();
        double [] sums = new double [instance.numClasses()], newProbs;
        for (int i = 0; i < classifiers.size(); i++) {
            Instance convertedInstance = convertInstance(instance, i);
            if (instance.classAttribute().isNumeric() == true) {
                sums[0] += classifiers.get(i).classifyInstance(convertedInstance);
            }
            else {
                newProbs = classifiers.get(i).distributionForInstance(convertedInstance);
                for (int j = 0; j < newProbs.length; j++)
                    sums[j] += newProbs[j];
            }
        }
        if (instance.classAttribute().isNumeric() == true) {
            sums[0] /= (double)classifiers.size();
            return sums;
        }
        else if (Utils.eq(Utils.sum(sums), 0)) {
            // No classifier gave any mass (degenerate case): return the zero vector unnormalised.
            return sums;
        }
        else {
            Utils.normalize(sums);
            return sums;
        }
    }

    /** @return comma-separated parameter string recorded into trainResults */
    @Override
    public String getParameters() {
        String result="RemovePercent,"+this.getRemovedPercentage()+",NumFeatures,"+this.getMaxGroup();
        result+=",numTrees,"+numTrees;
        return result;
    }

    @Override //Checkpointable
    public boolean setCheckpointPath(String path) {
        boolean validPath=Checkpointable.super.createDirectories(path);
        if(validPath){
            checkpointPath = path;
            checkpoint = true;
        }
        return validPath;
    }

    /**
     * Restores build state from a deserialised checkpoint object.
     * Note: the transient contract fields are intentionally not copied here.
     */
    @Override
    public void copyFromSerObject(Object obj) throws Exception {
        if(!(obj instanceof EnhancedRotationForest))
            throw new Exception("The SER file is not an instance of ContractRotationForest"); //To change body of generated methods, choose Tools | Templates.
        EnhancedRotationForest saved= ((EnhancedRotationForest)obj);
        //Copy RotationForest attributes
        baseClassifier=saved.baseClassifier;
        classifiers=saved.classifiers;
        minGroup = saved.minGroup;
        maxGroup = saved.maxGroup;
        removedPercentage = saved.removedPercentage;
        groups = saved.groups;
        projectionFilter = saved.projectionFilter;
        projectionFilters = saved.projectionFilters;
        headers = saved.headers;
        reducedHeaders = saved.reducedHeaders;
        removeUseless = saved.removeUseless;
        normalize = saved.normalize;
        //Copy ContractRotationForest attributes. (Original comment truncated: "Not su" —
        // presumably "Not sure all are needed"; TODO confirm the field list is complete.)
        trainResults=saved.trainResults;
        minNumTrees=saved.minNumTrees;
        maxNumTrees=saved.maxNumTrees;
        maxNumAttributes=saved.maxNumAttributes;
        checkpointPath=saved.checkpointPath;
        debug=saved.debug;
        timeUsed=saved.timeUsed;
        numTrees=saved.numTrees;
    }

    /**
     * abstract methods from TrainTimeContractable interface
     * @param amount contract budget in nanoseconds; a non-positive value disables the contract
     */
    @Override
    public void setTrainTimeLimit(long amount) {
        if(amount>0) {
            trainContractTimeNanos = amount;
            trainTimeContract = true;
        }
        else
            trainTimeContract = false;
    }

    // NOTE(review): the parameter is compared directly against the budget, so callers
    // pass *elapsed* nanoseconds, not a start timestamp — the name "start" is misleading.
    @Override
    public boolean withinTrainContract(long start) {
        return start<trainContractTimeNanos;
    }

    @Override
    public long getTrainContractTimeNanos() {
        return trainContractTimeNanos;
    }

    /** Enables/disables bagging (required for in-model OOB estimates). */
    public void setBagging(boolean b){ bagging =b;}
}
37,908
38.001029
203
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/ensembles/EnsembleSelection.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package machine_learning.classifiers.ensembles; import experiments.ClassifierExperiments; import experiments.CollateResults; import experiments.ExperimentalArguments; import experiments.data.DatasetLists; import java.io.File; import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; import java.util.LinkedList; import java.util.List; import java.util.Random; import machine_learning.classifiers.ensembles.voting.MajorityVote; import machine_learning.classifiers.ensembles.weightings.EqualWeighting; import evaluation.storage.ClassifierResults; import experiments.data.DatasetLoading; import tsml.classifiers.EnhancedAbstractClassifier; import static utilities.GenericTools.indexOfMax; import utilities.InstanceTools; import weka.core.Instances; import weka.core.TechnicalInformation; /** * Implementation of ensemble selection * * @inproceedings{caruana2004ensemble, * title={Ensemble selection from libraries of models}, * author={Caruana, Rich and Niculescu-Mizil, Alexandru and Crew, Geoff and Ksikes, Alex}, * booktitle={Proceedings of the twenty-first international conference on Machine learning}, * pages={18}, * year={2004}, * organization={ACM} * } * * * Built on top of hesca for it's classifierresults file 
building/handling capabilities. * In this relatively naive implementation, the ensemble after build classifier still actually has the entire library in it, * however one or more of the models may have a (PRIOR) weighting of 0 * For the purposes we will be using this for (with something on the order of a couple dozen classifiers at most) this will work fine * in terms of runtime etc. * * However in the future refactors for optimisation purposes may occur if e.g we intend to handle much larger libraries (e.g, ensembling * over large para-space searched results) * * * @author James Large (james.large@uea.ac.uk) */ public class EnsembleSelection extends CAWPE { @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(TechnicalInformation.Type.INPROCEEDINGS); result.setValue(TechnicalInformation.Field.AUTHOR, "R. Caruana, A. Niculescu-Mizil, G. Crew and A. Ksikes"); result.setValue(TechnicalInformation.Field.YEAR, "2004"); result.setValue(TechnicalInformation.Field.TITLE, "Ensemble selection from libraries of models"); result.setValue(TechnicalInformation.Field.BOOKTITLE, "Proceedings of the twenty-first international conference on Machine learning"); result.setValue(TechnicalInformation.Field.PAGES, "18"); result.setValue(TechnicalInformation.Field.ORGANIZATION, "ACM"); return result; } // Integer numBags = null; //default 2 * floor(log(sizeOfLibrary)), i.e 22 classifiers gives 8 bags. Paper says 20 bags from 2000 models, so definitely seems fair Integer numBags = null; //default 10. Paper says 20 bags from 2000 models, so definitely seems fair Double propOfModelsInEachBag = null; //aka p, default 0.5. value used through exps in paper, though some suggestion that p around 0.1 to 0.3 may be better, future work Integer numOfTopModelsToInitialiseBagWith = null; //aka N, default value set to 2 for now, since only 22 classifiers being used atm. 
paper suggested around 5-25 for library of 2000 models //we currently intend to use only a library of 22 classifiers, from which we'll sample 11 (p=0.5). 100 models is more than any sampling with replacement //run should take, but jsut as a safeguard against minutely incrementing accuracy because of double precision shenanigans, have this in as a second stopping condition final int MAX_SUBENSEMBLE_SIZE = 100; Random rng; public EnsembleSelection() { super(); //sets default classifiers etc //overwriting relevant parts ensembleName = "EnsembleSelection"; // votingScheme = new MajorityConfidence(); votingScheme = new MajorityVote(); weightingScheme = new EqualWeighting(); rng = new Random(0); } public Integer getNumBags() { return numBags; } public void setNumBags(Integer numBags) { this.numBags = numBags; } public Double getPropOfModelsInEachBag() { return propOfModelsInEachBag; } public void setPropOfModelsInEachBag(Double propOfModelsInEachBag) { this.propOfModelsInEachBag = propOfModelsInEachBag; } public Integer getNumOfTopModelsToInitialiseBagWith() { return numOfTopModelsToInitialiseBagWith; } public void setNumOfTopModelsToInitialiseBagWith(Integer numOfTopModelsToInitialiseBagWith) { this.numOfTopModelsToInitialiseBagWith = numOfTopModelsToInitialiseBagWith; } @Override public void setSeed(int seed) { super.setSeed(seed); rng = new Random(seed); } @Override public void buildClassifier(Instances data) throws Exception { printlnDebug("**EnsembleSelection TRAIN**"); //housekeeping if (resultsFilesParametersInitialised) { if (readResultsFilesDirectories.length > 1) if (readResultsFilesDirectories.length != modules.length) throw new Exception("EnsembleSelection.buildClassifier: more than one results path given, but number given does not align with the number of classifiers/modules."); if (writeResultsFilesDirectory == null) writeResultsFilesDirectory = readResultsFilesDirectories[0]; } long startTime = System.nanoTime(); //transform data if specified 
if(this.transform==null){ this.trainInsts = new Instances(data); }else{ this.trainInsts = transform.transform(data); //TODO: this could call fit? } //init this.numTrainInsts = trainInsts.numInstances(); this.numClasses = trainInsts.numClasses(); this.numAttributes = trainInsts.numAttributes(); //set up modules initialiseModules(); //these won't actually do anything at this stage, except some basic initialisation //mostly still calling these as a relic from hesca for potential future proofing weightingScheme.defineWeightings(modules, numClasses); votingScheme.trainVotingScheme(modules, numClasses); //NOW THE ACTUAL SELECTION STUFF //have 'library' of models size L //init: list of sub ensembles //for each 'bag' in the bagging of ensemble stage (b in B ?) //init model set with p*L random models, is fraction of models in each bag //initialise this sub-ensemble with top N classifiers in this bag (accuracy only for now) //calc initial ensemble accuracy (even weighting over models) //while acc is improving //test the addition of *each* model to the ensemble-so-far //if an increase in ensemble performance can be acheived, add the model that gives the biggest to the ensemble //can play with the prior weights to simulate additional models of the same kind being added? 
(selection with replacement) //add finalised sub-ensemble to the bag of ensembles //now have a set of ensembles, which are effectively each a weighted average of the base classifiers (weighted by the number of tiems there were included) //so now jsut average over these ensembles again to get the final weighted ensemble //a lot of this code could be easily refactored for efficiency, however will leave it as is (a pretty naive implementation) //for ease of understanding/maintenance //init the params if not already set by user if (numBags == null) // numBags = (int) (Math.log(modules.length) / Math.log(2)) * 2; numBags = 10; if (propOfModelsInEachBag == null) propOfModelsInEachBag = .5; if (numOfTopModelsToInitialiseBagWith == null) numOfTopModelsToInitialiseBagWith = 1; // log(sizeOfBag) ? int numModelsInEachBag = Math.max(1, (int)Math.round(propOfModelsInEachBag * modules.length)); //will hold the actual ensembles as they go along List<List<EnsembleModule>> subensembles = new ArrayList<>(numBags); ClassifierResults globalEnsembleResults = null; for (int bagID = 0; bagID < numBags; bagID++) { List<EnsembleModule> bagOfModels = sample(modules, numModelsInEachBag); //todo check this, treeset should do sorting for us, however unsure if ordering is maintined durign toarray() List<EnsembleModule> subensemble = new ArrayList<>(); ClassifierResults subEnsembleResults = null; if (numOfTopModelsToInitialiseBagWith!=null && numOfTopModelsToInitialiseBagWith > 0) { int lastInd = bagOfModels.size()-1; EnsembleModule model = bagOfModels.get(lastInd); //best in trainEstimator subensemble.add(model); subEnsembleResults = model.trainResults; for (int i = 1; i < numOfTopModelsToInitialiseBagWith; i++) { model = bagOfModels.get(lastInd - i); //next highest trainEstimator score subensemble.add(model); subEnsembleResults = combinePredictions(subEnsembleResults, i, model.trainResults); } } //initialisation of subensemble done, start the forward selection double accSoFar; double newAcc = 
subEnsembleResults == null ? .0 : subEnsembleResults.getAcc(); boolean finished; do { finished = true; accSoFar = newAcc; ClassifierResults[] candidateResults = new ClassifierResults[bagOfModels.size()]; double[] accs = new double[bagOfModels.size()]; for (int modelID = 0; modelID < bagOfModels.size(); modelID++) { candidateResults[modelID] = combinePredictions(subEnsembleResults, subensemble.size(), bagOfModels.get(modelID).trainResults); accs[modelID] = candidateResults[modelID].getAcc(); } int maxAccInd = (int)utilities.GenericTools.indexOfMax(accs); newAcc = accs[maxAccInd]; if (newAcc > accSoFar) { finished = false; subEnsembleResults = candidateResults[maxAccInd]; subensemble.add(bagOfModels.get(maxAccInd)); if (subensemble.size() >= MAX_SUBENSEMBLE_SIZE) finished = true; } } while (!finished); subensembles.add(subensemble); if (globalEnsembleResults == null) globalEnsembleResults = subEnsembleResults; else globalEnsembleResults = combinePredictions(globalEnsembleResults, bagID, subEnsembleResults); } //have sub ensembles, now to produce the final weighted ensemble //i think easiest way is to just continue using the hesca architecture via //'equalweighting' and majorityconfidence, but fix the PRIOR weights to the abundance of the classifiers here //init all modules to have prior weight of 0.0 for (EnsembleModule module : modules) module.priorWeight = 0.0; //for all models in all subsembles, increment that model's prior weight in the final ensemble essentially for (List<EnsembleModule> subensemble : subensembles) { for (EnsembleModule model : subensemble) { int ind = 0; for ( ; ind < modules.length; ind++) if (model == modules[ind]) //by reference should work break; assert(ind != modules.length); modules[ind].priorWeight++; } } //END OF THE ACTUAL SELECTION STUFF trainResults = globalEnsembleResults; trainResults.setEstimatorName("EnsembleSelection"); trainResults.setDatasetName(datasetName); trainResults.setFoldID(seed); trainResults.setSplit("train"); long 
buildTime = System.nanoTime() - startTime; if (readIndividualsResults) { //we need to sum the modules' reported build time as well as the weight //and voting definition time for (EnsembleModule module : modules) { buildTime += module.trainResults.getBuildTimeInNanos(); //TODO see other todo in trainModules also. Currently working under //assumption that the estimate time is already accounted for in the build //time of TrainAccuracyEstimators, i.e. those classifiers that will //estimate their own accuracy during the normal course of training if (!EnhancedAbstractClassifier.classifierIsEstimatingOwnPerformance(module.getClassifier())) buildTime += module.trainResults.getErrorEstimateTime(); } } trainResults.setBuildTime(buildTime); //store the buildtime to be saved this.testInstCounter = 0; //prep for start of testing } public List<EnsembleModule> sample(final EnsembleModule[] pool, int numToPick) { //todo refactor... LinkedList<EnsembleModule> pooll = new LinkedList<>(); for (EnsembleModule module : pool) pooll.add(module); List<EnsembleModule> res = new ArrayList<>(numToPick); for (int i = 0; i < numToPick; i++) { int toRemove = rng.nextInt(pooll.size()); res.add(pooll.remove(toRemove)); } return res; } public static class SortByTrainAcc implements Comparator<EnsembleModule> { @Override public int compare(EnsembleModule o1, EnsembleModule o2) { return Double.compare(o1.trainResults.getAcc(), o2.trainResults.getAcc()); } } public ClassifierResults combinePredictions(final ClassifierResults ensembleSoFarResults, int ensembleSizeSoFar, final ClassifierResults newModelResults) throws Exception { ClassifierResults newResults = new ClassifierResults(numClasses); assert(ensembleSoFarResults.getTimeUnit().equals(newModelResults.getTimeUnit())); newResults.setTimeUnit(ensembleSoFarResults.getTimeUnit()); for (int inst = 0; inst < ensembleSoFarResults.getProbabilityDistributions().size(); inst++) { double[] ensDist = ensembleSoFarResults.getProbabilityDistribution(inst); 
double[] indDist = newModelResults.getProbabilityDistribution(inst); assert(ensDist.length == numClasses); assert(indDist.length == numClasses); double[] newDist = new double[numClasses]; for (int c = 0; c < numClasses; c++) newDist[c] = ((ensDist[c] * ensembleSizeSoFar) + indDist[c]) / (ensembleSizeSoFar+1); //expand existing average, add in new model, and divide again //todo: exactly how to time train-instance predictions for this classifier is very debatable. going with this for now long predTime = ensembleSoFarResults.getPredictionTime(inst) + newModelResults.getPredictionTime(inst); newResults.addPrediction(newDist, indexOfMax(newDist), predTime, ""); } newResults.finaliseResults(ensembleSoFarResults.getTrueClassValsAsArray()); return newResults; } public static void main(String[] args) throws Exception { // tests(); // ana(); } public static void tests() { String resPath = "C:/JamesLPHD/HESCA/UCI/UCIResults/"; int numfolds = 30; String[] dsets = DatasetLists.UCIContinuousFileNames; // String[] skipDsets = new String[] { "adult", "chess-krvk", "chess-krvkp", "connect-4", "miniboone", }; // String[] dsets = new String[] { "hayes-roth" }; String[] skipDsets = new String[] { }; String classifier = "EnsembleSelectionAll22Classifiers_Preds"; for (String dset : dsets) { if (Arrays.asList(skipDsets).contains(dset)) continue; System.out.println(dset); Instances all = DatasetLoading.loadDataNullable("C:/UCI Problems/" + dset + "/" + dset + ".arff"); for (int fold = 0; fold < numfolds; fold++) { String predictions = resPath+classifier+"/Predictions/"+dset; File f=new File(predictions); if(!f.exists()) f.mkdirs(); //Check whether fold already exists, if so, dont do it, just quit if(!CollateResults.validateSingleFoldFile(predictions+"/testFold"+fold+".csv")){ Instances[] data = InstanceTools.resampleInstances(all, fold, .5); EnsembleSelection c = new EnsembleSelection(); //for full kitchen sink classifier list, set the init # models to 2 from each bag, there's only 22 
total, so 11 in each bag c.setClassifiers(null, CAWPE_bigClassifierList, null); c.setNumOfTopModelsToInitialiseBagWith(2); //for just the hesca models, use default of 1 // c.setClassifiers(null, PAPER_HESCA, null); // c.setClassifiers(null, CAWPE_MajorityVote.HESCAplus_V4_Classifiers, null); c.setBuildIndividualsFromResultsFiles(true); c.setResultsFileLocationParameters(resPath, dset, fold); c.setSeed(fold); c.setEstimateOwnPerformance(true); c.setResultsFileWritingLocation(resPath); ExperimentalArguments exp = new ExperimentalArguments(); exp.estimatorName = classifier; exp.datasetName = dset; exp.foldId = fold; exp.generateErrorEstimateOnTrainSet = true; exp.testFoldFileName = predictions+"/testFold"+fold+".csv"; exp.trainFoldFileName = predictions+"/trainFold"+fold+".csv"; // exp.performTimingBenchmark = true; ClassifierExperiments.runExperiment(exp,data[0],data[1],c); } } } } public static String[] CAWPE_basic = new String[] { "NN", "SVML", "C4.5", "Logistic", "MLP" }; public static String[] CAWPE_bigClassifierList= new String[] { //original "RotFDefault", "RandF", "SVMQ", "NN", "SVML", "C4.5", "NB", "bayesNet", //homoensembles "DaggingDefault", "MultiBoostABDefault", "AdaBoostM1Default", "BaggingDefault", "LogitBoostDefault", "DecorateDefault", "ENDDefault", "RandomCommitteeDefault", //extra "Logistic", "MLP", "DNN", "1NN", "DecisionTable", "REPTree", }; }
20,799
43.635193
191
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/ensembles/HomogeneousContractCAWPE.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package machine_learning.classifiers.ensembles;

import evaluation.storage.ClassifierResults;
import machine_learning.classifiers.ensembles.weightings.TrainAcc;
import weka.classifiers.Classifier;
import weka.core.Instances;

import java.util.Arrays;
import java.util.Random;

import static utilities.Utilities.argMax;

/**
 * A CAWPE variant whose member classifiers are supplied incrementally after the initial
 * build, each arriving with pre-computed train probability estimates, rather than being
 * trained internally. Members can also be removed by index.
 */
public class HomogeneousContractCAWPE extends CAWPE {

    Random rand = new Random();

    /** Sets the RNG used to break ties when converting probability estimates to predictions. */
    public void setRandom(Random rand){
        this.rand = rand;
    }

    /**
     * Appends a classifier to the ensemble, building its train results from the supplied
     * probability estimates and true class values, then refreshes the weighting/voting schemes.
     *
     * @param c         the classifier to add (assumed already built by the caller)
     * @param probs     per-train-instance probability distributions produced by c
     * @param classVals true class values for the same train instances
     */
    public void addToEnsemble(Classifier c, double[][] probs, double[] classVals) throws Exception {
        String name = c.getClass().getSimpleName();

        // assemble the new module's train results from the supplied estimates
        EnsembleModule module = new EnsembleModule(name, c, "");
        ClassifierResults results = new ClassifierResults(numClasses);
        results.setEstimatorName(name);
        results.setDatasetName(trainInsts.relationName());
        results.setFoldID(seed);
        results.setSplit("train");

        for (double[] dist : probs)
            results.addPrediction(dist, argMax(dist, rand), -1, ""); // -1: no per-prediction timing available

        results.finaliseResults(classVals);
        module.trainResults = results;

        // grow the module array and append
        modules = Arrays.copyOf(modules, modules.length + 1);
        modules[modules.length - 1] = module;

        // refresh weights; TrainAcc can weight just the newcomer, otherwise redo everything
        if (weightingScheme instanceof TrainAcc)
            module.posteriorWeights = ((TrainAcc) weightingScheme).defineWeighting(module, numClasses);
        else
            weightingScheme.defineWeightings(modules, numClasses);
        votingScheme.trainVotingScheme(modules, numClasses);

        module.trainResults.findAllStatsOnce();
    }

    /** Removes the module at the given index from the ensemble. */
    public void remove(int idx){
        EnsembleModule[] shrunk = new EnsembleModule[modules.length - 1];
        for (int i = 0, j = 0; i < modules.length; i++)
            if (i != idx)
                shrunk[j++] = modules[i];
        modules = shrunk;
    }

    /**
     * Records the training data's dimensions and resets the (initially empty) module list;
     * actual members are added later via addToEnsemble.
     */
    @Override
    public void buildClassifier(Instances data) throws Exception {
        trainInsts = data;
        numTrainInsts = data.numInstances();
        numClasses = data.numClasses();
        numAttributes = data.numAttributes();

        modules = new EnsembleModule[0];
        testInstCounter = 0;
    }
}
3,173
36.341176
114
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/ensembles/SaveableEnsemble.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package machine_learning.classifiers.ensembles;

/**
 * Interface that facilitates the saving of the internal state of an ensemble classifier,
 * including parameters that may have been set by CV or some other means.
 *
 * @author ajb
 */
public interface SaveableEnsemble {

    /**
     * Saves the ensemble's results.
     * NOTE(review): tr/te are presumably file paths for the train and test results
     * respectively - confirm against implementing classes.
     */
    void saveResults(String tr, String te);

    /** Returns a string describing the ensemble's current parameter settings. */
    String getParameters();
}
1,099
35.666667
80
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/ensembles/SingleTransformEnsembles.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package machine_learning.classifiers.ensembles;

import tsml.transformers.RowNormalizer;
import tsml.transformers.Transformer;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.core.Instances;

/**
 * NOTE(review): this class is an unfinished stub. buildClassifier is empty, the
 * classifiers/train fields are never used, and only the TIME branch of the transform
 * switch is implemented. It is not usable as a classifier in its current state.
 */
public class SingleTransformEnsembles extends AbstractClassifier{

    // Only TIME is handled in initialise(); PS and ACF have no transform mapping yet.
    enum TransformType {TIME,PS,ACF};
    TransformType t = TransformType.TIME;
    Transformer transform;       // set by initialise() based on t
    Classifier[] classifiers;    // never populated - stub
    Instances train;             // never populated - stub

    public SingleTransformEnsembles(){
        super();
        initialise();
    }

    // Maps the selected TransformType to a concrete Transformer instance.
    public final void initialise(){
        //Transform
        switch(t){
            case TIME:
                transform=new RowNormalizer();
                break;
        }
    }

    // Intentionally(?) empty - training logic was never written.
    @Override
    public void buildClassifier(Instances data){
    }

    @Override
    public String getRevision() {
        // TODO Auto-generated method stub
        return null;
    }

    public static void main(String[] args){
        //Load up Beefand test only on that
    }
}
1,793
25.776119
76
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/ensembles/TransformEnsembles.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package machine_learning.classifiers.ensembles;

import tsml.transformers.PowerSpectrum;
import tsml.transformers.ACF;

import java.util.ArrayList;
import java.util.Random;

import weka.attributeSelection.PrincipalComponents;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import tsml.classifiers.distance_based.DTWCV;
import machine_learning.classifiers.kNN;
import weka.core.Instance;
import weka.core.Instances;
import tsml.transformers.ColumnNormalizer;
import weka.filters.SimpleBatchFilter;

/**
 * Ensemble over four representations of the same series data:
 * index 0 = time domain (DTWCV base), 1 = power spectrum, 2 = ACF, 3 = PCA (1NN base).
 * Members are combined by a weighted average of their distributions, with weights chosen
 * by one of several schemes (EQUAL, CV, BEST, STEP).
 */
public class TransformEnsembles extends AbstractClassifier{

    enum TransformType {TIME,PS,ACF};

    SimpleBatchFilter transform;
    Classifier[] classifiers;
    boolean useWeights=false;
    boolean normaliseAtts=false;

    // train.get(i) holds the training data in representation i (see class javadoc ordering)
    ArrayList<Instances> train=new ArrayList<Instances>();
    double[] transformWeights;   // final combination weights per representation
    double[] cvWeights;          // CV accuracy per representation; computed once only
    int numInstances;
    Classifier[] all;            // one classifier per representation, parallel to train
    Classifier base=new kNN(1);      // Default base classifier for transformed domains
    Classifier baseTime=new DTWCV(); // Default base classifier for time domain
    PowerSpectrum ps;
    ACF acf;
    PrincipalComponents pca;
    ColumnNormalizer nPs,nAcf,nPca;

    // cached per-test-instance distributions, replayed when rebuild==false
    ArrayList<double[][]> predictions=new ArrayList<double[][]>();
    static double CRITICAL=2.32;     // one-sided z critical value used by STEP weighting
    int testPos=0;
    int nosTransforms=4;
    double weightPower=1;
    boolean rebuild=true;

    public TransformEnsembles(){
        super();
    }

    public void setWeightPower(double x){weightPower=x;}

    /** rebuild stays true by default; set false to replay cached predictions (experiment speed-up hack). */
    public void rebuildClassifier(boolean x){rebuild =x;}

    public enum WeightType{EQUAL,CV,BEST,STEP}
    private WeightType w=WeightType.EQUAL;

    public void setWeightType(int x){
        switch(x){
            case 0:
                w=WeightType.EQUAL;
                break;
            case 1:
                w=WeightType.CV;
                break;
            case 2:
                w=WeightType.BEST;
                break;
            case 3:
                w=WeightType.STEP;
                break;
        }
    }

    public void setWeightType(WeightType x){ w=x; }

    public void setBaseClassifier(Classifier c){ base=c; }

    private static final double THRESHOLD1=100;  // above this many train instances, cap CV at 10 folds
    private static final double THRESHOLD2=1000;

    /** Resets all per-build state from the given training data. */
    private void init(Instances data){
        numInstances=data.numInstances();
        base=new kNN(1); // Default base classifier
        train=new ArrayList<Instances>();
        ps=new PowerSpectrum();
        acf=new ACF();
        acf.setMaxLag((int)(data.numAttributes()-data.numAttributes()*.1));
        pca=new PrincipalComponents();
        predictions=new ArrayList<double[][]>();
    }

    /**
     * Computes transformWeights according to the selected WeightType.
     * A representation with weight 0 is effectively excluded at prediction time.
     */
    public void findWeights() throws Exception{
        transformWeights=new double[nosTransforms];
        testPos=0;
        switch(w){
            case EQUAL:
                for(int i=0;i<nosTransforms;i++)
                    transformWeights[i]=1.0/nosTransforms;
                break;
            case BEST:
                if(cvWeights==null)
                    findCVWeights();
                // Set max to 1, rest to zero. If zero, that representation will not be used
                // in distributionForInstance
                int max=0;
                for(int i=1;i<cvWeights.length;i++){
                    if(cvWeights[i]>cvWeights[max])
                        max=i;
                }
                for(int i=0;i<transformWeights.length;i++){
                    if(i==max)
                        transformWeights[i]=1.0;
                    else
                        transformWeights[i]=0.0;
                }
                System.out.print("Best Weight is ");
                switch(max){
                    case 0: System.out.println("TIME"); break;
                    case 1: System.out.println("POWERSPECTRUM"); break;
                    case 2: System.out.println("ACF"); break;
                    case 3: System.out.println("PCA"); break;
                }
                break;
            case CV:
                System.out.println("CV Weights");
                if(cvWeights==null)
                    findCVWeights();
                // Set transform weights proportional to CV accuracy raised to weightPower
                double sum=0;
                for(int i=0;i<cvWeights.length;i++){
                    sum+=Math.pow(cvWeights[i],weightPower);
                }
                for(int i=0;i<cvWeights.length;i++)
                    transformWeights[i]=Math.pow(cvWeights[i],weightPower)/sum;
                break;
            case STEP:
                System.out.println("STEP Weights");
                if(cvWeights==null)
                    findCVWeights();
                // Zero-weight any representation significantly worse than the best,
                // judged by a one-proportion z-test against CRITICAL
                max=0;
                for(int i=1;i<cvWeights.length;i++){
                    if(cvWeights[i]>cvWeights[max])
                        max=i;
                }
                int n=numInstances;
                double p=cvWeights[max], q=(1-p);
                double sd=p*q;
                sd/=n;
                sd=Math.sqrt(sd);
                for(int j=0;j<cvWeights.length;j++){
                    if(j==max)
                        transformWeights[j]=Math.pow(cvWeights[j],weightPower);
                    else{
                        double z=(cvWeights[max]-cvWeights[j])/sd;
                        System.out.println(" Max trans ="+max+" z value for "+j+" = "+z);
                        if(z<CRITICAL) // Cant reject H0, keep this transform
                            transformWeights[j]=Math.pow(cvWeights[j],weightPower);
                        else           // Reject this one
                            transformWeights[j]=0;
                    }
                }
                // Normalise
                sum=transformWeights[0];
                for(int i=1;i<transformWeights.length;i++)
                    sum+=transformWeights[i];
                for(int i=0;i<transformWeights.length;i++)
                    transformWeights[i]/=sum;
                break;
        }
    }

    /**
     * Builds one classifier per representation. Assumes data is already standardised.
     * Skipped entirely when rebuild==false (see rebuildClassifier), in which case only
     * the weights/cached predictions are reused.
     */
    public void buildClassifier(Instances data) throws Exception {
        if(rebuild){
            System.out.println("Build whole ...");
            init(data);
            train.add(data);
            Instances t1=ps.transform(data);
            Instances t2=acf.transform(data);
            if(normaliseAtts){
                nPs=new ColumnNormalizer(t1);
                t1=nPs.transform(t1);
                nAcf=new ColumnNormalizer(t2);
                t2=nAcf.transform(t2);
            }
            pca.buildEvaluator(data);
            Instances t3=pca.transformedData(data);
            train.add(t1);
            // BUGFIX: this add was commented out, leaving train.size()==3 while
            // distributionForInstance assumes the fixed ordering 0=TIME,1=PS,2=ACF,3=PCA.
            // That trained all[2] on PCA data but tested it on ACF data, and made the
            // all[3] access throw ArrayIndexOutOfBoundsException.
            train.add(t2);
            train.add(t3);
            nosTransforms=train.size();
            findWeights();
            all= AbstractClassifier.makeCopies(base,train.size());
            all[0]=AbstractClassifier.makeCopy(baseTime);
            for(int i=0;i<all.length;i++){
                all[i].buildClassifier(train.get(i));
            }
        }
    }

    /**
     * Weighted average of the per-representation distributions for ins.
     * When rebuild==false, replays the cached distributions in order instead of recomputing.
     */
    public double[] distributionForInstance(Instance ins) throws Exception{
        double[][] preds;
        if(rebuild){
            preds=new double[nosTransforms][];
            if(all[0]!=null)
                preds[0]=all[0].distributionForInstance(ins);

            // Nasty hack because the transforms are implemented as batch filters:
            // wrap the single instance in a one-row Instances
            Instances temp=new Instances(train.get(0),0);
            temp.add(ins);
            Instances temp2;
            if(all[1]!=null){
                temp2=ps.transform(temp);
                if(normaliseAtts){
                    temp2=nPs.transform(temp2);
                }
                preds[1]=all[1].distributionForInstance(temp2.instance(0));
            }
            if(all[2]!=null){
                temp2=acf.transform(temp);
                if(normaliseAtts){
                    temp2=nAcf.transform(temp2);
                }
                preds[2]=all[2].distributionForInstance(temp2.instance(0));
            }
            if(all[3]!=null){
                Instance t= pca.convertInstance(ins);
                preds[3]=all[3].distributionForInstance(t);
            }
            predictions.add(preds);
        }
        else{
            preds=predictions.get(testPos);
            testPos++;
        }

        // Weight each representation's distribution; weight 0 excludes it
        double[] dist=new double[ins.numClasses()];
        for(int i=0;i<nosTransforms;i++){
            if(transformWeights[i]>0){
                for(int j=0;j<dist.length;j++)
                    dist[j]+=transformWeights[i]*preds[i][j];
            }
        }
        return dist;
    }

    /**
     * ALWAYS recalculates the CV accuracy per representation (leave-one-out up to
     * THRESHOLD1 instances, 10-fold beyond that).
     * NOTE(review): the unseeded Random makes fold assignment non-deterministic - confirm
     * whether that is intended before seeding it.
     */
    public void findCVWeights() throws Exception {
        cvWeights=new double[nosTransforms];
        int folds=numInstances;
        if(folds>THRESHOLD1){
            folds=10;
        }
        System.out.print("\n Finding CV Accuracy: ");
        for(int i=0;i<nosTransforms;i++){
            Evaluation evaluation = new Evaluation(train.get(i));
            if(i==0)
                evaluation.crossValidateModel(AbstractClassifier.makeCopy(baseTime), train.get(i), folds, new Random());
            else
                evaluation.crossValidateModel(AbstractClassifier.makeCopy(base), train.get(i), folds, new Random());
            cvWeights[i]=1-evaluation.errorRate();
            System.out.print(","+cvWeights[i]);
        }
        System.out.print("\n");
    }

    /** Comma-separated final transform weights, for logging. */
    public String getWeights(){
        String str="";
        for(int i=0;i<transformWeights.length;i++)
            str+=transformWeights[i]+",";
        return str;
    }

    /** Comma-separated CV accuracies, for logging. */
    public String getCV(){
        String str="";
        for(int i=0;i<cvWeights.length;i++)
            str+=cvWeights[i]+",";
        return str;
    }

    @Override
    public String getRevision() {
        // TODO Auto-generated method stub
        return null;
    }

    public static void main(String[] args){
        //Load up Beefand test only on that
    }
}
9,092
27.23913
132
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/ensembles/stackers/SMLR.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package machine_learning.classifiers.ensembles.stackers; import machine_learning.classifiers.ensembles.voting.stacking.StackingOnDists; import machine_learning.classifiers.ensembles.weightings.EqualWeighting; import machine_learning.classifiers.ensembles.CAWPE; import machine_learning.classifiers.MultiLinearRegression; /** * Stacking with multi-response linear regression (MLR), Ting and Witten (1999) * * @author James Large (james.large@uea.ac.uk) */ public class SMLR extends CAWPE { public SMLR() { super(); //sets default classifiers etc //overwriting relevant parts ensembleName = "SMLR"; weightingScheme = new EqualWeighting(); votingScheme = new StackingOnDists(new MultiLinearRegression()); } }
1,541
38.538462
80
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/ensembles/stackers/SMLRE.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package machine_learning.classifiers.ensembles.stackers; import machine_learning.classifiers.ensembles.voting.stacking.StackingOnExtendedSetOfFeatures; import machine_learning.classifiers.ensembles.weightings.EqualWeighting; import machine_learning.classifiers.ensembles.CAWPE; import machine_learning.classifiers.MultiLinearRegression; /** * Stacking with MLR and an extended set of meta-level attributes, Dzeroski and Zenko (2004) * * @author James Large (james.large@uea.ac.uk) */ public class SMLRE extends CAWPE{ public SMLRE() { super(); //sets default classifiers etc //overwriting relevant parts ensembleName = "SMLRE"; weightingScheme = new EqualWeighting(); votingScheme = new StackingOnExtendedSetOfFeatures(new MultiLinearRegression()); } }
1,586
38.675
94
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/ensembles/stackers/SMM5.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package machine_learning.classifiers.ensembles.stackers; import machine_learning.classifiers.ensembles.voting.stacking.StackingOnDists; import machine_learning.classifiers.ensembles.weightings.EqualWeighting; import machine_learning.classifiers.ensembles.CAWPE; import machine_learning.classifiers.MultiResponseModelTrees; /** * Stacking with multi-response model trees. M5 is used to induce the * model trees at the meta level. Dzeroski and Zenko (2004) * * @author James Large (james.large@uea.ac.uk) */ public class SMM5 extends CAWPE { public SMM5() { super(); //sets default classifiers etc //overwriting relevant parts ensembleName = "SMM5"; weightingScheme = new EqualWeighting(); votingScheme = new StackingOnDists(new MultiResponseModelTrees()); } }
1,592
37.853659
78
java
tsml-java
tsml-java-master/src/main/java/machine_learning/classifiers/ensembles/voting/AverageVoteByConfidence.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package machine_learning.classifiers.ensembles.voting; import machine_learning.classifiers.ensembles.AbstractEnsemble.EnsembleModule; import static utilities.GenericTools.indexOfMax; import weka.core.Instance; /** * Each class' probability is defined as the average of each classifier that predicts this class' weighted * confidence that the instance is of this class * * @author James Large */ public class AverageVoteByConfidence extends ModuleVotingScheme { public AverageVoteByConfidence() { } public AverageVoteByConfidence(int numClasses) { this.numClasses = numClasses; } @Override public void trainVotingScheme(EnsembleModule[] modules, int numClasses) { this.numClasses = numClasses; } @Override public double[] distributionForTrainInstance(EnsembleModule[] modules, int trainInstanceIndex) { double[] preds = new double[numClasses]; int[] numPredsForClass = new int[numClasses]; int pred; for(int m = 0; m < modules.length; m++){ pred = (int) modules[m].trainResults.getPredClassValue(trainInstanceIndex); ++numPredsForClass[pred]; double[] p=modules[m].trainResults.getProbabilityDistribution(trainInstanceIndex); preds[pred] += modules[m].priorWeight*modules[m].posteriorWeights[pred]*p[pred]; } for (int c = 0; c < numClasses; c++) if 
(numPredsForClass[c] != 0) preds[c]/=numPredsForClass[c]; return normalise(preds); } @Override public double[] distributionForTestInstance(EnsembleModule[] modules, int testInstanceIndex) { double[] preds = new double[numClasses]; int[] numPredsForClass = new int[numClasses]; int pred; for(int m = 0; m < modules.length; m++){ pred = (int) modules[m].testResults.getPredClassValue(testInstanceIndex); ++numPredsForClass[pred]; double[] p=modules[m].testResults.getProbabilityDistribution(testInstanceIndex); preds[pred] += modules[m].priorWeight * modules[m].posteriorWeights[pred] * p[pred]; } for (int c = 0; c < numClasses; c++) if (numPredsForClass[c] != 0) preds[c]/=numPredsForClass[c]; return normalise(preds); } @Override public double[] distributionForInstance(EnsembleModule[] modules, Instance testInstance) throws Exception { double[] preds = new double[numClasses]; int[] numPredsForClass = new int[numClasses]; double[] dist; int pred; for(int m = 0; m < modules.length; m++){ dist = distributionForNewInstance(modules[m], testInstance); pred = (int)indexOfMax(dist); ++numPredsForClass[pred]; preds[pred] += modules[m].priorWeight * modules[m].posteriorWeights[pred] * dist[pred]; } for (int c = 0; c < numClasses; c++) if (numPredsForClass[c] != 0) preds[c]/=numPredsForClass[c]; return normalise(preds); } }
4,050
35.495495
111
java