repo
stringlengths
1
191
file
stringlengths
23
351
code
stringlengths
0
5.32M
file_length
int64
0
5.32M
avg_line_length
float64
0
2.9k
max_line_length
int64
0
288k
extension_type
stringclasses
1 value
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/legacy/elastic_ensemble/fast_window_search/items/LazyAssessNNEarlyAbandon.java
/******************************************************************************* * Copyright (C) 2017 Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb * * This file is part of FastWWSearch. * * FastWWSearch is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 3 of the License. * * FastWWSearch is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with FastWWSearch. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ package tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.sequences.SymbolicSequence; /** * Code for the paper "Efficient search of the best warping window for Dynamic Time Warping" published in SDM18 * * Class for LazyAssessNN distance introduced in our SDM18 paper. 
* It implements a "lazy" UCR Suites for our KDD12 competitor * It is used in CascadeLB.java replacing the original KDD12 code * * @author Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb * */ public class LazyAssessNNEarlyAbandon implements Comparable<LazyAssessNNEarlyAbandon> { // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Internal types // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- public enum RefineReturnType { Pruned_with_LB, Pruned_with_DTW, New_best } public enum LBStatus { LB_Kim, Partial_LB_KeoghQR, Full_LB_KeoghQR, Partial_LB_KeoghRQ, Full_LB_KeoghRQ, Previous_Window_LB, Previous_Window_DTW, Full_DTW } // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Fields // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- protected final static int RIEN = -1; protected final static int DIAGONALE = 0; protected final static int GAUCHE = 1; protected final static int HAUT = 2; SequenceStatsCache cache; // Cache to store the information for the sequences SymbolicSequence query, reference; // Query and reference sequences public int indexQuery, indexReference; // Index for query and reference int indexStoppedLB, oldIndexStoppedLB; // Index where we stop LB int currentW; // Current warping window int minWindowValidityFullDTW; // Minimum window validity for DTW int nOperationsLBKim; // Number of operations for LB Kim double minDist,LBKeogh1,LBKeogh2,bestMinDist,EuclideanDist; // Distances LBStatus status; // Status of Lower Bound public static double[] ubPartials; // Partial Upper Bound for PrunedDTW // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Constructor // --- --- --- --- --- --- --- --- --- --- --- 
--- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- public LazyAssessNNEarlyAbandon(SymbolicSequence query, int index, SymbolicSequence reference, int indexReference, SequenceStatsCache cache) { if (index < indexReference) { this.query = query; this.indexQuery = index; this.reference = reference; this.indexReference = indexReference; } else { this.query = reference; this.indexQuery = indexReference; this.reference = query; this.indexReference = index; } this.minDist = 0.0; this.cache = cache; tryLBKim(); this.bestMinDist= minDist; this.status = LBStatus.LB_Kim; } public LazyAssessNNEarlyAbandon(SequenceStatsCache cache){ this.cache = cache; } // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Method // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- /** * Initialise the distance between query and reference * Reset all parameters * Compute LB Kim * @param query * @param index * @param reference * @param indexReference */ public void set (SymbolicSequence query, int index, SymbolicSequence reference, int indexReference) { // --- OTHER RESET indexStoppedLB = oldIndexStoppedLB = 0; currentW = 0; minWindowValidityFullDTW = 0; nOperationsLBKim = 0; LBKeogh1 = LBKeogh2 = 0; // --- From constructor if (index < indexReference) { this.query = query; this.indexQuery = index; this.reference = reference; this.indexReference = indexReference; } else { this.query = reference; this.indexQuery = indexReference; this.reference = query; this.indexReference = index; } this.minDist = 0.0; tryLBKim(); this.bestMinDist = minDist; this.status = LBStatus.LB_Kim; } /** * Initialise Upper Bound array for PrunedDTW */ public void setUBPartial() { ubPartials = new double[query.getNbTuples()+1]; } /** * Set the best minimum distance * @param bestMinDist */ public void setBestMinDist(double bestMinDist) { this.bestMinDist = bestMinDist; } /** * Set 
current warping window * @param currentW */ public void setCurrentW(int currentW) { if (this.currentW != currentW) { this.currentW = currentW; if (status == LBStatus.Full_DTW){ if(this.currentW >= minWindowValidityFullDTW) { this.status = LBStatus.Full_DTW; }else{ this.status = LBStatus.Previous_Window_DTW; } } else { this.status = LBStatus.Previous_Window_LB; this.oldIndexStoppedLB = indexStoppedLB; } } } /** * Compute Euclidean Distance as Upper Bound for PrunedDTW * @param scoreToBeat * @return */ public RefineReturnType tryEuclidean(double scoreToBeat) { if(bestMinDist>=scoreToBeat){ return RefineReturnType.Pruned_with_LB; } if(EuclideanDist >= scoreToBeat) { return RefineReturnType.Pruned_with_DTW; } ubPartials[query.getNbTuples()] = 0; for (int i = query.getNbTuples()-1; i >= 0; i--) { ubPartials[i] = ubPartials[i+1] + query.getItem(i).squaredDistance(reference.getItem(i)); } EuclideanDist = ubPartials[0]; return RefineReturnType.New_best; } /** * Run LB Kim using data from cache */ protected void tryLBKim() { double diffFirsts = query.sequence[0].squaredDistance(reference.sequence[0]); double diffLasts = query.sequence[query.getNbTuples() - 1].squaredDistance(reference.sequence[reference.getNbTuples() - 1]); minDist = diffFirsts + diffLasts; nOperationsLBKim = 2; if(!cache.isMinFirst(indexQuery)&&!cache.isMinFirst(indexReference) && !cache.isMinLast(indexQuery) && !cache.isMinLast(indexReference)){ double diffMin = cache.getMin(indexQuery)-cache.getMin(indexReference); minDist += diffMin*diffMin; nOperationsLBKim++; } if(!cache.isMaxFirst(indexQuery)&&!cache.isMaxFirst(indexReference)&& !cache.isMaxLast(indexQuery) && !cache.isMaxLast(indexReference)){ double diffMax = cache.getMax(indexQuery)-cache.getMax(indexReference); minDist += diffMax*diffMax; nOperationsLBKim++; } status = LBStatus.LB_Kim; } /** * Run Full LB Keogh(Q,R) with EA using data from cache */ protected void tryContinueLBKeoghQR(double scoreToBeat) { int length = query.sequence.length; 
double[] LEQ = cache.getLE(indexQuery, currentW); double[] UEQ = cache.getUE(indexQuery, currentW); while (indexStoppedLB < length && minDist < scoreToBeat) { int index = cache.getIndexNthHighestVal(indexReference, indexStoppedLB); double c = ((MonoDoubleItemSet) reference.sequence[index]).value; if (c < LEQ[index]) { double diff = LEQ[index] - c; minDist += diff * diff; } else if (UEQ[index] < c) { double diff = UEQ[index] - c; minDist += diff * diff; } indexStoppedLB++; } } /** * Run Full LB Keogh(R,Q) with EA using data from cache */ protected void tryContinueLBKeoghRQ(double scoreToBeat) { int length = reference.sequence.length; double[] LER = cache.getLE(indexReference, currentW); double[] UER = cache.getUE(indexReference, currentW); while (indexStoppedLB < length && minDist < scoreToBeat) { int index = cache.getIndexNthHighestVal(indexQuery, indexStoppedLB); double c = ((MonoDoubleItemSet) query.sequence[index]).value; if (c < LER[index]) { double diff = LER[index] - c; minDist += diff * diff; } else if (UER[index] < c) { double diff = UER[index] - c; minDist += diff * diff; } indexStoppedLB++; } } /** * The main function for LazyUCR. 
* Start with LBKim,LBKeogh(Q,R),LBKeogh(R,Q),DTW * @param scoreToBeat * @param w * @return */ public RefineReturnType tryToBeat(double scoreToBeat, int w) { setCurrentW(w); switch (status) { case Previous_Window_LB: case Previous_Window_DTW: case LB_Kim: if(bestMinDist>=scoreToBeat){ return RefineReturnType.Pruned_with_LB; } // if LB_Kim_FL done, then start LB_Keogh(Q,R) indexStoppedLB = 0; minDist = 0; case Partial_LB_KeoghQR: // if had started LB_Keogh, then just starting from // previous index if(bestMinDist>=scoreToBeat){ return RefineReturnType.Pruned_with_LB; } tryContinueLBKeoghQR(scoreToBeat); if(minDist>bestMinDist){ bestMinDist = minDist; } if (bestMinDist >= scoreToBeat) { // stopped in the middle so must be pruning if (indexStoppedLB < query.getNbTuples()) { status = LBStatus.Partial_LB_KeoghQR; } else { LBKeogh1 = minDist; status = LBStatus.Full_LB_KeoghQR; } return RefineReturnType.Pruned_with_LB; }else{ status = LBStatus.Full_LB_KeoghQR; } case Full_LB_KeoghQR: // if LB_Keogh(Q,R) has been done, then we do the second one indexStoppedLB = 0; minDist = 0; case Partial_LB_KeoghRQ: // if had started LB_Keogh, then just starting from // previous index if(bestMinDist>=scoreToBeat){ return RefineReturnType.Pruned_with_LB; } tryContinueLBKeoghRQ(scoreToBeat); if(minDist>bestMinDist){ bestMinDist = minDist; } if (bestMinDist >= scoreToBeat) { if (indexStoppedLB < reference.getNbTuples()) { status = LBStatus.Partial_LB_KeoghRQ; } else { LBKeogh2 = minDist; status = LBStatus.Full_LB_KeoghRQ; } return RefineReturnType.Pruned_with_LB; }else{ status = LBStatus.Full_LB_KeoghRQ; } case Full_LB_KeoghRQ: // if had finished LB_Keogh(R,Q), then DTW if(bestMinDist>=scoreToBeat){ return RefineReturnType.Pruned_with_LB; } double res = query.DTW(reference, currentW); minDist = res * res; if(minDist>bestMinDist){ bestMinDist = minDist; } status = LBStatus.Full_DTW; case Full_DTW: if (bestMinDist >= scoreToBeat) { return RefineReturnType.Pruned_with_DTW; } else { return 
RefineReturnType.New_best; } default: throw new RuntimeException("Case not managed"); } } /** * The main function for LazyUCR with PrunedDTW. * Start with LBKim,LBKeogh(Q,R),LBKeogh(R,Q),PrunedDTW * @param scoreToBeat * @param w * @return */ public RefineReturnType tryToBeatPrunedDTW(double scoreToBeat, int w) { setCurrentW(w); switch (status) { case Previous_Window_LB: case Previous_Window_DTW: case LB_Kim: if(bestMinDist>=scoreToBeat){ return RefineReturnType.Pruned_with_LB; } // if LB_Kim_FL done, then start LB_Keogh(Q,R) indexStoppedLB = 0; minDist = 0; case Partial_LB_KeoghQR: // if had started LB_Keogh, then just starting from // previous index if(bestMinDist>=scoreToBeat){ return RefineReturnType.Pruned_with_LB; } tryContinueLBKeoghQR(scoreToBeat); if(minDist>bestMinDist){ bestMinDist = minDist; } if (bestMinDist >= scoreToBeat) { // stopped in the middle so must be pruning if (indexStoppedLB < query.getNbTuples()) { status = LBStatus.Partial_LB_KeoghQR; } else { LBKeogh1 = minDist; status = LBStatus.Full_LB_KeoghQR; } return RefineReturnType.Pruned_with_LB; }else{ status = LBStatus.Full_LB_KeoghQR; } case Full_LB_KeoghQR: // if LB_Keogh(Q,R) has been done, then we do the second one indexStoppedLB = 0; minDist = 0; case Partial_LB_KeoghRQ: // if had started LB_Keogh, then just starting from // previous index if(bestMinDist>=scoreToBeat){ return RefineReturnType.Pruned_with_LB; } tryContinueLBKeoghRQ(scoreToBeat); if(minDist>bestMinDist){ bestMinDist = minDist; } if (bestMinDist >= scoreToBeat) { if (indexStoppedLB < reference.getNbTuples()) { status = LBStatus.Partial_LB_KeoghRQ; } else { LBKeogh2 = minDist; status = LBStatus.Full_LB_KeoghRQ; } return RefineReturnType.Pruned_with_LB; }else{ status = LBStatus.Full_LB_KeoghRQ; } case Full_LB_KeoghRQ: // if had finished LB_Keogh(R,Q), then DTW if(bestMinDist>=scoreToBeat){ return RefineReturnType.Pruned_with_LB; } double res = query.PrunedDTW(reference, currentW); minDist = res * res; if(minDist>bestMinDist){ 
bestMinDist = minDist; } status = LBStatus.Full_DTW; case Full_DTW: if (bestMinDist >= scoreToBeat) { return RefineReturnType.Pruned_with_DTW; } else { return RefineReturnType.New_best; } default: throw new RuntimeException("Case not managed"); } } /** * The main function for LazyUCR with PrunedDTW with an Upper Bound * Start with LBKim,LBKeogh(Q,R),LBKeogh(R,Q),PrunedDTW * @param scoreToBeat * @param w * @return */ public RefineReturnType tryToBeatPrunedDTW(double scoreToBeat, int w, double UB) { setCurrentW(w); switch (status) { case Previous_Window_LB: case Previous_Window_DTW: case LB_Kim: if(bestMinDist>=scoreToBeat){ return RefineReturnType.Pruned_with_LB; } // if LB_Kim_FL done, then start LB_Keogh(Q,R) indexStoppedLB = 0; minDist = 0; case Partial_LB_KeoghQR: // if had started LB_Keogh, then just starting from // previous index if(bestMinDist>=scoreToBeat){ return RefineReturnType.Pruned_with_LB; } tryContinueLBKeoghQR(scoreToBeat); if(minDist>bestMinDist){ bestMinDist = minDist; } if (bestMinDist >= scoreToBeat) { // stopped in the middle so must be pruning if (indexStoppedLB < query.getNbTuples()) { status = LBStatus.Partial_LB_KeoghQR; } else { LBKeogh1 = minDist; status = LBStatus.Full_LB_KeoghQR; } return RefineReturnType.Pruned_with_LB; }else{ status = LBStatus.Full_LB_KeoghQR; } case Full_LB_KeoghQR: // if LB_Keogh(Q,R) has been done, then we do the second one indexStoppedLB = 0; minDist = 0; case Partial_LB_KeoghRQ: // if had started LB_Keogh, then just starting from // previous index if(bestMinDist>=scoreToBeat){ return RefineReturnType.Pruned_with_LB; } tryContinueLBKeoghRQ(scoreToBeat); if(minDist>bestMinDist){ bestMinDist = minDist; } if (bestMinDist >= scoreToBeat) { if (indexStoppedLB < reference.getNbTuples()) { status = LBStatus.Partial_LB_KeoghRQ; } else { LBKeogh2 = minDist; status = LBStatus.Full_LB_KeoghRQ; } return RefineReturnType.Pruned_with_LB; }else{ status = LBStatus.Full_LB_KeoghRQ; } case Full_LB_KeoghRQ: // if had finished 
LB_Keogh(R,Q), then PrunedDTW if(bestMinDist>=scoreToBeat){ return RefineReturnType.Pruned_with_LB; } double res = query.PrunedDTW(reference, currentW, UB); minDist = res * res; if(minDist>bestMinDist){ bestMinDist = minDist; } status = LBStatus.Full_DTW; case Full_DTW: if (bestMinDist >= scoreToBeat) { return RefineReturnType.Pruned_with_DTW; } else { return RefineReturnType.New_best; } default: throw new RuntimeException("Case not managed"); } } @Override public String toString() { return "" + indexQuery+ " - "+indexReference+" - "+bestMinDist; } public int getOtherIndex(int index) { if (index == indexQuery) { return indexReference; } else { return indexQuery; } } public SymbolicSequence getSequenceForOtherIndex(int index) { if (index == indexQuery) { return reference; } else { return query; } } public double getDistance(int window) { // System.out.println(minDist+" - "+minWindowValidityFullDTW + " // - "+window+ " - "+status.name()); if (status == LBStatus.Full_DTW && minWindowValidityFullDTW <= window) { return minDist; } throw new RuntimeException("Shouldn't call getDistance if not sure there is a valid already-computed DTW distance"); } public int getMinWindowValidityForFullDistance() { if (status == LBStatus.Full_DTW) { return minWindowValidityFullDTW; } throw new RuntimeException("Shouldn't call getDistance if not sure there is a valid already-computed DTW distance"); } public double[] getUBPartial() { return ubPartials; } public double getEuclideanDistance() { return EuclideanDist; } @Override public int compareTo(LazyAssessNNEarlyAbandon o) { int res = this.compare(o); return res; } protected int compare(LazyAssessNNEarlyAbandon o) { double num1 = this.getDoubleValueForRanking(); double num2 = o.getDoubleValueForRanking(); return Double.compare(num1, num2); } protected double getDoubleValueForRanking() { double thisD = this.bestMinDist; switch(status){ case Full_DTW: case Full_LB_KeoghQR: case Full_LB_KeoghRQ: return thisD/query.getNbTuples(); case 
LB_Kim: return thisD/nOperationsLBKim; case Partial_LB_KeoghQR: case Partial_LB_KeoghRQ: return thisD/indexStoppedLB; case Previous_Window_DTW: return 0.8*thisD/query.getNbTuples(); // DTW(w+1) should be tighter case Previous_Window_LB: if(indexStoppedLB==0){ //lb kim return thisD/nOperationsLBKim; }else{ //lbkeogh return thisD/oldIndexStoppedLB; } default: throw new RuntimeException("shouldn't come here"); } } @Override public boolean equals(Object o) { LazyAssessNNEarlyAbandon d = (LazyAssessNNEarlyAbandon) o; return (this.indexQuery == d.indexQuery && this.indexReference == d.indexReference); } public LBStatus getStatus() { return status; } public void setFullDistStatus(){ this.status = LBStatus.Full_DTW; } public double getBestLB(){ return bestMinDist; } }
18,756
29.0112
143
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/legacy/elastic_ensemble/fast_window_search/items/MonoDoubleItemSet.java
/******************************************************************************* * Copyright (C) 2017 Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb * * This file is part of FastWWSearch. * * FastWWSearch is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 3 of the License. * * FastWWSearch is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with FastWWSearch. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ package tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items; import static java.lang.Math.abs; /** * Code for the paper "Efficient search of the best warping window for Dynamic Time Warping" published in SDM18 * * @author Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb * */ public class MonoDoubleItemSet extends Itemset implements java.io.Serializable { private static final long serialVersionUID = 5103879297281957601L; public double value; public MonoDoubleItemSet(double value){ this.value = value; } @Override public Itemset clone() { return new MonoDoubleItemSet(value); } @Override public double distance(Itemset o) { MonoDoubleItemSet o1 = (MonoDoubleItemSet)o; return abs(o1.value-value); } @Override public Itemset mean(Itemset[] tab) { if (tab.length < 1) { throw new RuntimeException("Empty tab"); } double sum = 0.0; for (Itemset itemset : tab) { MonoDoubleItemSet item = (MonoDoubleItemSet)itemset; sum += item.value; } return new MonoDoubleItemSet(sum / tab.length); } @Override public String toString() { return new Double(value).toString(); 
} public double getValue(){ return value; } }
2,137
29.985507
111
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/legacy/elastic_ensemble/fast_window_search/items/MonoItemSet.java
/******************************************************************************* * Copyright (C) 2017 Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb * * This file is part of FastWWSearch. * * FastWWSearch is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 3 of the License. * * FastWWSearch is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with FastWWSearch. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ package tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items; import java.util.HashMap; /** * Code for the paper "Efficient search of the best warping window for Dynamic Time Warping" published in SDM18 * * @author Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb * */ public class MonoItemSet extends Itemset { public String letter; public MonoItemSet(String letter){ this.letter = letter; } @Override public Itemset clone() { return new MonoItemSet(new String(letter)); } @Override public double distance(Itemset o) { MonoItemSet mono = (MonoItemSet)o; return (letter.equals(mono.letter))?0.0:1.0; } @Override /** * vote */ public Itemset mean(Itemset[] tab) { HashMap<String, Integer> map = new HashMap<String, Integer>(); int maxCount = 0; String maxKey=null; for(Itemset itemset:tab){ String key = ((MonoItemSet)itemset).letter; Integer count = map.get(key); Integer newCount ; if(count == null){ newCount = 1; }else{ newCount = count+1; } if(newCount>maxCount){ maxCount = newCount; maxKey = key; } map.put(key, newCount); } return new 
MonoItemSet(maxKey); } @Override public String toString() { return letter; } }
2,180
27.697368
111
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/legacy/elastic_ensemble/fast_window_search/items/SequenceStatsCache.java
/******************************************************************************* * Copyright (C) 2017 Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb * * This file is part of FastWWSearch. * * FastWWSearch is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 3 of the License. * * FastWWSearch is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with FastWWSearch. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ package tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items; import java.util.Arrays; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.sequences.SymbolicSequence; /** * Code for the paper "Efficient search of the best warping window for Dynamic Time Warping" published in SDM18 * <p> * Cache for storing the information on the time series dataset * * @author Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb */ public class SequenceStatsCache { // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Fields // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- double[][] LEs, UEs; double[] mins, maxs; boolean[] isMinFirst, isMinLast, isMaxFirst, isMaxLast; int[] lastWindowComputed; int currentWindow; SymbolicSequence[] train; IndexedDouble[][] indicesSortedByAbsoluteValue; // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Constructor // --- 
--- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- /** * Initialisation and find min,max for every time series in the dataset * * @param train * @param startingWindow */ public SequenceStatsCache(SymbolicSequence[] train, int startingWindow) { this.train = train; int nSequences = train.length; int length = train[0].getNbTuples(); this.LEs = new double[nSequences][length]; this.UEs = new double[nSequences][length]; this.lastWindowComputed = new int[nSequences]; Arrays.fill(this.lastWindowComputed, -1); this.currentWindow = startingWindow; this.mins = new double[nSequences]; this.maxs = new double[nSequences]; this.isMinFirst = new boolean[nSequences]; this.isMinLast = new boolean[nSequences]; this.isMaxFirst = new boolean[nSequences]; this.isMaxLast = new boolean[nSequences]; this.indicesSortedByAbsoluteValue = new IndexedDouble[nSequences][length]; for (int i = 0; i < train.length; i++) { double min = Double.POSITIVE_INFINITY; double max = Double.NEGATIVE_INFINITY; int indexMin = -1, indexMax = -1; for (int j = 0; j < train[i].getNbTuples(); j++) { double elt = ((MonoDoubleItemSet) train[i].sequence[j]).value; if (elt > max) { max = elt; indexMax = j; } if (elt < min) { min = elt; indexMin = j; } indicesSortedByAbsoluteValue[i][j] = new IndexedDouble(j, Math.abs(elt)); } mins[i] = min; maxs[i] = max; isMinFirst[i] = (indexMin == 0); isMinLast[i] = (indexMin == (train[i].getNbTuples() - 1)); isMaxFirst[i] = (indexMax == 0); isMaxLast[i] = (indexMax == (train[i].getNbTuples() - 1)); Arrays.sort(indicesSortedByAbsoluteValue[i], (v1, v2) -> -Double.compare(v1.value, v2.value)); } } // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Methods // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- /** * Get lower envelope of the ith time series, * Compute it if not computed * * @param i * 
@param w * @return */ public double[] getLE(int i, int w) { if (lastWindowComputed[i] != w) { computeLEandUE(i, w); } return LEs[i]; } /** * Get upper envelope of the ith time series, * Compute it if not computed * * @param i * @param w * @return */ public double[] getUE(int i, int w) { if (lastWindowComputed[i] != w) { computeLEandUE(i, w); } return UEs[i]; } /** * Compute envelope for the ith time series with window w * * @param i * @param w */ protected void computeLEandUE(int i, int w) { train[i].LB_KeoghFillUL(w, UEs[i], LEs[i]); this.lastWindowComputed[i] = w; } public boolean isMinFirst(int i) { return isMinFirst[i]; } public boolean isMaxFirst(int i) { return isMaxFirst[i]; } public boolean isMinLast(int i) { return isMinLast[i]; } public boolean isMaxLast(int i) { return isMaxLast[i]; } public double getMin(int i) { return mins[i]; } public double getMax(int i) { return maxs[i]; } class IndexedDouble { double value; int index; public IndexedDouble(int index, double value) { this.value = value; this.index = index; } } public int getIndexNthHighestVal(int i, int n) { return indicesSortedByAbsoluteValue[i][n].index; } }
6,131
33.449438
118
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/legacy/elastic_ensemble/fast_window_search/sequences/IndexScored.java
/******************************************************************************* * Copyright (C) 2017 Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb * * This file is part of FastWWSearch. * * FastWWSearch is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 3 of the License. * * FastWWSearch is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with FastWWSearch. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ package tsml.classifiers.legacy.elastic_ensemble.fast_window_search.sequences; /** * Code for the paper "Efficient search of the best warping window for Dynamic Time Warping" published in SDM18 * * @author Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb * */ public class IndexScored implements Comparable<IndexScored> { int index; double score; public IndexScored(int index,double score){ this.index = index; this.score = score; } @Override public int compareTo(IndexScored o) { return Double.compare(score, o.score); } }
1,493
37.307692
111
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/legacy/elastic_ensemble/fast_window_search/sequences/SymbolicSequence.java
/******************************************************************************* * Copyright (C) 2017 Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb * * This file is part of FastWWSearch. * * FastWWSearch is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 3 of the License. * * FastWWSearch is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with FastWWSearch. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ package tsml.classifiers.legacy.elastic_ensemble.fast_window_search.sequences; import static java.lang.Math.sqrt; import java.util.ArrayList; import java.util.Arrays; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.DTWResult; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.Itemset; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.MonoDoubleItemSet; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.MonoItemSet; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.tools.Tools; /** * Code for the paper "Efficient search of the best warping window for Dynamic Time Warping" published in SDM18 * * Class for time series * * @author Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb * */ public class SymbolicSequence implements java.io.Serializable { // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Internal types // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- 
--- --- --- --- --- --- --- --- --- private static final long serialVersionUID = -8340081464719919763L; public static int w = Integer.MAX_VALUE; protected final static int NB_ITERATIONS = 15; protected final static int RIEN = -1; protected final static int DIAGONALE = 0; protected final static int GAUCHE = 1; protected final static int HAUT = 2; public Itemset[] sequence; public static long nDTWExt; private final static int MAX_SEQ_LENGTH = 8000; public static double[][] matriceW = new double[SymbolicSequence.MAX_SEQ_LENGTH][SymbolicSequence.MAX_SEQ_LENGTH]; public static int[][] matriceChoix = new int[SymbolicSequence.MAX_SEQ_LENGTH][SymbolicSequence.MAX_SEQ_LENGTH]; protected static int[][] optimalPathLength = new int[SymbolicSequence.MAX_SEQ_LENGTH][SymbolicSequence.MAX_SEQ_LENGTH]; protected static int[][] minWarpingWindow = new int[SymbolicSequence.MAX_SEQ_LENGTH][SymbolicSequence.MAX_SEQ_LENGTH]; public static double[] ub_partials = new double[SymbolicSequence.MAX_SEQ_LENGTH]; public SymbolicSequence(final Itemset[] sequence) { if (sequence == null || sequence.length == 0) { throw new RuntimeException("sequence vide"); } this.sequence = sequence; } // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Constructor // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- public SymbolicSequence(SymbolicSequence o) { if (o.sequence == null || o.sequence.length == 0) { throw new RuntimeException("sequence vide"); } this.sequence = o.sequence; } // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Methods // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- @Override public Object clone() { final Itemset[] newSequence = Arrays.copyOf(sequence, sequence.length); for (int i = 0; i < newSequence.length; i++) { 
newSequence[i] = sequence[i].clone(); } return new SymbolicSequence(newSequence); } /** * Get data point from this sequence at index n * @param n * @return */ public Itemset getItem(final int n) { return sequence[n]; } /** * Return the length of this sequence * @return */ public final int getNbTuples() { return this.sequence.length; } /** * Euclidean distance to a * @param a * @return */ public final double distanceEuc(SymbolicSequence a) { final int length = this.getNbTuples(); double res = 0; for (int i = 0; i < length; i++) { res += this.sequence[i].squaredDistance(a.sequence[i]); } return sqrt(res); } /** * Squared Euclidean distance with early abandon * @param a * @param max * @return */ public final double squaredEucEarlyAbandon(SymbolicSequence a,double max) { Itemset[] series1 = this.sequence; Itemset[] series2 = a.sequence; int minLength = Math.min(series1.length, series2.length); double distance = 0; for (int i = 0; i < minLength; i++) { distance += series1[i].squaredDistance(series2[i]); if(distance>= max){ return Double.MAX_VALUE; } } return distance; } /** * Euclidean distance with normalised length * @param a * @return */ public final double distanceEucNormalized(SymbolicSequence a) { Itemset[] series1 = this.sequence; Itemset[] series2 = a.sequence; int minLength = Math.min(series1.length, series2.length); double distance = 0; for (int i = 0; i < minLength; i++) { distance += series1[i].squaredDistance(series2[i]); } return sqrt(distance)/minLength; } /** * Squared Euclidean distance with early abandon and normalised length * @param a * @param max * @return */ public final double squaredEucEarlyAbandonNormalized(SymbolicSequence a,double max) { Itemset[] series1 = this.sequence; Itemset[] series2 = a.sequence; int minLength = Math.min(series1.length, series2.length); double distance = 0; for (int i = 0; i < minLength; i++) { distance += series1[i].squaredDistance(series2[i]); if(distance/(i+1)>= max){ return Double.MAX_VALUE; } } return 
distance/minLength; } /** * Compute LB Keogh to a at window r * @param a * @param r * @return */ public final double LB_Keogh(SymbolicSequence a, int r) { final int length = Math.min(this.getNbTuples(), a.getNbTuples()); double[] U = new double[length]; double[] L = new double[length]; for (int i = 0; i < length; i++) { double min = Double.POSITIVE_INFINITY; double max = Double.NEGATIVE_INFINITY; int startR = Math.max(0, i - r); int stopR = Math.min(length - 1, i + r); for (int j = startR; j <= stopR; j++) { double value = ((MonoDoubleItemSet) this.sequence[j]).value; min = Math.min(min, value); max = Math.max(max, value); } L[i] = min; U[i] = max; } double res = 0; for (int i = 0; i < length; i++) { double c = ((MonoDoubleItemSet) a.sequence[i]).value; if (c < L[i]) { double diff = L[i] - c; res += diff * diff; } else if (U[i] < c) { double diff = U[i] - c; res += diff * diff; } } return sqrt(res); } /** * Compute LB Keogh with a given U and L envelope * @param a * @param r * @param U * @param L * @return */ public final double LB_Keogh(SymbolicSequence a, int r,double[]U,double[]L) { LB_KeoghFillUL(r,U,L); return LB_KeoghPreFilled(a, U, L); } /** * Fill the Upper and Lower Envelope for this sequence * @param r * @param U * @param L */ public final void LB_KeoghFillUL( int r,double[]U,double[]L) { final int length = this.getNbTuples(); for (int i = 0; i < length; i++) { double min = Double.POSITIVE_INFINITY; double max = Double.NEGATIVE_INFINITY; int startR = Math.max(0, i - r); int stopR = Math.min(length - 1, i + r); for (int j = startR; j <= stopR; j++) { double value = ((MonoDoubleItemSet) this.sequence[j]).value; min = Math.min(min, value); max = Math.max(max, value); } L[i] = min; U[i] = max; } } /** * Compute LB Keogh with a prefilled U and L Envelope * @param a * @param U * @param L * @return */ public static final double LB_KeoghPreFilled(SymbolicSequence a, double[]U,double[]L) { final int length = Math.min(U.length, a.getNbTuples()); double res = 0; for 
(int i = 0; i < length; i++) { double c = ((MonoDoubleItemSet) a.sequence[i]).value; if (c < L[i]) { double diff = L[i] - c; res += diff * diff; } else if (U[i] < c) { double diff = U[i] - c; res += diff * diff; } } return sqrt(res); } /** * Compute Full DTW distance to sequence a without window validity * @param a * @return */ public synchronized double distance(SymbolicSequence a) { return this.distance(a, matriceW); } /** * Compute Full DTW distance with cost matrix without window validity * @param a * @param matriceW * @return */ public double distance(SymbolicSequence a, double[][] matriceW) { SymbolicSequence S1 = this; SymbolicSequence S2 = a; final int tailleS = S1.getNbTuples(); final int tailleT = S2.getNbTuples(); int i, j; matriceW[0][0] = S1.sequence[0].squaredDistance(S2.sequence[0]); for (i = 1; i < tailleS; i++) { matriceW[i][0] = matriceW[i - 1][0] + S1.sequence[i].squaredDistance(S2.sequence[0]); } for (j = 1; j < tailleT; j++) { matriceW[0][j] = matriceW[0][j - 1] + S1.sequence[0].squaredDistance(S2.sequence[j]); } for (i = 1; i < tailleS; i++) { for (j = 1; j < tailleT; j++) { matriceW[i][j] = Tools.Min3(matriceW[i - 1][j - 1], matriceW[i][j - 1], matriceW[i - 1][j]) + S1.sequence[i].squaredDistance(S2.sequence[j]); } } return sqrt(matriceW[tailleS - 1][tailleT - 1]); } /** * Compute Euclidean Distance * @param a * @return */ public double ED(SymbolicSequence a) { double sum = 0; for (int i = 0; i < this.getNbTuples(); i++) { sum += this.sequence[i].squaredDistance(a.sequence[i]); } return sqrt(sum); } /** * Compute DTW distance with warping window without window validity * @param a * @param w * @return */ public synchronized double DTW(SymbolicSequence a,int w) { return this.DTW(a, w, matriceW); } /** * Compute DTW distance with warping window and cost matrix without window validity * @param a * @param w * @param warpingMatrix * @return */ public double DTW(SymbolicSequence a, int w,double[][] warpingMatrix) { final int length1 = 
this.getNbTuples(); final int length2 = a.getNbTuples(); int i, j; warpingMatrix[0][0] = this.sequence[0].squaredDistance(a.sequence[0]); for (i = 1; i < Math.min(length1, 1 + w); i++) { warpingMatrix[i][0] = warpingMatrix[i - 1][0] + this.sequence[i].squaredDistance(a.sequence[0]); } for (j = 1; j < Math.min(length2, 1 + w); j++) { warpingMatrix[0][j] = warpingMatrix[0][j - 1] + this.sequence[0].squaredDistance(a.sequence[j]); } if (j < length2) { warpingMatrix[0][j] = Double.POSITIVE_INFINITY; } for (i = 1; i < length1; i++) { int jStart = Math.max(1, i - w); int jStop = Math.min(length2, i + w + 1); int indexInftyLeft = i-w-1; if(indexInftyLeft>=0)warpingMatrix[i][indexInftyLeft] = Double.POSITIVE_INFINITY; for (j = jStart; j < jStop; j++) { warpingMatrix[i][j] = Tools.Min3(warpingMatrix[i - 1][j - 1], warpingMatrix[i][j - 1], warpingMatrix[i - 1][j]) + this.sequence[i].squaredDistance(a.sequence[j]); } if (jStop < length2) { warpingMatrix[i][jStop] = Double.POSITIVE_INFINITY; } } return sqrt(warpingMatrix[length1 - 1][length2 - 1]); } /** * Compute PrunedDTW with warping window using partial Upper bound * that is computed on the go without window validity * @param T * @param w * @return */ public double PrunedDTW(SymbolicSequence T, int w) { nDTWExt++; final int tailleS = this.getNbTuples(); final int tailleT = T.getNbTuples(); int i, j, indiceRes; double res = 0.0; int sc = 1, ec = 1, ec_next = 0; boolean found_lower; double UB = 0; ub_partials[tailleS] = 0; for (i = tailleS-1; i >= 0; i--) { ub_partials[i] = ub_partials[i+1] + this.sequence[i].squaredDistance(T.sequence[i]); matriceW[i][0] = Double.POSITIVE_INFINITY; matriceW[0][i] = Double.POSITIVE_INFINITY; } matriceW[tailleS][0] = Double.POSITIVE_INFINITY; matriceW[0][tailleS] = Double.POSITIVE_INFINITY; matriceW[0][0] = 0; UB = ub_partials[0]; for (i = 1; i <= tailleS; i++) { int jStart = Math.max(sc, i-w); int jStop = Math.min(i+w, tailleT); UB = ub_partials[i-1] + matriceW[i-1][i-1]; 
matriceW[i][jStart-1] = Double.POSITIVE_INFINITY; found_lower = false; for (j = jStart; j <= jStop; j++) { if (j > ec) { res = matriceW[i][j-1]; } else { indiceRes = Tools.ArgMin3(matriceW[i - 1][j - 1], matriceW[i][j - 1], matriceW[i - 1][j]); switch (indiceRes) { case DIAGONALE: res = matriceW[i - 1][j - 1]; break; case GAUCHE: res = matriceW[i][j - 1]; break; case HAUT: res = matriceW[i - 1][j]; break; } } matriceW[i][j] = this.sequence[i-1].squaredDistance(T.sequence[j-1]) + res; if (matriceW[i][j] > UB) { if (!found_lower) { sc = j+1; } if (j > ec) { matriceW[i][j+1] = Double.POSITIVE_INFINITY; break; } } else { found_lower = true; ec_next = j; } if (jStop + 1 <= tailleT) { matriceW[i][jStop+1] = Double.POSITIVE_INFINITY; } } ec_next++; ec = ec_next; } return sqrt(matriceW[tailleS][tailleT]); } /** * Compute PrunedDTW with warping window using a given upper bound without window validity * @param T * @param w * @param UB * @return */ public double PrunedDTW(SymbolicSequence T, int w, double UB) { nDTWExt++; final int tailleS = this.getNbTuples(); final int tailleT = T.getNbTuples(); int i, j, indiceRes; double res = 0.0; int sc = 1, ec = 1, ec_next = 0; boolean found_lower; for (i = tailleS-1; i >= 0; i--) { matriceW[i][0] = Double.POSITIVE_INFINITY; matriceW[0][i] = Double.POSITIVE_INFINITY; } matriceW[tailleS][0] = Double.POSITIVE_INFINITY; matriceW[0][tailleS] = Double.POSITIVE_INFINITY; matriceW[0][0] = 0; for (i = 1; i <= tailleS; i++) { int jStart = Math.max(sc, i-w); int jStop = Math.min(i+w, tailleT); matriceW[i][jStart-1] = Double.POSITIVE_INFINITY; found_lower = false; for (j = jStart; j <= jStop; j++) { if (j > ec) { res = matriceW[i][j-1]; } else { indiceRes = Tools.ArgMin3(matriceW[i - 1][j - 1], matriceW[i][j - 1], matriceW[i - 1][j]); switch (indiceRes) { case DIAGONALE: res = matriceW[i - 1][j - 1]; break; case GAUCHE: res = matriceW[i][j - 1]; break; case HAUT: res = matriceW[i - 1][j]; break; } } matriceW[i][j] = 
this.sequence[i-1].squaredDistance(T.sequence[j-1]) + res; if (matriceW[i][j] > UB) { if (!found_lower) { sc = j+1; } if (j > ec) { matriceW[i][j+1] = Double.POSITIVE_INFINITY; break; } } else { found_lower = true; ec_next = j; } if (jStop + 1 <= tailleT) { matriceW[i][jStop+1] = Double.POSITIVE_INFINITY; } } ec_next++; ec = ec_next; } return sqrt(matriceW[tailleS][tailleT]); } /** * Compute DTW with warping window and window validity * @param T * @param w * @return */ public synchronized DTWResult DTWExtResults(SymbolicSequence T, int w) { nDTWExt++; final int tailleS = this.getNbTuples(); final int tailleT = T.getNbTuples(); int i, j, indiceRes; double res = 0.0; matriceW[0][0] = this.sequence[0].squaredDistance(T.sequence[0]); minWarpingWindow[0][0]=0; for (i = 1; i < Math.min(tailleS, 1 + w); i++) { matriceW[i][0] = matriceW[i - 1][0]+ this.sequence[i].squaredDistance(T.sequence[0]); minWarpingWindow[i][0]=i; } for (j = 1; j < Math.min(tailleT, 1 + w); j++) { matriceW[0][j] = matriceW[0][j - 1]+ T.sequence[j].squaredDistance(sequence[0]); minWarpingWindow[0][j] = j; } if (j < tailleT) { matriceW[0][j] = Double.POSITIVE_INFINITY; } for (i = 1; i < tailleS; i++) { int jStart = Math.max(1, i - w); int jStop = Math.min(tailleT, i + w + 1); int indexInftyLeft = i-w-1; if(indexInftyLeft>=0) matriceW[i][indexInftyLeft] = Double.POSITIVE_INFINITY; for (j = jStart; j < jStop; j++) { indiceRes = Tools.ArgMin3(matriceW[i - 1][j - 1], matriceW[i][j - 1], matriceW[i - 1][j]); int absIJ = Math.abs(i-j); switch (indiceRes) { case DIAGONALE: res = matriceW[i - 1][j - 1]; minWarpingWindow[i][j] = Math.max(absIJ, minWarpingWindow[i-1][j-1]); break; case GAUCHE: res = matriceW[i][j - 1]; minWarpingWindow[i][j] = Math.max(absIJ, minWarpingWindow[i][j-1]); break; case HAUT: res = matriceW[i - 1][j]; minWarpingWindow[i][j] = Math.max(absIJ, minWarpingWindow[i-1][j]); break; } matriceW[i][j] = res + this.sequence[i].squaredDistance(T.sequence[j]); } if (j < tailleT) { matriceW[i][j] 
= Double.POSITIVE_INFINITY; } } DTWResult resExt= new DTWResult(); resExt.distance = sqrt(matriceW[tailleS - 1][tailleT- 1]); resExt.r = minWarpingWindow[tailleS - 1][tailleT- 1]; return resExt; } /** * Compute PrunedDTW with warping window and window validity * @param T * @param w * @return */ public synchronized DTWResult PrunedDTWExtResults(SymbolicSequence T, int w) { nDTWExt++; final int tailleS = this.getNbTuples(); final int tailleT = T.getNbTuples(); int i, j, indiceRes; double res = 0.0; int sc = 1, ec = 1, ec_next = 0; boolean found_lower; double UB = 0; ub_partials[tailleS] = 0; for (i = tailleS-1; i >= 0; i--) { ub_partials[i] = ub_partials[i+1] + this.sequence[i].squaredDistance(T.sequence[i]); matriceW[i][0] = Double.POSITIVE_INFINITY; matriceW[0][i] = Double.POSITIVE_INFINITY; minWarpingWindow[0][i] = i; minWarpingWindow[i][0] = i; } matriceW[tailleS][0] = Double.POSITIVE_INFINITY; matriceW[0][tailleS] = Double.POSITIVE_INFINITY; matriceW[0][0] = 0; UB = ub_partials[0]; for (i = 1; i <= tailleS; i++) { int jStart = Math.max(sc, i-w); int jStop = Math.min(i+w, tailleT); UB = ub_partials[i-1] + matriceW[i-1][i-1]; matriceW[i][jStart-1] = Double.POSITIVE_INFINITY; found_lower = false; for (j = jStart; j <= jStop; j++) { int absIJ = Math.abs((i-1)-(j-1)); if (j > ec) { res = matriceW[i][j-1]; if (i == 1) minWarpingWindow[i][j] = i; else minWarpingWindow[i][j] = Math.max(absIJ, minWarpingWindow[i][j-1]); } else { indiceRes = Tools.ArgMin3(matriceW[i - 1][j - 1], matriceW[i][j - 1], matriceW[i - 1][j]); switch (indiceRes) { case DIAGONALE: res = matriceW[i - 1][j - 1]; minWarpingWindow[i][j] = Math.max(absIJ, minWarpingWindow[i-1][j-1]); break; case GAUCHE: res = matriceW[i][j - 1]; if (i == 1) minWarpingWindow[i][j] = i; else minWarpingWindow[i][j] = Math.max(absIJ, minWarpingWindow[i][j-1]); break; case HAUT: res = matriceW[i - 1][j]; if (j == 1) minWarpingWindow[i][j] = j; else minWarpingWindow[i][j] = Math.max(absIJ, minWarpingWindow[i-1][j]); break; } 
} matriceW[i][j] = this.sequence[i-1].squaredDistance(T.sequence[j-1]) + res; if (jStop + 1 <= tailleT) { matriceW[i][jStop+1] = Double.POSITIVE_INFINITY; } if (matriceW[i][j] > UB) { if (!found_lower) { sc = j+1; } if (j > ec) { matriceW[i][j+1] = Double.POSITIVE_INFINITY; break; } } else { found_lower = true; ec_next = j; } } ec_next++; ec = ec_next; } DTWResult resExt= new DTWResult(); resExt.distance = sqrt(matriceW[tailleS][tailleT]); resExt.r = minWarpingWindow[tailleS][tailleT]; return resExt; } public synchronized ArrayList<Integer>[] DTWAssociationFromS( final SymbolicSequence T) { @SuppressWarnings("unchecked") final ArrayList<Integer>[] association = new ArrayList[this.getNbTuples()]; for (int i = 0; i < association.length; i++) { association[i] = new ArrayList<Integer>(); } final int tailleS = this.getNbTuples(); final int tailleT = T.getNbTuples(); int nbTuplesAverageSeq, i, j, indiceRes; double res = 0.0; matriceW[0][0] = this.sequence[0].squaredDistance(T.sequence[0]); matriceChoix[0][0] = RIEN; optimalPathLength[0][0] = 0; for (i = 1; i < tailleS; i++) { matriceW[i][0] = matriceW[i - 1][0] + this.sequence[i].squaredDistance(T.sequence[0]); matriceChoix[i][0] = HAUT; optimalPathLength[i][0] = i; } for (j = 1; j < tailleT; j++) { matriceW[0][j] = matriceW[0][j - 1] + T.sequence[j].squaredDistance(sequence[0]); matriceChoix[0][j] = GAUCHE; optimalPathLength[0][j] = j; } for (i = 1; i < tailleS; i++) { for (j = 1; j < tailleT; j++) { indiceRes = Tools.ArgMin3(matriceW[i - 1][j - 1], matriceW[i][j - 1], matriceW[i - 1][j]); matriceChoix[i][j] = indiceRes; switch (indiceRes) { case DIAGONALE: res = matriceW[i - 1][j - 1]; optimalPathLength[i][j] = optimalPathLength[i - 1][j - 1] + 1; break; case GAUCHE: res = matriceW[i][j - 1]; optimalPathLength[i][j] = optimalPathLength[i][j - 1] + 1; break; case HAUT: res = matriceW[i - 1][j]; optimalPathLength[i][j] = optimalPathLength[i - 1][j] + 1; break; } matriceW[i][j] = res + 
this.sequence[i].squaredDistance(T.sequence[j]); } } nbTuplesAverageSeq = optimalPathLength[tailleS - 1][tailleT - 1] + 1; i = tailleS - 1; j = tailleT - 1; for (int t = nbTuplesAverageSeq - 1; t >= 0; t--) { association[i].add(j); switch (matriceChoix[i][j]) { case DIAGONALE: i = i - 1; j = j - 1; break; case GAUCHE: j = j - 1; break; case HAUT: i = i - 1; break; } } return association; } protected synchronized ArrayList<Itemset>[] computeAssociations( final SymbolicSequence... tabSequence) { @SuppressWarnings("unchecked") final ArrayList<Itemset>[] tupleAssociation = new ArrayList[this .getNbTuples()]; for (int i = 0; i < tupleAssociation.length; i++) { tupleAssociation[i] = new ArrayList<Itemset>(tabSequence.length); } int nbTuplesAverageSeq, i, j, indiceRes; double res = 0.0; final int tailleCenter = this.getNbTuples(); int tailleT; for (final SymbolicSequence S : tabSequence) { tailleT = S.getNbTuples(); SymbolicSequence.matriceW[0][0] = this.sequence[0] .squaredDistance(S.sequence[0]); SymbolicSequence.matriceChoix[0][0] = SymbolicSequence.RIEN; SymbolicSequence.optimalPathLength[0][0] = 0; for (i = 1; i < tailleCenter; i++) { SymbolicSequence.matriceW[i][0] = SymbolicSequence.matriceW[i - 1][0] + this.sequence[i].squaredDistance(S.sequence[0]); SymbolicSequence.matriceChoix[i][0] = SymbolicSequence.HAUT; SymbolicSequence.optimalPathLength[i][0] = i; } for (j = 1; j < tailleT; j++) { SymbolicSequence.matriceW[0][j] = SymbolicSequence.matriceW[0][j - 1] + S.sequence[j].squaredDistance(this.sequence[0]); SymbolicSequence.matriceChoix[0][j] = SymbolicSequence.GAUCHE; SymbolicSequence.optimalPathLength[0][j] = j; } for (i = 1; i < tailleCenter; i++) { for (j = 1; j < tailleT; j++) { indiceRes = Tools.ArgMin3( SymbolicSequence.matriceW[i - 1][j - 1], SymbolicSequence.matriceW[i][j - 1], SymbolicSequence.matriceW[i - 1][j]); SymbolicSequence.matriceChoix[i][j] = indiceRes; switch (indiceRes) { case DIAGONALE: res = SymbolicSequence.matriceW[i - 1][j - 1]; 
SymbolicSequence.optimalPathLength[i][j] = SymbolicSequence.optimalPathLength[i - 1][j - 1] + 1; break; case GAUCHE: res = SymbolicSequence.matriceW[i][j - 1]; SymbolicSequence.optimalPathLength[i][j] = SymbolicSequence.optimalPathLength[i][j - 1] + 1; break; case HAUT: res = SymbolicSequence.matriceW[i - 1][j]; SymbolicSequence.optimalPathLength[i][j] = SymbolicSequence.optimalPathLength[i - 1][j] + 1; break; } SymbolicSequence.matriceW[i][j] = res + this.sequence[i].squaredDistance(S.sequence[j]); } } nbTuplesAverageSeq = SymbolicSequence.optimalPathLength[tailleCenter - 1][tailleT - 1] + 1; i = tailleCenter - 1; j = tailleT - 1; for (int t = nbTuplesAverageSeq - 1; t >= 0; t--) { tupleAssociation[i].add(S.sequence[j]); switch (SymbolicSequence.matriceChoix[i][j]) { case DIAGONALE: i = i - 1; j = j - 1; break; case GAUCHE: j = j - 1; break; case HAUT: i = i - 1; break; } } } return tupleAssociation; } protected synchronized ArrayList<Itemset>[][] computeAssociationsBySequence( final SymbolicSequence... 
tabSequence) { @SuppressWarnings("unchecked") final ArrayList<Itemset>[][] tupleAssociation = new ArrayList[sequence.length][tabSequence.length]; for (int i = 0; i < tupleAssociation.length; i++) { for (int j = 0; j < tupleAssociation[i].length; j++) { tupleAssociation[i][j] = new ArrayList<Itemset>(); } } int nbTuplesAverageSeq, i, j, indiceRes; double res = 0.0; final int sequenceLength = this.sequence.length; int tailleT; for (int s = 0; s < tabSequence.length; s++) { final SymbolicSequence S = tabSequence[s]; tailleT = S.getNbTuples(); SymbolicSequence.matriceW[0][0] = this.sequence[0] .squaredDistance(S.sequence[0]); SymbolicSequence.matriceChoix[0][0] = SymbolicSequence.RIEN; SymbolicSequence.optimalPathLength[0][0] = 0; for (i = 1; i < sequenceLength; i++) { SymbolicSequence.matriceW[i][0] = SymbolicSequence.matriceW[i - 1][0] + this.sequence[i].squaredDistance(S.sequence[0]); SymbolicSequence.matriceChoix[i][0] = SymbolicSequence.HAUT; SymbolicSequence.optimalPathLength[i][0] = i; } for (j = 1; j < tailleT; j++) { SymbolicSequence.matriceW[0][j] = SymbolicSequence.matriceW[0][j - 1] + S.sequence[j].squaredDistance(this.sequence[0]); SymbolicSequence.matriceChoix[0][j] = SymbolicSequence.GAUCHE; SymbolicSequence.optimalPathLength[0][j] = j; } for (i = 1; i < sequenceLength; i++) { for (j = 1; j < tailleT; j++) { indiceRes = Tools.ArgMin3( SymbolicSequence.matriceW[i - 1][j - 1], SymbolicSequence.matriceW[i][j - 1], SymbolicSequence.matriceW[i - 1][j]); SymbolicSequence.matriceChoix[i][j] = indiceRes; switch (indiceRes) { case DIAGONALE: res = SymbolicSequence.matriceW[i - 1][j - 1]; SymbolicSequence.optimalPathLength[i][j] = SymbolicSequence.optimalPathLength[i - 1][j - 1] + 1; break; case GAUCHE: res = SymbolicSequence.matriceW[i][j - 1]; SymbolicSequence.optimalPathLength[i][j] = SymbolicSequence.optimalPathLength[i][j - 1] + 1; break; case HAUT: res = SymbolicSequence.matriceW[i - 1][j]; SymbolicSequence.optimalPathLength[i][j] = 
SymbolicSequence.optimalPathLength[i - 1][j] + 1; break; } SymbolicSequence.matriceW[i][j] = res + this.sequence[i].squaredDistance(S.sequence[j]); } } nbTuplesAverageSeq = SymbolicSequence.optimalPathLength[sequenceLength - 1][tailleT - 1] + 1; i = sequenceLength - 1; j = tailleT - 1; for (int t = nbTuplesAverageSeq - 1; t >= 0; t--) { tupleAssociation[i][s].add(S.sequence[j]); switch (SymbolicSequence.matriceChoix[i][j]) { case DIAGONALE: i = i - 1; j = j - 1; break; case GAUCHE: j = j - 1; break; case HAUT: i = i - 1; break; } } } return tupleAssociation; } @Override public String toString() { String str = "["; for (final Itemset t : sequence) { str += "{"; str += t.toString(); str += "}"; } str += "]"; return str; } public Itemset[] getSequence() { return this.sequence; } public static final double squaredL2(double a, double b) { double tmp = a - b; return tmp * tmp; } public static final double squaredL2(String a, String b) { return (a.equals(b)) ? 0.0 : 1.0; } public static MonoItemSet[] buildSeq(String s) { MonoItemSet[] seq = new MonoItemSet[s.length()]; for (int i = 0; i < seq.length; i++) { seq[i] = new MonoItemSet(s.charAt(i) + ""); } return seq; } }
28,883
27.626363
120
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/legacy/elastic_ensemble/fast_window_search/tools/QuickSort.java
/******************************************************************************* * Copyright (C) 2017 Chang Wei Tan * * This file is part of FastWWSearch. * * FastWWSearch is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 3 of the License. * * FastWWSearch is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with FastWWSearch. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ package tsml.classifiers.legacy.elastic_ensemble.fast_window_search.tools; /** * Code for the paper "Efficient search of the best warping window for Dynamic Time Warping" published in SDM18 * * Performing quicksort * * @author Chang Wei tan * */ public class QuickSort { /** * Sorting double array with index * @param numbers * @param index */ public final static void sort(double[] numbers, int[] index) { qsort(numbers, index, 0, numbers.length-1); } /** * Sorting integer array with index * @param numbers * @param index */ public final static void sort(int[] numbers, int[] index) { qsort(numbers, index, 0, numbers.length-1); } /** * Quicksort algorithm for double array * @param numbers * @param index * @param low * @param high */ public final static void qsort(double[] numbers, int[] index, int low, int high) { int i = low, j = high; final double pivot = numbers[(low + high)/2]; while (i <= j) { while (numbers[i] < pivot) i++; while (numbers[j] > pivot) j--; if (i <= j) { swap(numbers, index, i, j); i++; j--; } } if (low < j) qsort(numbers, index, low, j); if (i < high) qsort(numbers, index, i, high); } /** * Quicksort algorithm for Integer array * @param 
numbers * @param index * @param low * @param high */ public final static void qsort(int[] numbers, int[] index, int low, int high) { int i = low, j = high; final double pivot = numbers[(low + high)/2]; while (i <= j) { while (numbers[i] < pivot) i++; while (numbers[j] > pivot) j--; if (i <= j) { swap(numbers, index, i, j); i++; j--; } } if (low < j) qsort(numbers, index, low, j); if (i < high) qsort(numbers, index, i, high); } /** * Swap operation * @param numbers * @param index * @param i * @param j */ private final static void swap(double[] numbers, int[] index, int i, int j) { final double tempNum = numbers[i]; final int tempIndex = index[i]; numbers[i] = numbers[j]; index[i] = index[j]; numbers[j] = tempNum; index[j] = tempIndex; } /** * Swap operation * @param numbers * @param index * @param i * @param j */ private final static void swap(int[] numbers, int[] index, int i, int j) { final int tempNum = numbers[i]; final int tempIndex = index[i]; numbers[i] = numbers[j]; index[i] = index[j]; numbers[j] = tempNum; index[j] = tempIndex; } }
3,323
23.262774
111
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/legacy/elastic_ensemble/fast_window_search/tools/Sampling.java
/******************************************************************************* * Copyright (C) 2017 Chang Wei Tan * * This file is part of FastWWSearch. * * FastWWSearch is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 3 of the License. * * FastWWSearch is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with FastWWSearch. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ package tsml.classifiers.legacy.elastic_ensemble.fast_window_search.tools; import java.util.Random; import weka.core.Instances; /** * Code for the paper "Efficient search of the best warping window for Dynamic Time Warping" published in SDM18 * * Different types of sampling for the datasets * * @author Chang Wei Tan * */ public class Sampling { /** * Sample a subset from train and test set each * @param train * @param numTrain * @param test * @param numTest * @return */ public static Instances[] sample(Instances train, int numTrain, Instances test, int numTest) { Instances trainDataset = new Instances(train,numTrain); trainDataset = random(train); trainDataset = new Instances(trainDataset, 0, numTrain); Instances testDataset = new Instances(test,numTest); testDataset = random(test); testDataset = new Instances(testDataset, 0, numTest); return new Instances[] { trainDataset, testDataset }; } /** * Sample a subset from the given dataset * @param data * @param size * @return */ public static Instances sample(Instances data, int size) { Instances newData = new Instances(data,size); newData = random(data); newData = new Instances(newData, 0, size); return newData; 
} /** * Randomize the dataset * @param data * @return */ public static Instances random(Instances data) { data.randomize(new Random()); return data; } /** * Reorder the dataset by its largest class * @param data * @return */ public static Instances orderByLargestClass(Instances data) { Instances newData = new Instances(data, data.numInstances()); // get the number of class in the data int nbClass = data.numClasses(); int[] instancePerClass = new int[nbClass]; int[] labels = new int[nbClass]; int[] classIndex = new int[nbClass]; // sort the data base on its class data.sort(data.classAttribute()); // get the number of instances per class in the data for (int i = 0; i < nbClass; i++) { instancePerClass[i] = data.attributeStats(data.classIndex()).nominalCounts[i]; labels[i] = i; if (i > 0) classIndex[i] = classIndex[i-1] + instancePerClass[i-1]; } QuickSort.sort(instancePerClass, labels); for (int i = nbClass-1; i >=0 ; i--) { for (int j = 0; j < instancePerClass[i]; j++) { newData.add(data.instance(classIndex[labels[i]] + j)); } } return newData; } /** * Reorder the data by compactness of each class using Euclidean distance * @param data * @return */ public static Instances orderByCompactClass(Instances data) { Instances newData = new Instances(data, data.numInstances()); // get the number of class in the data int nbClass = data.numClasses(); int[] instancePerClass = new int[nbClass]; int[] labels = new int[nbClass]; int[] classIndex = new int[nbClass]; double[] compactness = new double[nbClass]; // sort the data base on its class data.sort(data.classAttribute()); int start = 0; // get the number of instances per class in the data for (int i = 0; i < nbClass; i++) { instancePerClass[i] = data.attributeStats(data.classIndex()).nominalCounts[i]; labels[i] = i; if (i > 0) classIndex[i] = classIndex[i-1] + instancePerClass[i-1]; int end = start + instancePerClass[i]; int counter = 0; double[][] dataPerClass = new double[instancePerClass[i]][data.numAttributes()-1]; for 
(int j = start; j < end; j++) { dataPerClass[counter++] = data.instance(j).toDoubleArray(); } double[] mean = arithmeticMean(dataPerClass); double d = 0; for (int j = 0; j < instancePerClass[i]; j++) { double temp = euclideanDistance(mean, dataPerClass[j]); temp *= temp; temp -= (mean[0] - dataPerClass[j][0]) * (mean[0] - dataPerClass[j][0]); d += temp; } compactness[i] = d / instancePerClass[i]; start = end; } QuickSort.sort(compactness, labels); for (int i = nbClass-1; i >=0 ; i--) { for (int j = 0; j < instancePerClass[labels[i]]; j++) { newData.add(data.instance(classIndex[labels[i]] + j)); } } return newData; } /** * Compute Euclidean distance between two sequences * @param x * @param y * @return */ private static double euclideanDistance(double[] x, double[] y) { double dist = 0; for (int i = 0; i < x.length; i++) { dist += (x[i]-y[i]) * (x[i]-y[i]); } return Math.sqrt(dist); } /** * Compute mean of a set of sequences * @param array * @return */ public static double[] arithmeticMean(double[][] array) { double[] mean = new double[array[0].length]; for (int i = 0; i < array[0].length; i++) { for (int j = 0; j < array.length; j++) { mean[i] += array[j][i]; } mean[i]/=array.length; } return mean; } }
5,628
27.429293
111
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/legacy/elastic_ensemble/fast_window_search/tools/Tools.java
/******************************************************************************* * Copyright (C) 2017 Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb * * This file is part of FastWWSearch. * * FastWWSearch is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 3 of the License. * * FastWWSearch is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with FastWWSearch. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ package tsml.classifiers.legacy.elastic_ensemble.fast_window_search.tools; import java.util.Random; /** * Code for the paper "Efficient search of the best warping window for Dynamic Time Warping" published in SDM18 * * Some basic tools for simple operations * * @author Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb * */ public class Tools { /** * Minimum of 3 elements * @param a * @param b * @param c * @return */ public final static double Min3(final double a, final double b, final double c) { return (a <= b) ? ((a <= c) ? a : c) : (b <= c) ? b : c; } /** * Argument for the minimum of 3 elements * @param a * @param b * @param c * @return */ public static int ArgMin3(final double a, final double b, final double c) { return (a <= b) ? ((a <= c) ? 0 : 2) : (b <= c) ? 1 : 2; } /** * Sum of an array * @param tab * @return */ public static double sum(final double... tab) { double res = 0.0; for (double d : tab) res += d; return res; } /** * Maximum of an array * @param tab * @return */ public static double max(final double... 
tab) { double max = Double.NEGATIVE_INFINITY; for (double d : tab){ if(max<d){ max = d; } } return max; } /** * Minimum of an array * @param tab * @return */ public static double min(final double... tab) { double min = Double.POSITIVE_INFINITY; for (double d : tab){ if(d<min){ min = d; } } return min; } /** * Generate random permutation given a length * @param length * @return */ public static final int[] randPermutation(int length) { int[] array = new int[length]; for (int i = 0; i < length; i++) { array[i] = i; } return randPermutation(array); } /** * Generate random permutation given an array * @param array * @return */ public static final int[] randPermutation(int[] array) { Random r = new Random(); int randNum; for (int i = 0; i < array.length; i++) { randNum = i + r.nextInt(array.length-i); swap(array, i, randNum); } return array; } /** * Swap operation * @param array * @param a * @param b * @return */ public static final int[] swap(int[] array, int a, int b){ int temp = array[a]; array[a] = array[b]; array[b] = temp; return array; } /** * Swap operation * @param array * @param a * @param b * @return */ public static final double[] swap(double[] array, int a, int b){ double temp = array[a]; array[a] = array[b]; array[b] = temp; return array; } /** * Squared Euclidean distance * @param a * @param b * @return */ public static final double squaredEuclidean(double a, double b) { return (a - b) * (a - b); } }
3,677
21.703704
111
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/legacy/elastic_ensemble/fast_window_search/tools/UCR2CSV.java
/******************************************************************************* * Copyright (C) 2017 Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb * * This file is part of FastWWSearch. * * FastWWSearch is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 3 of the License. * * FastWWSearch is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with FastWWSearch. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ package tsml.classifiers.legacy.elastic_ensemble.fast_window_search.tools; import java.io.BufferedReader; import java.io.File; import java.io.FileOutputStream; import java.io.FileReader; import java.io.IOException; import java.io.PrintWriter; /** * Code for the paper "Efficient search of the best warping window for Dynamic Time Warping" published in SDM18 * * Convert dataset in UCR format to CSV format * * @author Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb * */ public class UCR2CSV { public static void run(File f, File fout) { BufferedReader in = null; PrintWriter out = null; String line; String[] temp; boolean firstLine = true; try { in = new BufferedReader(new FileReader(f)); out = new PrintWriter(new FileOutputStream(fout), true); while ((line = in.readLine()) != null) { if (!line.isEmpty()) { if(firstLine){ int k = 0; while (line.charAt(k) == ' ') k++; line = line.substring(k); temp = line.split(","); out.print("class"); for (int j = 1; j < temp.length; j++) { out.print(",t"+(j-1)); } out.println(); firstLine=false; } int k = 0; while 
(line.charAt(k) == ' ') k++; line = line.substring(k); temp = line.split(","); out.print("'"+((int)Math.round(Double.valueOf(temp[0])))+"'"); for (int j = 1; j < temp.length; j++) { out.print(","+temp[j] ); } out.println(); } } } catch (IOException e) { System.err.println("PB d'I/O"); e.printStackTrace(); } finally { try { in.close(); } catch (IOException e) { e.printStackTrace(); } out.close(); } } }
2,629
28.886364
111
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/legacy/elastic_ensemble/fast_window_search/tools/UCRArchive.java
/******************************************************************************* * Copyright (C) 2017 Chang Wei Tan * * This file is part of FastWWSearch. * * FastWWSearch is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 3 of the License. * * FastWWSearch is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with FastWWSearch. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ package tsml.classifiers.legacy.elastic_ensemble.fast_window_search.tools; /** * Stores dataset names for the Standard UCR Archive * * Yanping Chen, Eamonn Keogh, Bing Hu, Nurjahan Begum, Anthony Bagnall, Abdullah Mueen and Gustavo Batista (2015). * The UCR Time Series Classification Archive. 
* URL www.cs.ucr.edu/~eamonn/time_series_data/ * * @author Chang Wei Tan * */ public class UCRArchive { /** * Sorted in increasing DTW computations per test series */ public static String[] sortedDataset = new String[]{"SonyAIBORobotSurface","ItalyPowerDemand", "MoteStrain","SonyAIBORobotSurfaceII","TwoLeadECG","ECGFiveDays","CBF", "DiatomSizeReduction","Gun_Point","Coffee","FaceFour","ArrowHead","ECG200", "Symbols","ShapeletSim","BeetleFly","BirdChicken","ToeSegmentation1", "DistalPhalanxOutlineAgeGroup","DistalPhalanxTW","MiddlePhalanxOutlineAgeGroup", "MiddlePhalanxTW","ToeSegmentation2","Wine","Beef","Plane","ProximalPhalanxTW", "OliveOil","synthetic_control","DistalPhalanxOutlineCorrect","Lighting7", "MiddlePhalanxOutlineCorrect","FacesUCR","Meat","Trace","ProximalPhalanxOutlineAgeGroup", "Herring","Car","MedicalImages","Lighting2","Ham","ProximalPhalanxOutlineCorrect", "InsectWingbeatSound","MALLAT","SwedishLeaf","CinC_ECG_torso","Adiac","Worms","WormsTwoClass", "ECG5000","Earthquakes","WordsSynonyms","FaceAll","ChlorineConcentration","FISH","OSULeaf", "Strawberry","Cricket_X","Cricket_Y","Cricket_Z","50words","yoga","Two_Patterns", "PhalangesOutlinesCorrect","wafer","Haptics","Computers","InlineSkate","Phoneme", "LargeKitchenAppliances","RefrigerationDevices","ScreenType","SmallKitchenAppliances", "uWaveGestureLibrary_X","uWaveGestureLibrary_Y","uWaveGestureLibrary_Z","ShapesAll", "FordB","FordA","UWaveGestureLibraryAll","ElectricDevices","HandOutlines", "StarLightCurves","NonInvasiveFatalECG_Thorax1","NonInvasiveFatalECG_Thorax2"}; /** * Datasets that are small and fast to classify */ public static String[] smallDataset = new String[]{"SonyAIBORobotSurface","ItalyPowerDemand", "MoteStrain","SonyAIBORobotSurfaceII","TwoLeadECG","ECGFiveDays","CBF", "DiatomSizeReduction","Gun_Point","Coffee","FaceFour","ArrowHead","ECG200", "Symbols","ShapeletSim","BeetleFly","BirdChicken","ToeSegmentation1", 
"DistalPhalanxOutlineAgeGroup","DistalPhalanxTW","MiddlePhalanxOutlineAgeGroup", "MiddlePhalanxTW","ToeSegmentation2","Wine","Beef","Plane","ProximalPhalanxTW", "OliveOil","synthetic_control","DistalPhalanxOutlineCorrect","Lighting7", "MiddlePhalanxOutlineCorrect","FacesUCR","Meat","Trace","ProximalPhalanxOutlineAgeGroup", "Herring","Car","MedicalImages","Lighting2","Ham","ProximalPhalanxOutlineCorrect", "InsectWingbeatSound","MALLAT","SwedishLeaf","CinC_ECG_torso","Adiac","Worms","WormsTwoClass", "ECG5000","Earthquakes","WordsSynonyms","FaceAll","ChlorineConcentration","FISH","OSULeaf", "Strawberry","Cricket_X","Cricket_Y","Cricket_Z","50words","yoga"}; /** * New datasets used in * Time-Series Classification with COTE: The Collective of Transformation-Based Ensembles (COTE) and * Time series classification with ensembles of elastic distance measures (EE) * * Refer to http://www.timeseriesclassification.com/dataset.php */ public static String[] newTSCProblems = new String[] {"ElectricDeviceOn","EpilepsyX", "EthanolLevel","HeartbeatBIDMC","NonInvasiveFetalECGThorax1","NonInvasiveFetalECGThorax2"}; /** * Size of all dataset * @return */ public static int totalDatasets(){ return newTSCProblems.length + sortedDataset.length; } public static String total(){ return newTSCProblems.length + sortedDataset.length + " in total."; } }
4,567
49.755556
116
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/legacy/elastic_ensemble/fast_window_search/windowSearcher/FastWWS.java
/******************************************************************************* * Copyright (C) 2017 Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb * * This file is part of FastWWSearch. * * FastWWSearch is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 3 of the License. * * FastWWSearch is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with FastWWSearch. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ package tsml.classifiers.legacy.elastic_ensemble.fast_window_search.windowSearcher; import java.util.ArrayList; import java.util.Collections; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.LazyAssessNN; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.SequenceStatsCache; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.LazyAssessNN.RefineReturnType; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.sequences.SymbolicSequence; /** * Code for the paper "Efficient search of the best warping window for Dynamic Time Warping" published in SDM18 * * Search for the best warping window using Fast Warping Window Search (FastWWS) * * @author Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb * */ public class FastWWS extends WindowSearcher { // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Internal types // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- 
--- /** * Potential nearest neighbour */ private static class PotentialNN { /** * Status of the PotentialNN */ public enum Status { NN, // This is the Nearest Neighbour BC, // Best Candidate so far } public int index; // Index of the sequence in train[] public int r; // Window validity public double distance; // Computed distance public Status status; // Is that public PotentialNN() { this.index = Integer.MIN_VALUE; // Will be an invalid, negative, index. this.r = Integer.MAX_VALUE; // Max: stands for "haven't found yet" this.distance = Double.POSITIVE_INFINITY; // Infinity: stands for "not computed yet". this.status = Status.BC; // By default, we don't have any found NN. } /** * Setting the Potential NN for the query at a window * @param index: Index in training dataset * @param r: Window validity with query * @param distance: Distance to query * @param status: Status of the nearest neighbour */ public void set(int index, int r, double distance, Status status) { this.index = index; this.r = r; this.distance = distance; this.status = status; } /** * Check if this is a nearest neighbour for the query at a window * @return */ public boolean isNN() { return this.status == Status.NN; } @Override public String toString() { return "" + this.index; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; PotentialNN that = (PotentialNN) o; return index == that.index; } } // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Fields // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- private static final long serialVersionUID = 1536192551485201554L; private PotentialNN[][] nns; // Our main structure private boolean init; // Have we initialize our structure? 
// --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Constructor // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- public FastWWS() { super(); forwardSearch = false; init = false; } public FastWWS(String name) { super(); forwardSearch = false; init = false; datasetName = name; } // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Methods // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- public String doTime(long start){ long duration = System.currentTimeMillis() - start; return "" + (duration / 1000) + " s " + (duration % 1000) + " ms"; } public String doTime(long start, long now){ long duration = now - start; return "" + (duration / 1000) + " s " + (duration % 1000) + " ms"; } /** * Initializing our main structure * * Data: * protected SymbolicSequence[] train; -- Array of sequences * protected HashMap<String, ArrayList<SymbolicSequence>> classedData; -- Sequences by classes * protected HashMap<String, ArrayList<Integer>> classedDataIndices; -- Sequences index in train by classes * protected String[] classMap; -- Class per index * * We are using SymbolicSequenceScoredClassed to retain what is the nearest neighbour: * this.public SymbolicSequence sequence; -- The sequence of interest, the nearest neighbour itself * this.public String classValue; -- Class of the NN * this.public int index; -- Index of the NN in the train * this.public public int smallestValidWindow; -- Smallest window that would give the same distance * this.public double score -- Value of the distance * * When computing DTW, we get a DTWResult, storing more data than just the distance: * this.public double distance; -- the DTW distance * this.public int r; -- the smallest window that would give the same path */ protected void initTable() { if 
(train.length < 2) { System.err.println("Set is to small: " + train.length + " sequence. At least 2 sequences needed."); } System.out.println("Starting optimisation"); // // --- STATS DECLARATIONS // // Timing and progress output long timeInit = System.currentTimeMillis(); // // --- ALGORITHM DECLARATIONS & INITIALISATION // // Cache: SequenceStatsCache cache = new SequenceStatsCache(train, maxWindow); // We need a N*L storing area. We favorite an access per window size. // For each [Window Size][sequence], we store the nearest neighbour. See above. nns = new PotentialNN[maxWindow + 1][train.length]; for (int win = 0; win < maxWindow + 1; ++win) { for (int len = 0; len < train.length; ++len) { nns[win][len] = new PotentialNN(); } } // Vector of LazyUCR distance, propagating bound info "horizontally" LazyAssessNN[] lazyUCR = new LazyAssessNN[train.length]; for (int i = 0; i < train.length; ++i) { lazyUCR[i] = new LazyAssessNN(cache); } // "Challengers" that compete with each other to be the NN of query ArrayList<LazyAssessNN> challengers = new ArrayList<LazyAssessNN>(train.length); System.out.println("Initialisation done ("+doTime(timeInit)+")"); // // --- ALGORITHM // // Iteration for all TS, starting with the second one (first is a reference) for(int current=1; current < train.length; ++current){ // --- --- Get the data --- --- SymbolicSequence sCurrent = train[current]; // Clear off the previous challengers and add all the previous sequences challengers.clear(); for(int previous=0; previous < current; ++previous) { LazyAssessNN d = lazyUCR[previous]; d.set(train[previous], previous, sCurrent, current); challengers.add(d); } // --- --- For each, decreasing (positive) windows --- --- for(int win = maxWindow; win > -1; --win){ // --- Get the data PotentialNN currPNN = nns[win][current]; if(currPNN.isNN()){ // --- --- WITH NN CASE --- --- // We already have a NN for sure, but we still have to check if current is a new NN for previous for(int previous = 0; previous < 
current; ++previous){ // --- Get the data PotentialNN prevNN = nns[win][previous]; // --- Try to beat the previous best NN double toBeat = prevNN.distance; LazyAssessNN challenger = lazyUCR[previous]; RefineReturnType rrt = challenger.tryToBeat(toBeat, win); // --- Check the result if(rrt == RefineReturnType.New_best){ int r = challenger.getMinWindowValidityForFullDistance(); double d = challenger.getDistance(win); prevNN.set(current, r, d, PotentialNN.Status.NN); } } } // END WITH NN CASE else { // --- --- WITHOUT NN CASE --- --- // We don't have a NN yet. // Sort the challengers so we have a better chance to organize a good pruning. Collections.sort(challengers); for (LazyAssessNN challenger : challengers) { // --- Get the data int previous = challenger.indexQuery; PotentialNN prevNN = nns[win][previous]; // --- First we want to beat the current best candidate: double toBeat = currPNN.distance; RefineReturnType rrt = challenger.tryToBeat(toBeat, win); // --- Check the result if (rrt == RefineReturnType.New_best) { int r = challenger.getMinWindowValidityForFullDistance(); double d = challenger.getDistance(win); currPNN.set(previous, r, d, PotentialNN.Status.BC); } // --- Now check for previous NN // --- Try to beat the previous best NN toBeat = prevNN.distance; challenger = lazyUCR[previous]; rrt = challenger.tryToBeat(toBeat, win); // --- Check the result if (rrt == RefineReturnType.New_best) { int r = challenger.getMinWindowValidityForFullDistance(); double d = challenger.getDistance(win); prevNN.set(current, r, d, PotentialNN.Status.NN); } } // END for(AutoRefineDistance challenger: challengers) // --- When we looked at every past sequences, // the current best candidate is really the best one, so the NN. 
// So assign the current NN to all the windows that are valid int r = currPNN.r; double d = currPNN.distance; int index = currPNN.index; for (int w = win; w >= r; --w) { nns[w][current].set(index, r, d, PotentialNN.Status.NN); } } // END WITHOUT NN CASE } // END for(int win=maxWindow; win>-1; --win) } // END for(int current=1; current < train.length; ++current) System.out.println("done! (" + doTime (timeInit) + ")"); this.init = true; } // END initTable() @Override protected double evalSolution(int warpingWindow) { // Will only be called once if (!init) { initTable(); } // Error counter: int nErrors = 0; for (int i = 0; i < train.length; i++) { if (!classMap[nns[warpingWindow][i].index].equals(classMap[i])) { nErrors++; } } return 1.0 * nErrors / train.length; } /** * Get the best warping window found */ @Override public int getBestWin() { return bestWarpingWindow; } /** * Get the LOOCV accuracy for the best warping window */ @Override public double getBestScore() { return bestScore; } /** * Set the result directory */ @Override public void setResDir(String path) { resDir = path; } /** * Set type of classifier */ @Override public void setType(String t) { type = t; } }
14,132
40.445748
118
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/legacy/elastic_ensemble/fast_window_search/windowSearcher/FastWWSByPercent.java
/******************************************************************************* * Copyright (C) 2017 Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb * * This file is part of FastWWSearch. * * FastWWSearch is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 3 of the License. * * FastWWSearch is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with FastWWSearch. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ package tsml.classifiers.legacy.elastic_ensemble.fast_window_search.windowSearcher; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.LazyAssessNN; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.LazyAssessNN.RefineReturnType; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.SequenceStatsCache; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.sequences.SymbolicSequence; import java.util.ArrayList; import java.util.Collections; /** * Code for the paper "Efficient search of the best warping window for Dynamic Time Warping" published in SDM18 * <p> * Search for the best warping window using Fast Warping Window Search (FastWWS) * Searches through all the percentage of time series length * * @author Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb */ public class FastWWSByPercent extends WindowSearcher { // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Internal types // --- --- --- --- --- --- --- --- --- 
--- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Fields // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- private static final long serialVersionUID = 1536192551485201554L; private PotentialNN[][] nns; // Our main structure private boolean init; // Have we initialize our structure? // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Constructor // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- public FastWWSByPercent() { super(); forwardSearch = false; init = false; } public FastWWSByPercent(String name) { super(); forwardSearch = false; init = false; datasetName = name; } // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Methods // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- public String doTime(long start) { long duration = System.currentTimeMillis() - start; return "" + (duration / 1000) + " s " + (duration % 1000) + " ms"; } public String doTime(long start, long now) { long duration = now - start; return "" + (duration / 1000) + " s " + (duration % 1000) + " ms"; } /** * Initializing our main structure * <p> * Data: * protected SymbolicSequence[] train; -- Array of sequences * protected HashMap<String, ArrayList<SymbolicSequence>> classedData; -- Sequences by classes * protected HashMap<String, ArrayList<Integer>> classedDataIndices; -- Sequences index in train by classes * protected String[] classMap; -- Class per index * <p> * We are using SymbolicSequenceScoredClassed to retain what is the nearest neighbour: * this.public SymbolicSequence sequence; -- The sequence of interest, the nearest 
neighbour itself * this.public String classValue; -- Class of the NN * this.public int index; -- Index of the NN in the train * this.public public int smallestValidWindow; -- Smallest window that would give the same distance * this.public double score -- Value of the distance * <p> * When computing DTW, we get a DTWResult, storing more data than just the distance: * this.public double distance; -- the DTW distance * this.public int r; -- the smallest window that would give the same path */ protected void initTable() { if (train.length < 2) { System.err.println("Set is to small: " + train.length + " sequence. At least 2 sequences needed."); } System.out.println("Starting optimisation"); // // --- STATS DECLARATIONS // // Timing and progress output long timeInit = System.currentTimeMillis(); // // --- ALGORITHM DECLARATIONS & INITIALISATION // // Cache: SequenceStatsCache cache = new SequenceStatsCache(train, maxWindow); // We need a N*L storing area. We favorite an access per window size. // For each [Window Size][sequence], we store the nearest neighbour. See above. 
nns = new PotentialNN[100 + 1][train.length]; for (int win = 0; win < 100 + 1; ++win) { for (int len = 0; len < train.length; ++len) { nns[win][len] = new PotentialNN(); } } // Vector of LazyUCR distance, propagating bound info "horizontally" LazyAssessNN[] lazyUCR = new LazyAssessNN[train.length]; for (int i = 0; i < train.length; ++i) { lazyUCR[i] = new LazyAssessNN(cache); } // "Challengers" that compete with each other to be the NN of query ArrayList <LazyAssessNN> challengers = new ArrayList <LazyAssessNN>(train.length); System.out.println("Initialisation done (" + doTime(timeInit) + ")"); // // --- ALGORITHM // // Iteration for all TS, starting with the second one (first is a reference) for (int current = 1; current < train.length; ++current) { // --- --- Get the data --- --- SymbolicSequence sCurrent = train[current]; // Clear off the previous challengers and add all the previous sequences challengers.clear(); for (int previous = 0; previous < current; ++previous) { LazyAssessNN d = lazyUCR[previous]; d.set(train[previous], previous, sCurrent, current); challengers.add(d); } // --- --- For each, decreasing (positive) windows --- --- for (int percent = 100; percent > -1; --percent) { int win = percentToLength(percent); // --- Get the data PotentialNN currPNN = nns[percent][current]; if (currPNN.isNN()) { // --- --- WITH NN CASE --- --- // We already have a NN for sure, but we still have to check if current is a new NN for previous for (int previous = 0; previous < current; ++previous) { // --- Get the data PotentialNN prevNN = nns[percent][previous]; // --- Try to beat the previous best NN double toBeat = prevNN.distance; LazyAssessNN challenger = lazyUCR[previous]; RefineReturnType rrt = challenger.tryToBeat(toBeat, win); // --- Check the result if (rrt == RefineReturnType.New_best) { int r = challenger.getMinWindowValidityForFullDistance(); double d = challenger.getDistance(win); prevNN.set(current, r, d, PotentialNN.Status.NN); } } } // END WITH NN CASE 
else { // --- --- WITHOUT NN CASE --- --- // We don't have a NN yet. // Sort the challengers so we have a better chance to organize a good pruning. Collections.sort(challengers); for (LazyAssessNN challenger : challengers) { // --- Get the data int previous = challenger.indexQuery; PotentialNN prevNN = nns[percent][previous]; // --- First we want to beat the current best candidate: double toBeat = currPNN.distance; RefineReturnType rrt = challenger.tryToBeat(toBeat, win); // --- Check the result if (rrt == RefineReturnType.New_best) { int r = challenger.getMinWindowValidityForFullDistance(); double d = challenger.getDistance(win); currPNN.set(previous, r, d, PotentialNN.Status.BC); } // --- Now check for previous NN // --- Try to beat the previous best NN toBeat = prevNN.distance; challenger = lazyUCR[previous]; rrt = challenger.tryToBeat(toBeat, win); // --- Check the result if (rrt == RefineReturnType.New_best) { int r = challenger.getMinWindowValidityForFullDistance(); double d = challenger.getDistance(win); prevNN.set(current, r, d, PotentialNN.Status.NN); } } // END for(AutoRefineDistance challenger: challengers) // --- When we looked at every past sequences, // the current best candidate is really the best one, so the NN. // So assign the current NN to all the windows that are valid int r = currPNN.r; int rEnd = lengthToPercent(r); double d = currPNN.distance; int index = currPNN.index; for (int w = percent; w >= rEnd; --w) { nns[w][current].set(index, r, d, PotentialNN.Status.NN); } } // END WITHOUT NN CASE } // END for(int win=maxWindow; win>-1; --win) } // END for(int current=1; current < train.length; ++current) System.out.println("done! 
(" + doTime(timeInit) + ")"); this.init = true; } // END initTablePercent() @Override protected double evalSolution(int warpingWindow) { // Will only be called once if (!init) { initTable(); } // Error counter: int nErrors = 0; for (int i = 0; i < train.length; i++) { if (!classMap[nns[warpingWindow][i].index].equals(classMap[i])) { nErrors++; } } return 1.0 * nErrors / train.length; } @Override protected void searchBestWarpingWindow() { int currentWindowPercent = (forwardSearch) ? 0 : 100; double currentScore; bestScore = 1.0; long startTime = System.currentTimeMillis(); while (currentWindowPercent >= 0 && currentWindowPercent <= 100) { currentScore = evalSolution(currentWindowPercent); long endTime = System.currentTimeMillis(); long accumulatedTime = (endTime - startTime); // saving results // searchResults[currentWindowPercent] = currentWindowPercent + "," + currentScore + "," + accumulatedTime; // out.println(currentWindowPercent + " " + currentScore + " " + accumulatedTime); // out.flush(); if (currentScore <= bestScore || (currentScore == bestScore && !forwardSearch)) { bestScore = currentScore; bestWindowPercent = currentWindowPercent; } else if (greedySearch && currentScore > bestScore) { break; } currentWindowPercent = (forwardSearch) ? 
currentWindowPercent + 1 : currentWindowPercent - 1; } bestWarpingWindow = percentToLength(bestWindowPercent); } /** * Get the best warping window found */ @Override public int getBestWin() { return bestWarpingWindow; } /** * Get the best warping window in percentage found */ @Override public int getBestPercent() { return bestWindowPercent; } /** * Get the LOOCV accuracy for the best warping window */ @Override public double getBestScore() { return bestScore; } /** * Set the result directory */ @Override public void setResDir(String path) { resDir = path; } /** * Set type of classifier */ @Override public void setType(String t) { type = t; } /** * Potential nearest neighbour */ private static class PotentialNN { public int index; // Index of the sequence in train[] public int r; // Window validity public double distance; // Computed distance public Status status; // Is that public PotentialNN() { this.index = Integer.MIN_VALUE; // Will be an invalid, negative, index. this.r = Integer.MAX_VALUE; // Max: stands for "haven't found yet" this.distance = Double.POSITIVE_INFINITY; // Infinity: stands for "not computed yet". this.status = Status.BC; // By default, we don't have any found NN. 
} /** * Setting the Potential NN for the query at a window * * @param index: Index in training dataset * @param r: Window validity with query * @param distance: Distance to query * @param status: Status of the nearest neighbour */ public void set(int index, int r, double distance, Status status) { this.index = index; this.r = r; this.distance = distance; this.status = status; } /** * Check if this is a nearest neighbour for the query at a window * * @return */ public boolean isNN() { return this.status == Status.NN; } @Override public String toString() { return "" + this.index; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; PotentialNN that = (PotentialNN) o; return index == that.index; } /** * Status of the PotentialNN */ public enum Status { NN, // This is the Nearest Neighbour BC, // Best Candidate so far } } }
15,784
39.788114
118
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/legacy/elastic_ensemble/fast_window_search/windowSearcher/FastWWSPrunedDTW.java
/******************************************************************************* * Copyright (C) 2017 Chang Wei Tan * * This file is part of FastWWSearch. * * FastWWSearch is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 3 of the License. * * FastWWSearch is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with FastWWSearch. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ package tsml.classifiers.legacy.elastic_ensemble.fast_window_search.windowSearcher; import java.util.ArrayList; import java.util.Collections; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.LazyAssessNN; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.SequenceStatsCache; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.LazyAssessNN.RefineReturnType; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.sequences.SymbolicSequence; /** * Code for the paper "Efficient search of the best warping window for Dynamic Time Warping" published in SDM18 * * Search for the best warping window using Fast Warping Window Search with PrunedDTW (FastWWS) * * We use the original PrunedDTW C++ code from http://sites.labic.icmc.usp.br/prunedDTW/ * and modify it into Java * * @author Chang Wei Tan * */ public class FastWWSPrunedDTW extends WindowSearcher { // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Internal types // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- 
--- /** * Potential nearest neighbour */ private static class PotentialNN { /** * Status of the PotentialNN */ public enum Status { NN, // This is the Nearest Neighbour BC, // Best Candidate so far } public int index; // Index of the sequence in train[] public int r; // Window validity public double distance; // Computed distance public Status status; // Is that public PotentialNN() { this.index = Integer.MIN_VALUE; // Will be an invalid, negative, index. this.r = Integer.MAX_VALUE; // Max: stands for "haven't found yet" this.distance = Double.POSITIVE_INFINITY; // Infinity: stands for "not computed yet". this.status = Status.BC; // By default, we don't have any found NN. } /** * Setting the Potential NN for the query at a window * @param index: Index in training dataset * @param r: Window validity with query * @param distance: Distance to query * @param status: Status of the nearest neighbour */ public void set(int index, int r, double distance, Status status) { this.index = index; this.r = r; this.distance = distance; this.status = status; } /** * Check if this is a nearest neighbour for the query at a window * @return */ public boolean isNN() { return this.status == Status.NN; } @Override public String toString() { return "" + this.index; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; PotentialNN that = (PotentialNN) o; return index == that.index; } } // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Fields // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- private static final long serialVersionUID = 1536192551485201554L; private PotentialNN[][] nns; // Our main structure private boolean init; // Have we initialize our structure? 
// --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Constructor // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- public FastWWSPrunedDTW() { super(); forwardSearch = false; init = false; } public FastWWSPrunedDTW(String name) { super(); forwardSearch = false; init = false; datasetName = name; } // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Methods // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- public String doTime(long start){ long duration = System.currentTimeMillis() - start; return "" + (duration / 1000) + " s " + (duration % 1000) + " ms"; } public String doTime(long start, long now){ long duration = now - start; return "" + (duration / 1000) + " s " + (duration % 1000) + " ms"; } /** * Initializing our main structure * * Data: * protected SymbolicSequence[] train; -- Array of sequences * protected HashMap<String, ArrayList<SymbolicSequence>> classedData; -- Sequences by classes * protected HashMap<String, ArrayList<Integer>> classedDataIndices; -- Sequences index in train by classes * protected String[] classMap; -- Class per index * * We are using SymbolicSequenceScoredClassed to retain what is the nearest neighbour: * this.public SymbolicSequence sequence; -- The sequence of interest, the nearest neighbour itself * this.public String classValue; -- Class of the NN * this.public int index; -- Index of the NN in the train * this.public public int smallestValidWindow; -- Smallest window that would give the same distance * this.public double score -- Value of the distance * * When computing DTW, we get a DTWResult, storing more data than just the distance: * this.public double distance; -- the DTW distance * this.public int r; -- the smallest window that would give the same path */ protected 
void initTable() { if (train.length < 2) { System.err.println("Set is to small: " + train.length + " sequence. At least 2 sequences needed."); } System.out.println("Starting optimisation"); // // --- STATS DECLARATIONS // // Timing and progress output long timeInit = System.currentTimeMillis(); // // --- ALGORITHM DECLARATIONS & INITIALISATION // // Cache: SequenceStatsCache cache = new SequenceStatsCache(train, maxWindow); // We need a N*L storing area. We favorite an access per window size. // For each [Window Size][sequence], we store the nearest neighbour. See above. nns = new PotentialNN[maxWindow + 1][train.length]; for (int win = 0; win < maxWindow + 1; ++win) { for (int len = 0; len < train.length; ++len) { nns[win][len] = new PotentialNN(); } } // Vector of LazyUCR distance, propagating bound info "horizontally" LazyAssessNN[] lazyUCR = new LazyAssessNN[train.length]; for (int i = 0; i < train.length; ++i) { lazyUCR[i] = new LazyAssessNN(cache); } // "Challengers" that compete with each other to be the NN of query ArrayList<LazyAssessNN> challengers = new ArrayList<LazyAssessNN>(train.length); System.out.println("Initialisation done ("+doTime(timeInit)+")"); // // --- ALGORITHM // // Iteration for all TS, starting with the second one (first is a reference) for(int current=1; current < train.length; ++current){ // --- --- Get the data --- --- SymbolicSequence sCurrent = train[current]; // Clear off the previous challengers and add all the previous sequences challengers.clear(); for(int previous=0; previous < current; ++previous) { LazyAssessNN d = lazyUCR[previous]; d.set(train[previous], previous, sCurrent, current); d.initUBPartial(); challengers.add(d); } // --- --- First do full window using PrunedDTW // Currently compute UB on the go for PrunedDTW // Could have done Euclidean Distance first then use Euclidean Distance as the UB int win = maxWindow; PotentialNN currPNN = nns[win][current]; if(currPNN.isNN()){ // --- --- WITH NN CASE --- --- // We 
already have a NN for sure, but we still have to check if current is a new NN for previous for(int previous = 0; previous < current; ++previous){ // --- Get the data PotentialNN prevNN = nns[win][previous]; // --- Try to beat the previous best NN double toBeat = prevNN.distance; LazyAssessNN challenger = lazyUCR[previous]; // Used PrunedDTW instead of DTW here RefineReturnType rrt = challenger.tryToBeatPrunedDTW(toBeat, win); // --- Check the result if(rrt == RefineReturnType.New_best){ int r = challenger.getMinWindowValidityForFullDistance(); double d = challenger.getDistance(win); prevNN.set(current, r, d, PotentialNN.Status.NN); } } } // END WITH NN CASE else { // --- --- WITHOUT NN CASE --- --- // We don't have a NN yet. // Sort the challengers so we have a better chance to organize a good pruning. Collections.sort(challengers); for (LazyAssessNN challenger : challengers) { // --- Get the data int previous = challenger.indexQuery; PotentialNN prevNN = nns[win][previous]; // --- First we want to beat the current best candidate: double toBeat = currPNN.distance; // Used PrunedDTW instead of DTW here RefineReturnType rrt = challenger.tryToBeatPrunedDTW(toBeat, win); // --- Check the result if (rrt == RefineReturnType.New_best) { int r = challenger.getMinWindowValidityForFullDistance(); double d = challenger.getDistance(win); currPNN.set(previous, r, d, PotentialNN.Status.BC); } // --- Now check for previous NN // --- Try to beat the previous best NN toBeat = prevNN.distance; challenger = lazyUCR[previous]; rrt = challenger.tryToBeatPrunedDTW(toBeat, win); // --- Check the result if (rrt == RefineReturnType.New_best) { int r = challenger.getMinWindowValidityForFullDistance(); double d = challenger.getDistance(win); prevNN.set(current, r, d, PotentialNN.Status.NN); } } // END for(AutoRefineDistance challenger: challengers) // --- When we looked at every past sequences, // the current best candidate is really the best one, so the NN. 
int r = currPNN.r; double d = currPNN.distance; int index = currPNN.index; for (int w = win; w >= r; --w) { nns[w][current].set(index, r, d, PotentialNN.Status.NN); } } // END WITHOUT NN CASE // --- --- For each, decreasing (positive) windows --- --- for(win=maxWindow-1; win>-1; --win){ // --- Get the data currPNN = nns[win][current]; if(currPNN.isNN()){ // --- --- WITH NN CASE --- --- // We already have a NN for sure, but we still have to check if current is a new NN for previous for(int previous = 0; previous < current; ++previous){ // --- Get the data PotentialNN prevNN = nns[win][previous]; // --- Try to beat the previous best NN double toBeat = prevNN.distance; LazyAssessNN challenger = lazyUCR[previous]; RefineReturnType rrt = challenger.tryToBeat(toBeat, win); // --- Check the result if(rrt == RefineReturnType.New_best){ int r = challenger.getMinWindowValidityForFullDistance(); double d = challenger.getDistance(win); prevNN.set(current, r, d, PotentialNN.Status.NN); } } } // END WITH NN CASE else { // --- --- WITHOUT NN CASE --- --- // We don't have a NN yet. // Sort the challengers so we have a better chance to organize a good pruning. 
Collections.sort(challengers); for (LazyAssessNN challenger : challengers) { // --- Get the data int previous = challenger.indexQuery; PotentialNN prevNN = nns[win][previous]; // --- First we want to beat the current best candidate: double toBeat = currPNN.distance; RefineReturnType rrt = challenger.tryToBeat(toBeat, win); // --- Check the result if (rrt == RefineReturnType.New_best) { int r = challenger.getMinWindowValidityForFullDistance(); double d = challenger.getDistance(win); currPNN.set(previous, r, d, PotentialNN.Status.BC); } // --- Now check for previous NN // --- Try to beat the previous best NN toBeat = prevNN.distance; challenger = lazyUCR[previous]; rrt = challenger.tryToBeat(toBeat, win); // --- Check the result if (rrt == RefineReturnType.New_best) { int r = challenger.getMinWindowValidityForFullDistance(); double d = challenger.getDistance(win); prevNN.set(current, r, d, PotentialNN.Status.NN); } } // END for(AutoRefineDistance challenger: challengers) // --- When we looked at every past sequences, // the current best candidate is really the best one, so the NN. int r = currPNN.r; double d = currPNN.distance; int index = currPNN.index; for (int w = win; w >= r; --w) { nns[w][current].set(index, r, d, PotentialNN.Status.NN); } } // END WITHOUT NN CASE } // END for(int win=maxWindow; win>-1; --win) } // END for(int current=1; current < train.length; ++current) System.out.println("done! 
(" + doTime (timeInit) + ")"); this.init = true; } // END initTable() @Override protected double evalSolution(int warpingWindow) { // Will only be called once if (!init) { initTable(); } // Our counter: int nErrors = 0; // test fold number is nFold for (int i = 0; i < train.length; i++) { if (!classMap[nns[warpingWindow][i].index].equals(classMap[i])) { nErrors++; } } return 1.0 * nErrors / train.length; } /** * Get the best warping window found */ @Override public int getBestWin() { return bestWarpingWindow; } /** * Get the LOOCV accuracy for the best warping window */ @Override public double getBestScore() { return bestScore; } /** * Set the result directory */ @Override public void setResDir(String path) { resDir = path; } /** * Set type of classifier */ @Override public void setType(String t) { type = t; } }
17,774
40.823529
118
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/legacy/elastic_ensemble/fast_window_search/windowSearcher/LbKeoghPrunedDTW.java
/******************************************************************************* * Copyright (C) 2017 Chang Wei Tan * * This file is part of FastWWSearch. * * FastWWSearch is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 3 of the License. * * FastWWSearch is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with FastWWSearch. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ package tsml.classifiers.legacy.elastic_ensemble.fast_window_search.windowSearcher; import java.io.PrintStream; import java.util.ArrayList; import java.util.HashMap; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.MonoDoubleItemSet; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.sequences.SymbolicSequence; import weka.core.Attribute; import weka.core.Instance; import weka.core.Instances; /** * Code for the paper "Efficient search of the best warping window for Dynamic Time Warping" published in SDM18 * * Search for the best warping window using PrunedDTW * * We use the original PrunedDTW C++ code from http://sites.labic.icmc.usp.br/prunedDTW/ * and modify it into Java * * Original paper: * Silva, D. F., & Batista, G. E. (2016, June). * Speeding up all-pairwise dynamic time warping matrix calculation. * In Proceedings of the 2016 SIAM International Conference on Data Mining (pp. 837-845). * Society for Industrial and Applied Mathematics. 
* * @author Chang Wei Tan * */ public class LbKeoghPrunedDTW extends WindowSearcher { // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Fields // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- private static final long serialVersionUID = -1561497612657542978L; public PrintStream out; // Output print private String[] searchResults; // Our results private int[][] nns; // Similar to our main structure private double[][] dist; // Matrix to store the distances // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Constructor // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- public LbKeoghPrunedDTW() { super(); out = System.out; } public LbKeoghPrunedDTW(String name) { super(); out = System.out; datasetName = name; } // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Methods // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- @Override public void buildClassifier(Instances data) throws Exception { // Initialise training dataset Attribute classAttribute = data.classAttribute(); classedData = new HashMap<>(); classedDataIndices = new HashMap<>(); for (int c = 0; c < data.numClasses(); c++) { classedData.put(data.classAttribute().value(c), new ArrayList<SymbolicSequence>()); classedDataIndices.put(data.classAttribute().value(c), new ArrayList<Integer>()); } train = new SymbolicSequence[data.numInstances()]; classMap = new String[train.length]; maxLength = 0; for (int i = 0; i < train.length; i++) { Instance sample = data.instance(i); MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1]; maxLength = Math.max(maxLength, sequence.length); int shift = 
(sample.classIndex() == 0) ? 1 : 0; for (int t = 0; t < sequence.length; t++) { sequence[t] = new MonoDoubleItemSet(sample.value(t + shift)); } train[i] = new SymbolicSequence(sequence); String clas = sample.stringValue(classAttribute); classMap[i] = clas; classedData.get(clas).add(train[i]); classedDataIndices.get(clas).add(i); } warpingMatrix = new double[maxLength][maxLength]; U = new double[maxLength]; L = new double[maxLength]; maxWindow = Math.round(1 * maxLength); searchResults = new String[maxWindow+1]; nns = new int[maxWindow+1][train.length]; dist = new double[train.length][train.length]; // Start searching for the best window searchBestWarpingWindow(); // Saving best windows found System.out.println("Windows found=" + bestWarpingWindow + " Best Acc=" + (1-bestScore)); } /** * This is similar to buildClassifier but it is an estimate. * This is used for large dataset where it takes very long to run. * The main purpose of this is to get the run time and not actually search for the best window. * We call this to draw Figure 1 of our SDM18 paper * @param data * @param estimate * @throws Exception */ @Override public void buildClassifierEstimate(Instances data, int estimate) throws Exception { // Initialise training dataset Attribute classAttribute = data.classAttribute(); classedData = new HashMap<>(); classedDataIndices = new HashMap<>(); for (int c = 0; c < data.numClasses(); c++) { classedData.put(data.classAttribute().value(c), new ArrayList<SymbolicSequence>()); classedDataIndices.put(data.classAttribute().value(c), new ArrayList<Integer>()); } train = new SymbolicSequence[data.numInstances()]; classMap = new String[train.length]; maxLength = 0; for (int i = 0; i < train.length; i++) { Instance sample = data.instance(i); MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1]; maxLength = Math.max(maxLength, sequence.length); int shift = (sample.classIndex() == 0) ? 
1 : 0; for (int t = 0; t < sequence.length; t++) { sequence[t] = new MonoDoubleItemSet(sample.value(t + shift)); } train[i] = new SymbolicSequence(sequence); String clas = sample.stringValue(classAttribute); classMap[i] = clas; classedData.get(clas).add(train[i]); classedDataIndices.get(clas).add(i); } warpingMatrix = new double[maxLength][maxLength]; U = new double[maxLength]; L = new double[maxLength]; maxWindow = Math.round(1 * maxLength); searchResults = new String[maxWindow+1]; nns = new int[maxWindow+1][train.length]; dist = new double[train.length][train.length]; int[] nErrors = new int[maxWindow+1]; double[] score = new double[maxWindow+1]; double bestScore = Double.MAX_VALUE; double minD; bestWarpingWindow=-1; // Start searching for the best window. // Only loop through a given size of the dataset, but still search for NN from the whole train // for every sequence in train, we find NN for all window // then in the end, update the best score for (int i = 0; i < estimate; i++) { SymbolicSequence testSeq = train[i]; for (int w = 0; w <= maxWindow; w++){ testSeq.LB_KeoghFillUL(w, U, L); minD = Double.MAX_VALUE; String classValue = null; for (int j = 0; j < train.length; j++) { if (i == j) continue; SymbolicSequence trainSeq = train[j]; if (SymbolicSequence.LB_KeoghPreFilled(trainSeq, U, L) < minD) { double tmpD; if (w == 0) { tmpD = testSeq.PrunedDTW(trainSeq, w); } else { tmpD = testSeq.PrunedDTW(trainSeq, w, dist[i][j]); } if (tmpD < minD) { minD = tmpD; classValue = classMap[j]; nns[w][i] = j; } dist[i][j] = tmpD*tmpD; } else { if (w > 0) { dist[i][j] = dist[i][j]; } else { dist[i][j] = Double.MAX_VALUE; } } } if (classValue == null || !classValue.equals(classMap[i])) { nErrors[w]++; } score[w] = 1.0 * nErrors[w]/train.length; } } for (int w = 0; w < maxWindow; w++) { if (score[w] < bestScore) { bestScore = score[w]; bestWarpingWindow = w; } } // Saving best windows found System.out.println("Windows found=" + bestWarpingWindow + " Best Acc=" + 
(1-bestScore)); } /** * Search for the best warping window * for every window, we evaluate the performance of the classifier */ @Override protected void searchBestWarpingWindow(){ int currentWindow = 0; double currentScore = 1.0; bestScore = 1.0; long startTime = System.currentTimeMillis(); // Start from smallest window, w=0 while (currentWindow >= 0 && currentWindow<= maxWindow) { currentScore = evalSolution(currentWindow); long endTime = System.currentTimeMillis(); long accumulatedTime = (endTime-startTime); // saving results searchResults[currentWindow] = currentWindow + "," + currentScore + "," + accumulatedTime; // out.println(currentWindow+" "+currentScore+" "+accumulatedTime); // out.flush(); if (currentScore < bestScore) { bestScore = currentScore; bestWarpingWindow = currentWindow; } currentWindow = currentWindow + 1; } } /** * Evaluate the performance of the classifier * Here we use LB Keogh to further speed up the process * Original code does not have LB Keogh * * @param warpingWindow * @return */ @Override protected double evalSolution(int warpingWindow) { int nErrors = 0; // test fold number is nFold for (int i = 0; i < train.length; i++) { SymbolicSequence testSeq = train[i]; testSeq.LB_KeoghFillUL(warpingWindow, U, L); double minD = Double.MAX_VALUE; String classValue = null; for (int j = 0; j < train.length; j++) { if (i == j) continue; SymbolicSequence trainSeq = train[j]; if (SymbolicSequence.LB_KeoghPreFilled(trainSeq, U, L) < minD) { double tmpD; if (warpingWindow == 0) { tmpD = testSeq.PrunedDTW(trainSeq, warpingWindow); } else { tmpD = testSeq.PrunedDTW(trainSeq, warpingWindow, dist[i][j]); } if (tmpD < minD) { minD = tmpD; classValue = classMap[j]; nns[warpingWindow][i] = j; } dist[i][j] = tmpD*tmpD; } else { if (warpingWindow > 0) { dist[i][j] = dist[i][j]; } else { dist[i][j] = Double.MAX_VALUE; } } } if (classValue == null || !classValue.equals(classMap[i])) { nErrors++; } } return 1.0 * nErrors / train.length; } @Override public double 
classifyInstance(Instance sample) throws Exception { // transform instance to sequence MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1]; int shift = (sample.classIndex() == 0) ? 1 : 0; for (int t = 0; t < sequence.length; t++) { sequence[t] = new MonoDoubleItemSet(sample.value(t + shift)); } SymbolicSequence seq = new SymbolicSequence(sequence); double minD = Double.MAX_VALUE; String classValue = null; seq.LB_KeoghFillUL(bestWarpingWindow, U, L); for (int i = 0; i < train.length; i++) { SymbolicSequence s = train[i]; if (SymbolicSequence.LB_KeoghPreFilled(s, U, L) < minD) { double tmpD = seq.DTW(s,bestWarpingWindow, warpingMatrix); if (tmpD < minD) { minD = tmpD; classValue = classMap[i]; } } } // System.out.println(prototypes.size()); return sample.classAttribute().indexOfValue(classValue); } /** * Get our search results * @return */ @Override public String[] getSearchResults() { return searchResults; } /** * Get the best warping window found */ @Override public int getBestWin() { return bestWarpingWindow; } /** * Get the LOOCV accuracy for the best warping window */ @Override public double getBestScore() { return bestScore; } /** * Set the result directory */ @Override public void setResDir(String path) { resDir = path; } /** * Set type of classifier */ @Override public void setType(String t) { type = t; } }
12,113
30.38342
118
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/legacy/elastic_ensemble/fast_window_search/windowSearcher/NaiveDTW.java
/******************************************************************************* * Copyright (C) 2017 Chang Wei Tan * * This file is part of FastWWSearch. * * FastWWSearch is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 3 of the License. * * FastWWSearch is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with FastWWSearch. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ package tsml.classifiers.legacy.elastic_ensemble.fast_window_search.windowSearcher; import java.io.PrintStream; import java.util.ArrayList; import java.util.HashMap; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.MonoDoubleItemSet; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.sequences.SymbolicSequence; import weka.core.Attribute; import weka.core.Instance; import weka.core.Instances; /** * Code for the paper "Efficient search of the best warping window for Dynamic Time Warping" published in SDM18 * * Search for the best warping window using just DTW without any optimisation * Expect to have the worst runtime! 
* * @author Chang Wei Tan * */ public class NaiveDTW extends WindowSearcher { // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Fields // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- private static final long serialVersionUID = -1561497612657542978L; public boolean forwardSearch = false; // Search from front or back public boolean greedySearch = false; // Greedy or not public PrintStream out; // Output print private String[] searchResults; // Our results private int[][] nns; // Similar to our main structure private double[][] dist; // Matrix to store the distances // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Constructor // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- public NaiveDTW() { super(); out = System.out; } public NaiveDTW(String name) { super(); out = System.out; datasetName = name; } // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Methods // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- @Override public void buildClassifier(Instances data) throws Exception { // Initialise training dataset Attribute classAttribute = data.classAttribute(); classedData = new HashMap<>(); classedDataIndices = new HashMap<>(); for (int c = 0; c < data.numClasses(); c++) { classedData.put(data.classAttribute().value(c), new ArrayList<SymbolicSequence>()); classedDataIndices.put(data.classAttribute().value(c), new ArrayList<Integer>()); } train = new SymbolicSequence[data.numInstances()]; classMap = new String[train.length]; maxLength = 0; for (int i = 0; i < train.length; i++) { Instance sample = data.instance(i); MonoDoubleItemSet[] sequence = new 
MonoDoubleItemSet[sample.numAttributes() - 1]; maxLength = Math.max(maxLength, sequence.length); int shift = (sample.classIndex() == 0) ? 1 : 0; for (int t = 0; t < sequence.length; t++) { sequence[t] = new MonoDoubleItemSet(sample.value(t + shift)); } train[i] = new SymbolicSequence(sequence); String clas = sample.stringValue(classAttribute); classMap[i] = clas; classedData.get(clas).add(train[i]); classedDataIndices.get(clas).add(i); } warpingMatrix = new double[maxLength][maxLength]; U = new double[maxLength]; L = new double[maxLength]; maxWindow = Math.round(1 * maxLength); searchResults = new String[maxWindow+1]; nns = new int[maxWindow+1][train.length]; dist = new double[maxWindow+1][train.length]; // Start searching for the best window searchBestWarpingWindow(); // Saving best windows found System.out.println("Windows found=" + bestWarpingWindow + " Best Acc=" + (1-bestScore)); } /** * Search for the best warping window * for every window, we evaluate the performance of the classifier */ @Override protected void searchBestWarpingWindow(){ int currentWindow = (forwardSearch) ? 0 : maxWindow; double currentScore = 1.0; bestScore = 1.0; long startTime = System.currentTimeMillis(); while (currentWindow >= 0 && currentWindow<= maxWindow) { currentScore = evalSolution(currentWindow); long endTime = System.currentTimeMillis(); long accumulatedTime = (endTime-startTime); // saving results searchResults[currentWindow] = currentWindow + "," + currentScore + "," + accumulatedTime; if (currentScore < bestScore || (currentScore == bestScore && !forwardSearch) ) { bestScore = currentScore; bestWarpingWindow = currentWindow; }else if(greedySearch && currentScore > bestScore){ break; } currentWindow = (forwardSearch) ? 
currentWindow + 1 : currentWindow - 1; } } /** * Evaluate the performance of the classifier * @param warpingWindow * @return */ @Override protected double evalSolution(int warpingWindow) { int nErrors = 0; // test fold number is nFold for (int i = 0; i < train.length; i++) { SymbolicSequence testSeq = train[i]; double minD = Double.MAX_VALUE; String classValue = null; for (int j = 0; j < train.length; j++) { if (i == j) continue; SymbolicSequence trainSeq = train[j]; double tmpD = testSeq.DTW(trainSeq, warpingWindow, warpingMatrix); if (tmpD < minD) { minD = tmpD; classValue = classMap[j]; nns[warpingWindow][i] = j; dist[warpingWindow][i] = minD; } } if (classValue == null || !classValue.equals(classMap[i])) { nErrors++; } } return 1.0 * nErrors / train.length; } @Override public double classifyInstance(Instance sample) throws Exception { // transform instance to sequence MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1]; int shift = (sample.classIndex() == 0) ? 
1 : 0; for (int t = 0; t < sequence.length; t++) { sequence[t] = new MonoDoubleItemSet(sample.value(t + shift)); } SymbolicSequence seq = new SymbolicSequence(sequence); double minD = Double.MAX_VALUE; String classValue = null; seq.LB_KeoghFillUL(bestWarpingWindow, U, L); for (int i = 0; i < train.length; i++) { SymbolicSequence s = train[i]; if (SymbolicSequence.LB_KeoghPreFilled(s, U, L) < minD) { double tmpD = seq.DTW(s,bestWarpingWindow, warpingMatrix); if (tmpD < minD) { minD = tmpD; classValue = classMap[i]; } } } // System.out.println(prototypes.size()); return sample.classAttribute().indexOfValue(classValue); } /** * Get our search results * @return */ @Override public String[] getSearchResults() { return searchResults; } /** * Get the best warping window found */ @Override public int getBestWin() { return bestWarpingWindow; } /** * Get the LOOCV accuracy for the best warping window */ @Override public double getBestScore() { return bestScore; } /** * Set the result directory */ @Override public void setResDir(String path) { resDir = path; } /** * Set type of classifier */ @Override public void setType(String t) { type = t; } }
7,902
30.361111
118
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/legacy/elastic_ensemble/fast_window_search/windowSearcher/Trillion.java
/******************************************************************************* * Copyright (C) 2017 Chang Wei Tan * * This file is part of FastWWSearch. * * FastWWSearch is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 3 of the License. * * FastWWSearch is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with FastWWSearch. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ package tsml.classifiers.legacy.elastic_ensemble.fast_window_search.windowSearcher; import java.io.PrintStream; import java.util.ArrayList; import java.util.HashMap; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.MonoDoubleItemSet; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.SequenceStatsCache; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.sequences.SymbolicSequence; import weka.core.Attribute; import weka.core.Instance; import weka.core.Instances; /** * Code for the paper "Efficient search of the best warping window for Dynamic Time Warping" published in SDM18 * * Search for the best warping window using Cascading Lower Bound and Early Abandon * 1. LB Kim * 2. LB Keogh (Q,C) EA * 3. LB Keogh (C,Q) EA * 4. DTW * * Original paper: * Rakthanmanon, T., Campana, B., Mueen, A., Batista, G., Westover, B., Zhu, Q., ... & Keogh, E. (2012, August). * Searching and mining trillions of time series subsequences under dynamic time warping. * In Proceedings of the 18th ACM SIGKDD international conference * on Knowledge discovery and data mining (pp. 262-270). ACM. 
Chicago * * @author Chang Wei Tan * */ public class Trillion extends WindowSearcher { // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Fields // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- private static final long serialVersionUID = 1L; public PrintStream out; // Output print private SequenceStatsCache cache; // Cache to store the information for the sequences private SymbolicSequence query, reference; // Query and reference sequences private int indexQuery, indexReference; // Index for query and reference private double minDist, bestMinDist; // Distance and best so far distance private int currentW; // Current warping window private int indexStoppedLB; // Index where we stop LB // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Constructor // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- public Trillion() { super(); out = System.out; } public Trillion(String name) { super(); out = System.out; datasetName = name; } // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Methods // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- @Override public void buildClassifier(Instances data) throws Exception { // Initialise training dataset Attribute classAttribute = data.classAttribute(); classedData = new HashMap<>(); classedDataIndices = new HashMap<>(); for (int c = 0; c < data.numClasses(); c++) { classedData.put(data.classAttribute().value(c), new ArrayList<SymbolicSequence>()); classedDataIndices.put(data.classAttribute().value(c), new ArrayList<Integer>()); } train = new SymbolicSequence[data.numInstances()]; classMap = new String[train.length]; maxLength = 0; for (int i = 0; i 
< train.length; i++) { Instance sample = data.instance(i); MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1]; maxLength = Math.max(maxLength, sequence.length); int shift = (sample.classIndex() == 0) ? 1 : 0; for (int t = 0; t < sequence.length; t++) { sequence[t] = new MonoDoubleItemSet(sample.value(t + shift)); } train[i] = new SymbolicSequence(sequence); String clas = sample.stringValue(classAttribute); classMap[i] = clas; classedData.get(clas).add(train[i]); classedDataIndices.get(clas).add(i); } U = new double[maxLength]; L = new double[maxLength]; maxWindow = Math.round(1 * maxLength); cache = new SequenceStatsCache(train, maxWindow); int nbErrors = 0; double score; bestScore = Double.MAX_VALUE; bestWarpingWindow=-1; // Start searching for the best window for (int w = 0; w <= maxWindow; w++) { currentW = w; nbErrors = 0; for (int i = 0; i < train.length; i++) { query = train[i]; indexQuery = i; bestMinDist = Double.MAX_VALUE; String classValue = null; for (int j = 0; j < train.length; j++) { if (i==j) continue; reference = train[j]; indexReference = j; // LB Kim doLBKim(); if (minDist < bestMinDist) { minDist = 0; indexStoppedLB = 0; // LB Keogh(Q,R) doLBKeoghQR(bestMinDist); if (minDist < bestMinDist) { minDist = 0; indexStoppedLB = 0; // LB Keogh(R,Q) doLBKeoghRQ(bestMinDist); if (minDist < bestMinDist) { // DTW double res = query.DTW(reference, currentW); minDist = res * res; if(minDist < bestMinDist){ bestMinDist = minDist; classValue = classMap[j]; } } } } } if (classValue == null || !classValue.equals(classMap[i])) { nbErrors++; } } score = 1.0 * nbErrors / train.length; if (score < bestScore) { bestScore = score; bestWarpingWindow = w; } } // Saving best windows found System.out.println("Windows found=" + bestWarpingWindow + " Best Acc=" + (1-bestScore)); } /** * This is similar to buildClassifier but it is an estimate. * This is used for large dataset where it takes very long to run. 
* The main purpose of this is to get the run time and not actually search for the best window. * We call this to draw Figure 1 of our SDM18 paper * @param data * @param estimate * @throws Exception */ @Override public void buildClassifierEstimate(Instances data, int estimate) throws Exception { // Initialise training dataset Attribute classAttribute = data.classAttribute(); classedData = new HashMap<>(); classedDataIndices = new HashMap<>(); for (int c = 0; c < data.numClasses(); c++) { classedData.put(data.classAttribute().value(c), new ArrayList<SymbolicSequence>()); classedDataIndices.put(data.classAttribute().value(c), new ArrayList<Integer>()); } train = new SymbolicSequence[data.numInstances()]; classMap = new String[train.length]; maxLength = 0; for (int i = 0; i < train.length; i++) { Instance sample = data.instance(i); MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1]; maxLength = Math.max(maxLength, sequence.length); int shift = (sample.classIndex() == 0) ? 1 : 0; for (int t = 0; t < sequence.length; t++) { sequence[t] = new MonoDoubleItemSet(sample.value(t + shift)); } train[i] = new SymbolicSequence(sequence); String clas = sample.stringValue(classAttribute); classMap[i] = clas; classedData.get(clas).add(train[i]); classedDataIndices.get(clas).add(i); } U = new double[maxLength]; L = new double[maxLength]; maxWindow = Math.round(1 * maxLength); cache = new SequenceStatsCache(train, maxWindow); int[] nbErrors = new int[maxWindow+1]; double[] score = new double[maxWindow+1]; bestScore = Double.MAX_VALUE; bestWarpingWindow=-1; // Start searching for the best window. 
// Only loop through a given size of the dataset, but still search for NN from the whole train // for every sequence in train, we find NN for all window // then in the end, update the best score for (int i = 0; i < estimate; i++) { query = train[i]; indexQuery = i; for (int w = 0; w <= maxWindow; w++) { currentW = w; bestMinDist = Double.MAX_VALUE; String classValue = null; for (int j = 0; j < train.length; j++) { if (i==j) continue; reference = train[j]; indexReference = j; // LB Kim doLBKim(); if (minDist < bestMinDist) { minDist = 0; indexStoppedLB = 0; // LB Keogh(Q,R) doLBKeoghQR(bestMinDist); if (minDist < bestMinDist) { minDist = 0; indexStoppedLB = 0; // LB Keogh(R,Q) doLBKeoghRQ(bestMinDist); if (minDist < bestMinDist) { // DTW double res = query.DTW(reference, currentW); minDist = res * res; if(minDist < bestMinDist){ bestMinDist = minDist; classValue = classMap[j]; } } } } } if (classValue == null || !classValue.equals(classMap[i])) { nbErrors[w]++; } score[w] = 1.0 * nbErrors[w]/train.length; } } for (int w = 0; w < maxWindow; w++) { if (score[w] < bestScore) { bestScore = score[w]; bestWarpingWindow = w; } } // Saving best windows found System.out.println("Windows found=" + bestWarpingWindow + " Best Acc=" + (1-bestScore)); } @Override public double classifyInstance(Instance sample) throws Exception { // transform instance to sequence MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1]; int shift = (sample.classIndex() == 0) ? 
1 : 0; for (int t = 0; t < sequence.length; t++) { sequence[t] = new MonoDoubleItemSet(sample.value(t + shift)); } SymbolicSequence seq = new SymbolicSequence(sequence); double minD = Double.MAX_VALUE; String classValue = null; seq.LB_KeoghFillUL(bestWarpingWindow, U, L); for (int i = 0; i < train.length; i++) { SymbolicSequence s = train[i]; if (SymbolicSequence.LB_KeoghPreFilled(s, U, L) < minD) { double tmpD = seq.DTW(s,bestWarpingWindow); if (tmpD < minD) { minD = tmpD; classValue = classMap[i]; } } } // System.out.println(prototypes.size()); return sample.classAttribute().indexOfValue(classValue); } /** * Run LB Kim using data from cache */ public void doLBKim() { double diffFirsts = query.sequence[0].squaredDistance(reference.sequence[0]); double diffLasts = query.sequence[query.getNbTuples() - 1].squaredDistance(reference.sequence[reference.getNbTuples() - 1]); minDist = diffFirsts + diffLasts; if(!cache.isMinFirst(indexQuery)&&!cache.isMinFirst(indexReference) && !cache.isMinLast(indexQuery) && !cache.isMinLast(indexReference)){ double diffMin = cache.getMin(indexQuery)-cache.getMin(indexReference); minDist += diffMin*diffMin; } if(!cache.isMaxFirst(indexQuery)&&!cache.isMaxFirst(indexReference)&& !cache.isMaxLast(indexQuery) && !cache.isMaxLast(indexReference)){ double diffMax = cache.getMax(indexQuery)-cache.getMax(indexReference); minDist += diffMax*diffMax; } } /** * Run LB Keogh(Q,R) with EA using data from cache * @param scoreToBeat */ public void doLBKeoghQR(double scoreToBeat) { int length = query.sequence.length; double[] LEQ = cache.getLE(indexQuery, currentW); double[] UEQ = cache.getUE(indexQuery, currentW); while (indexStoppedLB < length && minDist < scoreToBeat) { int index = cache.getIndexNthHighestVal(indexReference, indexStoppedLB); double c = ((MonoDoubleItemSet) reference.sequence[index]).value; if (c < LEQ[index]) { double diff = LEQ[index] - c; minDist += diff * diff; } else if (UEQ[index] < c) { double diff = UEQ[index] - c; minDist += 
diff * diff; } indexStoppedLB++; } } /** * Run LB Keogh(R,Q) with EA using data from cache * @param scoreToBeat */ public void doLBKeoghRQ(double scoreToBeat) { int length = reference.sequence.length; double[] LER = cache.getLE(indexReference, currentW); double[] UER = cache.getUE(indexReference, currentW); while (indexStoppedLB < length && minDist < scoreToBeat) { int index = cache.getIndexNthHighestVal(indexQuery, indexStoppedLB); double c = ((MonoDoubleItemSet) query.sequence[index]).value; if (c < LER[index]) { double diff = LER[index] - c; minDist += diff * diff; } else if (UER[index] < c) { double diff = UER[index] - c; minDist += diff * diff; } indexStoppedLB++; } } /** * Get the best warping window found */ @Override public int getBestWin() { return bestWarpingWindow; } /** * Get the LOOCV accuracy for the best warping window */ @Override public double getBestScore() { return bestScore; } /** * Set the result directory */ @Override public void setResDir(String path) { resDir = path; } /** * Set type of classifier */ @Override public void setType(String t) { type = t; } }
13,247
31.711111
139
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/legacy/elastic_ensemble/fast_window_search/windowSearcher/UCRSuite.java
/*******************************************************************************
 * Copyright (C) 2017 Chang Wei Tan
 *
 * This file is part of FastWWSearch.
 *
 * FastWWSearch is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 3 of the License.
 *
 * FastWWSearch is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FastWWSearch. If not, see <http://www.gnu.org/licenses/>.
 ******************************************************************************/
package tsml.classifiers.legacy.elastic_ensemble.fast_window_search.windowSearcher;

import java.io.PrintStream;
import java.util.ArrayList;
import java.util.HashMap;

import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.LazyAssessNNEarlyAbandon;
import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.LazyAssessNNEarlyAbandon.RefineReturnType;
import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.sequences.SymbolicSequence;
import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.MonoDoubleItemSet;
import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.SequenceStatsCache;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;

/**
 * Code for the paper "Efficient search of the best warping window for Dynamic Time Warping"
 * published in SDM18.
 *
 * Searches for the best DTW warping window with the UCR-Suite cascade of lower bounds
 * and early abandoning:
 * 1. LB Kim
 * 2. LB Keogh(Q,C) with early abandon
 * 3. LB Keogh(C,Q) with early abandon
 * 4. Full DTW
 *
 * Here, the original algorithm is expressed through the LazyUCR distance introduced in
 * the SDM18 paper; KDD12.java keeps the original UCR-Suite formulation.
 *
 * Original paper:
 * Rakthanmanon, T., Campana, B., Mueen, A., Batista, G., Westover, B., Zhu, Q., ... &amp; Keogh, E. (2012, August).
 * Searching and mining trillions of time series subsequences under dynamic time warping.
 * In Proceedings of the 18th ACM SIGKDD international conference
 * on Knowledge discovery and data mining (pp. 262-270). ACM. Chicago
 *
 * @author Chang Wei Tan
 */
public class UCRSuite extends WindowSearcher {
    // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
    // Fields
    // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
    private static final long serialVersionUID = -1561497612657542978L;

    public boolean forwardSearch = false;               // search windows front-to-back or back-to-front
    public boolean greedySearch = false;                // stop as soon as accuracy degrades
    public PrintStream out;                             // destination for progress output
    private String[] searchResults;                     // per-window "window,score,time" records
    private int[][] nns;                                // nearest-neighbour index per window per sequence
    private double[][] dist;                            // NN distance per window per sequence
    private SequenceStatsCache cache;                   // precomputed envelopes/statistics for all sequences
    private LazyAssessNNEarlyAbandon[][] lazyUCR;       // lazy UCR distance assessor for each ordered pair

    // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
    // Constructor
    // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
    public UCRSuite() {
        super();
        out = System.out;
    }

    public UCRSuite(String name) {
        super();
        out = System.out;
        datasetName = name;
    }

    // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
    // Methods
    // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---

    /**
     * Converts the Weka dataset into SymbolicSequences, allocates all working
     * structures and then runs the warping-window search.
     *
     * @param data training instances (class attribute first or last)
     * @throws Exception propagated from Weka
     */
    @Override
    public void buildClassifier(Instances data) throws Exception {
        final Attribute classAttribute = data.classAttribute();

        // Group the training sequences (and their train indices) by class label.
        classedData = new HashMap<>();
        classedDataIndices = new HashMap<>();
        for (int c = 0; c < data.numClasses(); c++) {
            final String label = classAttribute.value(c);
            classedData.put(label, new ArrayList<SymbolicSequence>());
            classedDataIndices.put(label, new ArrayList<Integer>());
        }

        // Convert every instance into a SymbolicSequence, skipping the class attribute.
        train = new SymbolicSequence[data.numInstances()];
        classMap = new String[train.length];
        maxLength = 0;
        for (int i = 0; i < train.length; i++) {
            final Instance inst = data.instance(i);
            final MonoDoubleItemSet[] seq = new MonoDoubleItemSet[inst.numAttributes() - 1];
            maxLength = Math.max(maxLength, seq.length);
            final int offset = (inst.classIndex() == 0) ? 1 : 0; // class attribute may lead or trail
            for (int t = 0; t < seq.length; t++) {
                seq[t] = new MonoDoubleItemSet(inst.value(t + offset));
            }
            train[i] = new SymbolicSequence(seq);
            final String label = inst.stringValue(classAttribute);
            classMap[i] = label;
            classedData.get(label).add(train[i]);
            classedDataIndices.get(label).add(i);
        }

        // Working buffers for DTW and the LB_Keogh envelopes.
        warpingMatrix = new double[maxLength][maxLength];
        U = new double[maxLength];
        L = new double[maxLength];
        U1 = new double[maxLength];
        L1 = new double[maxLength];

        maxWindow = Math.round(1 * maxLength);
        searchResults = new String[maxWindow + 1];
        nns = new int[maxWindow + 1][train.length];
        dist = new double[maxWindow + 1][train.length];
        cache = new SequenceStatsCache(train, maxWindow);

        // One lazy assessor per ordered pair of training sequences.
        lazyUCR = new LazyAssessNNEarlyAbandon[train.length][train.length];
        for (int i = 0; i < train.length; i++) {
            for (int j = 0; j < train.length; j++) {
                lazyUCR[i][j] = new LazyAssessNNEarlyAbandon(cache);
            }
        }

        // Run the actual search for the best window.
        searchBestWarpingWindow();

        // Report the best window found.
        System.out.println("Windows found=" + bestWarpingWindow + " Best Acc=" + (1-bestScore));
    }

    /**
     * Sweeps the warping window (direction controlled by {@code forwardSearch}) and
     * evaluates the LOOCV error at each window, keeping the best one.
     */
    @Override
    protected void searchBestWarpingWindow() {
        int currentWindow = forwardSearch ? 0 : maxWindow;
        bestScore = 1.0;
        final long startTime = System.currentTimeMillis();

        while (currentWindow >= 0 && currentWindow <= maxWindow) {
            final double currentScore = evalSolution(currentWindow);
            final long accumulatedTime = System.currentTimeMillis() - startTime;

            // Record "window,errorRate,elapsedMillis" for this window.
            searchResults[currentWindow] = currentWindow + "," + currentScore + "," + accumulatedTime;
            // out.println(currentWindow+" "+currentScore+" "+accumulatedTime);
            // out.flush();

            // On ties, a backward sweep prefers the smaller window seen later.
            if (currentScore < bestScore || (currentScore == bestScore && !forwardSearch)) {
                bestScore = currentScore;
                bestWarpingWindow = currentWindow;
            } else if (greedySearch && currentScore > bestScore) {
                break; // greedy mode: stop once accuracy degrades
            }

            currentWindow = forwardSearch ? currentWindow + 1 : currentWindow - 1;
        }
    }

    /**
     * Leave-one-out 1-NN evaluation at a given warping window, using the lazy
     * UCR-Suite cascade to prune candidates.
     *
     * @param warpingWindow the window to evaluate
     * @return LOOCV error rate in [0,1]
     */
    @Override
    protected double evalSolution(int warpingWindow) {
        int nErrors = 0;
        for (int i = 0; i < train.length; i++) {
            double minD = Double.MAX_VALUE;
            String predicted = null;
            for (int j = 0; j < train.length; j++) {
                if (i == j) {
                    continue; // leave-one-out: never match a sequence to itself
                }
                lazyUCR[i][j].set(train[i], i, train[j], j);
                if (lazyUCR[i][j].tryToBeat(minD, warpingWindow) == RefineReturnType.New_best) {
                    minD = lazyUCR[i][j].getDistance(warpingWindow);
                    nns[warpingWindow][i] = j;
                    dist[warpingWindow][i] = minD;
                    predicted = classMap[j];
                }
            }
            if (predicted == null || !predicted.equals(classMap[i])) {
                nErrors++;
            }
        }
        return 1.0 * nErrors / train.length;
    }

    /**
     * 1-NN classification of a single instance at the best window found, pruning
     * with a prefilled LB_Keogh envelope before computing full DTW.
     */
    @Override
    public double classifyInstance(Instance sample) throws Exception {
        // Turn the instance into a sequence, skipping the class attribute.
        final MonoDoubleItemSet[] seqData = new MonoDoubleItemSet[sample.numAttributes() - 1];
        final int offset = (sample.classIndex() == 0) ? 1 : 0;
        for (int t = 0; t < seqData.length; t++) {
            seqData[t] = new MonoDoubleItemSet(sample.value(t + offset));
        }
        final SymbolicSequence query = new SymbolicSequence(seqData);

        double minD = Double.MAX_VALUE;
        String predicted = null;
        query.LB_KeoghFillUL(bestWarpingWindow, U, L);

        for (int i = 0; i < train.length; i++) {
            final SymbolicSequence candidate = train[i];
            if (SymbolicSequence.LB_KeoghPreFilled(candidate, U, L) >= minD) {
                continue; // lower bound already beats the best-so-far: skip DTW
            }
            final double d = query.DTW(candidate, bestWarpingWindow, warpingMatrix);
            if (d < minD) {
                minD = d;
                predicted = classMap[i];
            }
        }
        // System.out.println(prototypes.size());
        return sample.classAttribute().indexOfValue(predicted);
    }

    /**
     * Get our search results
     * @return per-window "window,score,time" records
     */
    @Override
    public String[] getSearchResults() {
        return searchResults;
    }

    /** Get the best warping window found */
    @Override
    public int getBestWin() {
        return bestWarpingWindow;
    }

    /** Get the LOOCV accuracy for the best warping window */
    @Override
    public double getBestScore() {
        return bestScore;
    }

    /** Set the result directory */
    @Override
    public void setResDir(String path) {
        resDir = path;
    }

    /** Set type of classifier */
    @Override
    public void setType(String t) {
        type = t;
    }
}
9,466
31.986063
118
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/legacy/elastic_ensemble/fast_window_search/windowSearcher/UCRSuitePrunedDTW.java
/*******************************************************************************
 * Copyright (C) 2017 Chang Wei Tan
 *
 * This file is part of FastWWSearch.
 *
 * FastWWSearch is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 3 of the License.
 *
 * FastWWSearch is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FastWWSearch. If not, see <http://www.gnu.org/licenses/>.
 ******************************************************************************/
package tsml.classifiers.legacy.elastic_ensemble.fast_window_search.windowSearcher;

import java.io.PrintStream;
import java.util.ArrayList;
import java.util.HashMap;

import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.LazyAssessNNEarlyAbandon;
import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.LazyAssessNNEarlyAbandon.RefineReturnType;
import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.sequences.SymbolicSequence;
import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.MonoDoubleItemSet;
import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.SequenceStatsCache;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;

/**
 * Code for the paper "Efficient search of the best warping window for Dynamic Time Warping"
 * published in SDM18.
 *
 * Search for the best warping window using the cascading lower bounds and early abandon
 * of UCR Suite, with the standard DTW replaced by PrunedDTW. The distance found at the
 * previous (smaller) window is used as an upper bound to prune the DTW cost matrix.
 *
 * Original papers:
 * Rakthanmanon, T., Campana, B., Mueen, A., Batista, G., Westover, B., Zhu, Q., ... &amp; Keogh, E. (2012, August).
 * Searching and mining trillions of time series subsequences under dynamic time warping.
 * In Proceedings of the 18th ACM SIGKDD international conference
 * on Knowledge discovery and data mining (pp. 262-270). ACM. Chicago
 *
 * Silva, D. F., &amp; Batista, G. E. (2016, June).
 * Speeding up all-pairwise dynamic time warping matrix calculation.
 * In Proceedings of the 2016 SIAM International Conference on Data Mining (pp. 837-845).
 * Society for Industrial and Applied Mathematics.
 *
 * @author Chang Wei Tan
 */
public class UCRSuitePrunedDTW extends WindowSearcher {
    // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
    // Fields
    // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
    private static final long serialVersionUID = -1561497612657542978L;

    public PrintStream out;                             // destination for progress output
    private String[] searchResults;                     // per-window "window,score,time" records
    private int[][] nns;                                // nearest-neighbour index per window per sequence
    private double[][] dist;                            // pairwise distance found at the previous window,
                                                        // used as the PrunedDTW upper bound (note: [i][j],
                                                        // unlike UCRSuite's per-window layout)
    private SequenceStatsCache cache;                   // precomputed envelopes/statistics for all sequences
    private LazyAssessNNEarlyAbandon[][] lazyUCR;       // lazy UCR distance assessor for each ordered pair

    // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
    // Constructor
    // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
    public UCRSuitePrunedDTW() {
        super();
        out = System.out;
    }

    public UCRSuitePrunedDTW(String name) {
        super();
        out = System.out;
        datasetName = name;
    }

    // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
    // Methods
    // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---

    /**
     * Converts the Weka dataset into SymbolicSequences, allocates all working
     * structures and then runs the warping-window search.
     *
     * @param data training instances (class attribute first or last)
     * @throws Exception propagated from Weka
     */
    @Override
    public void buildClassifier(Instances data) throws Exception {
        final Attribute classAttribute = data.classAttribute();

        // Group the training sequences (and their train indices) by class label.
        classedData = new HashMap<>();
        classedDataIndices = new HashMap<>();
        for (int c = 0; c < data.numClasses(); c++) {
            final String label = classAttribute.value(c);
            classedData.put(label, new ArrayList<SymbolicSequence>());
            classedDataIndices.put(label, new ArrayList<Integer>());
        }

        // Convert every instance into a SymbolicSequence, skipping the class attribute.
        train = new SymbolicSequence[data.numInstances()];
        classMap = new String[train.length];
        maxLength = 0;
        for (int i = 0; i < train.length; i++) {
            final Instance sample = data.instance(i);
            final MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1];
            maxLength = Math.max(maxLength, sequence.length);
            final int shift = (sample.classIndex() == 0) ? 1 : 0; // class attribute may lead or trail
            for (int t = 0; t < sequence.length; t++) {
                sequence[t] = new MonoDoubleItemSet(sample.value(t + shift));
            }
            train[i] = new SymbolicSequence(sequence);
            final String clas = sample.stringValue(classAttribute);
            classMap[i] = clas;
            classedData.get(clas).add(train[i]);
            classedDataIndices.get(clas).add(i);
        }

        // Working buffers for DTW and the LB_Keogh envelopes.
        warpingMatrix = new double[maxLength][maxLength];
        U = new double[maxLength];
        L = new double[maxLength];
        U1 = new double[maxLength];
        L1 = new double[maxLength];

        maxWindow = Math.round(1 * maxLength);
        searchResults = new String[maxWindow + 1];
        nns = new int[maxWindow + 1][train.length];
        // Pairwise layout: dist[i][j] carries the distance from the previous window
        // forward as PrunedDTW's upper bound.
        dist = new double[train.length][train.length];
        cache = new SequenceStatsCache(train, maxWindow);

        // One lazy assessor per ordered pair of training sequences.
        lazyUCR = new LazyAssessNNEarlyAbandon[train.length][train.length];
        for (int i = 0; i < train.length; i++) {
            for (int j = 0; j < train.length; j++) {
                lazyUCR[i][j] = new LazyAssessNNEarlyAbandon(cache);
            }
        }

        // Start searching for the best window
        searchBestWarpingWindow();

        // Report the best window found.
        System.out.println("Windows found=" + bestWarpingWindow + " Best Acc=" + (1-bestScore));
    }

    /**
     * Sweeps the warping window from 0 upward (PrunedDTW needs the previous,
     * smaller window's distances as upper bounds) and keeps the best window.
     */
    @Override
    protected void searchBestWarpingWindow() {
        int currentWindow = 0; // must start from the smallest window, w=0
        double currentScore;
        bestScore = 1.0;
        final long startTime = System.currentTimeMillis();

        while (currentWindow >= 0 && currentWindow <= maxWindow) {
            currentScore = evalSolution(currentWindow);
            final long accumulatedTime = System.currentTimeMillis() - startTime;

            // Record "window,errorRate,elapsedMillis" for this window.
            searchResults[currentWindow] = currentWindow + "," + currentScore + "," + accumulatedTime;
            // out.println(currentWindow+" "+currentScore+" "+accumulatedTime);
            // out.flush();

            if (currentScore < bestScore) {
                bestScore = currentScore;
                bestWarpingWindow = currentWindow;
            }
            currentWindow = currentWindow + 1;
        }
    }

    /**
     * Leave-one-out 1-NN evaluation at a given warping window, using the lazy
     * UCR-Suite cascade with PrunedDTW. The previous window's pairwise distance
     * dist[i][j] serves as PrunedDTW's upper bound for w &gt; 0.
     *
     * @param warpingWindow the window to evaluate
     * @return LOOCV error rate in [0,1]
     */
    @Override
    protected double evalSolution(int warpingWindow) {
        int nErrors = 0;
        for (int i = 0; i < train.length; i++) {
            double minD = Double.MAX_VALUE;
            String classValue = null;
            for (int j = 0; j < train.length; j++) {
                if (i == j) {
                    continue; // leave-one-out: never match a sequence to itself
                }
                lazyUCR[i][j].set(train[i], i, train[j], j);
                // At w=0 there is no previous-window distance to use as an upper bound.
                final RefineReturnType rrt = (warpingWindow == 0)
                        ? lazyUCR[i][j].tryToBeatPrunedDTW(minD, warpingWindow)
                        : lazyUCR[i][j].tryToBeatPrunedDTW(minD, warpingWindow, dist[i][j]);
                if (rrt == RefineReturnType.New_best) {
                    minD = lazyUCR[i][j].getDistance(warpingWindow);
                    nns[warpingWindow][i] = j;
                    classValue = classMap[j];
                    dist[i][j] = minD;
                } else if (rrt == RefineReturnType.Pruned_with_DTW) {
                    // DTW was computed before pruning: keep it for the next window.
                    dist[i][j] = lazyUCR[i][j].getDistance(warpingWindow);
                } else if (warpingWindow == 0) {
                    // Pruned by a lower bound before any DTW at the first window:
                    // there is no usable upper bound yet, so mark it unknown.
                    dist[i][j] = Double.MAX_VALUE;
                }
                // Otherwise (pruned with LB at w > 0): dist[i][j] keeps the value
                // carried over from the previous window. (The original code
                // expressed this as a no-op self-assignment.)
            }
            if (classValue == null || !classValue.equals(classMap[i])) {
                nErrors++;
            }
        }
        return 1.0 * nErrors / train.length;
    }

    /**
     * 1-NN classification of a single instance at the best window found, pruning
     * with a prefilled LB_Keogh envelope before computing full DTW.
     */
    @Override
    public double classifyInstance(Instance sample) throws Exception {
        // Turn the instance into a sequence, skipping the class attribute.
        final MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1];
        final int shift = (sample.classIndex() == 0) ? 1 : 0;
        for (int t = 0; t < sequence.length; t++) {
            sequence[t] = new MonoDoubleItemSet(sample.value(t + shift));
        }
        final SymbolicSequence seq = new SymbolicSequence(sequence);

        double minD = Double.MAX_VALUE;
        String classValue = null;
        seq.LB_KeoghFillUL(bestWarpingWindow, U, L);

        for (int i = 0; i < train.length; i++) {
            final SymbolicSequence s = train[i];
            if (SymbolicSequence.LB_KeoghPreFilled(s, U, L) < minD) {
                final double tmpD = seq.DTW(s, bestWarpingWindow, warpingMatrix);
                if (tmpD < minD) {
                    minD = tmpD;
                    classValue = classMap[i];
                }
            }
        }
        // System.out.println(prototypes.size());
        return sample.classAttribute().indexOfValue(classValue);
    }

    /**
     * Get our search results
     * @return per-window "window,score,time" records
     */
    @Override
    public String[] getSearchResults() {
        return searchResults;
    }

    /** Get the best warping window found */
    @Override
    public int getBestWin() {
        return bestWarpingWindow;
    }

    /** Get the LOOCV accuracy for the best warping window */
    @Override
    public double getBestScore() {
        return bestScore;
    }

    /** Set the result directory */
    @Override
    public void setResDir(String path) {
        resDir = path;
    }

    /** Set type of classifier */
    @Override
    public void setType(String t) {
        type = t;
    }
}
9,660
31.638514
118
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/legacy/elastic_ensemble/fast_window_search/windowSearcher/WindowSearcher.java
/******************************************************************************* * Copyright (C) 2017 Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb * * This file is part of FastWWSearch. * * FastWWSearch is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 3 of the License. * * FastWWSearch is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with FastWWSearch. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ package tsml.classifiers.legacy.elastic_ensemble.fast_window_search.windowSearcher; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.items.MonoDoubleItemSet; import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.sequences.SymbolicSequence; import weka.core.Attribute; import weka.core.Instance; import weka.core.Instances; import java.io.PrintStream; import java.util.ArrayList; import java.util.HashMap; import weka.classifiers.AbstractClassifier; /** * Code for the paper "Efficient search of the best warping window for Dynamic Time Warping" published in SDM18 * <p> * Superclass for all the classifiers that we used * By default, it uses DTW with LB Keogh to search for the best window using Algorithm 2 in our SDM18 paper * In our SDM18 paper, we call this as the Naive method * Refer to http://www.cs.ucr.edu/~eamonn/LB_Keogh.htm for the details of LB Keogh * * @author Chang Wei Tan, Francois Petitjean, Matthieu Herrmann, Germain Forestier, Geoff Webb */ public class WindowSearcher extends AbstractClassifier{ // --- --- --- --- --- --- --- --- --- --- --- --- --- --- 
--- --- --- --- --- --- --- --- --- --- --- --- --- --- // Fields // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- private static final long serialVersionUID = -1561497612657542978L; protected static int bestWarpingWindow; // Best warping window found protected static int bestWindowPercent = -1; // Best warping window found in percentage protected static double bestScore; // Best LOOCV accuracy for the best warping window protected static String type = "keogh"; // Default type is DTW with LB Keogh protected static String datasetName; // Name of dataset that is being tested protected static String resDir = "/home/changwei/workspace/FindBestWarpingWindow/outputs/"; // result directory public PrintStream out; // Output print protected boolean forwardSearch = false; // Search from front or back protected boolean greedySearch = false; // Greedy or not protected SymbolicSequence[] train; // Training dataset, array of sequences protected HashMap <String, ArrayList <SymbolicSequence>> classedData; // Sequences by classes protected HashMap <String, ArrayList <Integer>> classedDataIndices; // Sequences index in train protected String[] classMap; // Class per index protected double[][] warpingMatrix; // DTW cost matrix protected double[] U, L, U1, L1; // Upper and lower envelope for LB Keogh protected int maxLength, maxWindow; // Max length of the sequences protected String[] searchResults; // Our results protected int nParams = 100; // this can be the maximum length or percentage private int[][] nns; // Similar to our main structure private double[][] dist; // Matrix to store the distances // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Constructor // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- public WindowSearcher() { super(); out = System.out; } public 
WindowSearcher(String name) { super(); out = System.out; datasetName = name; } // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- // Methods // --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- @Override public void buildClassifier(Instances data) throws Exception { // Initialise training dataset Attribute classAttribute = data.classAttribute(); classedData = new HashMap <>(); classedDataIndices = new HashMap <>(); for (int c = 0; c < data.numClasses(); c++) { classedData.put(data.classAttribute().value(c), new ArrayList <SymbolicSequence>()); classedDataIndices.put(data.classAttribute().value(c), new ArrayList <Integer>()); } train = new SymbolicSequence[data.numInstances()]; classMap = new String[train.length]; maxLength = 0; for (int i = 0; i < train.length; i++) { Instance sample = data.instance(i); MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1]; maxLength = Math.max(maxLength, sequence.length); int shift = (sample.classIndex() == 0) ? 
1 : 0; for (int t = 0; t < sequence.length; t++) { sequence[t] = new MonoDoubleItemSet(sample.value(t + shift)); } train[i] = new SymbolicSequence(sequence); String clas = sample.stringValue(classAttribute); classMap[i] = clas; classedData.get(clas).add(train[i]); classedDataIndices.get(clas).add(i); } warpingMatrix = new double[maxLength][maxLength]; U = new double[maxLength]; L = new double[maxLength]; maxWindow = Math.round(1 * maxLength); nns = new int[maxWindow + 1][train.length]; dist = new double[maxWindow + 1][train.length]; // Start searching for the best window searchBestWarpingWindow(); // if we are doing length, find the best window in percentage if (bestWindowPercent < 0) bestWindowPercent = lengthToPercent(bestWarpingWindow); // Saving best windows found System.out.println("Windows found=" + bestWarpingWindow + "(" + bestWindowPercent + ") Best Acc=" + (1 - bestScore)); } /** * This is similar to buildClassifier but it is an estimate. * This is used for large dataset where it takes very long to run. * The main purpose of this is to get the run time and not actually search for the best window. 
* We use this to draw Figure 1 of our SDM18 paper * * @param data * @param estimate * @throws Exception */ public void buildClassifierEstimate(Instances data, int estimate) throws Exception { // Initialise training dataset Attribute classAttribute = data.classAttribute(); classedData = new HashMap <>(); classedDataIndices = new HashMap <>(); for (int c = 0; c < data.numClasses(); c++) { classedData.put(data.classAttribute().value(c), new ArrayList <SymbolicSequence>()); classedDataIndices.put(data.classAttribute().value(c), new ArrayList <Integer>()); } train = new SymbolicSequence[data.numInstances()]; classMap = new String[train.length]; maxLength = 0; for (int i = 0; i < train.length; i++) { Instance sample = data.instance(i); MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1]; maxLength = Math.max(maxLength, sequence.length); int shift = (sample.classIndex() == 0) ? 1 : 0; for (int t = 0; t < sequence.length; t++) { sequence[t] = new MonoDoubleItemSet(sample.value(t + shift)); } train[i] = new SymbolicSequence(sequence); String clas = sample.stringValue(classAttribute); classMap[i] = clas; classedData.get(clas).add(train[i]); classedDataIndices.get(clas).add(i); } warpingMatrix = new double[maxLength][maxLength]; U = new double[maxLength]; L = new double[maxLength]; maxWindow = Math.round(1 * maxLength); searchResults = new String[maxWindow + 1]; nns = new int[maxWindow + 1][train.length]; dist = new double[maxWindow + 1][train.length]; int[] nErrors = new int[maxWindow + 1]; double[] score = new double[maxWindow + 1]; double bestScore = Double.MAX_VALUE; double minD; bestWarpingWindow = -1; // Start searching for the best window. 
// Only loop through a given size of the dataset, but still search for NN from the whole train // for every sequence in train, we find NN for all window // then in the end, update the best score for (int i = 0; i < estimate; i++) { SymbolicSequence testSeq = train[i]; for (int w = 0; w <= maxWindow; w++) { testSeq.LB_KeoghFillUL(w, U, L); minD = Double.MAX_VALUE; String classValue = null; for (int j = 0; j < train.length; j++) { if (i == j) continue; SymbolicSequence trainSeq = train[j]; if (SymbolicSequence.LB_KeoghPreFilled(trainSeq, U, L) < minD) { double tmpD = testSeq.DTW(trainSeq, w, warpingMatrix); if (tmpD < minD) { minD = tmpD; classValue = classMap[j]; nns[w][i] = j; } dist[w][j] = tmpD * tmpD; } } if (classValue == null || !classValue.equals(classMap[i])) { nErrors[w]++; } score[w] = 1.0 * nErrors[w] / train.length; } } for (int w = 0; w < maxWindow; w++) { if (score[w] < bestScore) { bestScore = score[w]; bestWarpingWindow = w; } } // Saving best windows found System.out.println("Windows found=" + bestWarpingWindow + " Best Acc=" + (1 - bestScore)); } /** * Search for the best warping window * for every window, we evaluate the performance of the classifier */ protected void searchBestWarpingWindow() { int currentWindow = (forwardSearch) ? 
0 : maxWindow; double currentScore; bestScore = 1.0; searchResults = new String[maxWindow + 1]; long startTime = System.currentTimeMillis(); while (currentWindow >= 0 && currentWindow <= maxWindow) { currentScore = evalSolution(currentWindow); long endTime = System.currentTimeMillis(); long accumulatedTime = (endTime - startTime); // saving results searchResults[currentWindow] = currentWindow + "," + currentScore + "," + accumulatedTime; // out.println(currentWindow+" "+currentScore+" "+accumulatedTime); // out.flush(); if (currentScore < bestScore || (currentScore == bestScore && !forwardSearch)) { bestScore = currentScore; bestWarpingWindow = currentWindow; } else if (greedySearch && currentScore > bestScore) { break; } currentWindow = (forwardSearch) ? currentWindow + 1 : currentWindow - 1; } } /** * Evaluate the performance of the classifier * * @param warpingWindow * @return */ protected double evalSolution(int warpingWindow) { int nErrors = 0; // test fold number is nFold for (int i = 0; i < train.length; i++) { SymbolicSequence testSeq = train[i]; testSeq.LB_KeoghFillUL(warpingWindow, U, L); double minD = Double.MAX_VALUE; String classValue = null; for (int j = 0; j < train.length; j++) { if (i == j) continue; SymbolicSequence trainSeq = train[j]; if (SymbolicSequence.LB_KeoghPreFilled(trainSeq, U, L) < minD) { double tmpD = testSeq.DTW(trainSeq, warpingWindow, warpingMatrix); if (tmpD < minD) { minD = tmpD; classValue = classMap[j]; nns[warpingWindow][i] = j; dist[warpingWindow][i] = minD; } } } if (classValue == null || !classValue.equals(classMap[i])) { nErrors++; } } return 1.0 * nErrors / train.length; } @Override public double classifyInstance(Instance sample) throws Exception { // transform instance to sequence MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1]; int shift = (sample.classIndex() == 0) ? 
1 : 0; for (int t = 0; t < sequence.length; t++) { sequence[t] = new MonoDoubleItemSet(sample.value(t + shift)); } SymbolicSequence seq = new SymbolicSequence(sequence); double minD = Double.MAX_VALUE; String classValue = null; seq.LB_KeoghFillUL(bestWarpingWindow, U, L); for (int i = 0; i < train.length; i++) { SymbolicSequence s = train[i]; if (SymbolicSequence.LB_KeoghPreFilled(s, U, L) < minD) { double tmpD = seq.DTW(s, bestWarpingWindow, warpingMatrix); if (tmpD < minD) { minD = tmpD; classValue = classMap[i]; } } } // System.out.println(prototypes.size()); return sample.classAttribute().indexOfValue(classValue); } /** * Convert window in percentage to length */ int percentToLength(int windowInPercent) { return maxWindow * windowInPercent / 100; } /** * Convert window in length to percentage */ int lengthToPercent(int windowInLength) { double r = 1.0 * windowInLength / maxWindow; return (int) Math.ceil(r * 100); } /** * Get our search results */ public String[] getSearchResults() { return searchResults; } /** * Get the best warping window found */ public int getBestWin() { return bestWarpingWindow; } /** * Get the best warping window in percentage */ public int getBestPercent() { return bestWindowPercent; } /** * Get the LOOCV accuracy for the best warping window */ public double getBestScore() { return bestScore; } /** * Set the result directory */ public void setResDir(String path) { resDir = path; } /** * Set type of classifier */ public void setType(String t) { type = t; } /** * Set number of parameters * Either maximum length or percentage */ public void setnParams(int n) { nParams = n; } }
16,212
39.736181
128
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/multivariate/ConcatenateClassifier.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package tsml.classifiers.multivariate;

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.core.Instances;
import static utilities.multivariate_tools.MultivariateInstanceTools.concatinateInstances;
import static utilities.multivariate_tools.MultivariateInstanceTools.numDimensions;
import static utilities.multivariate_tools.MultivariateInstanceTools.splitMultivariateInstances;
import weka.classifiers.RandomizableIteratedSingleClassifierEnhancer;
import weka.core.Instance;

/**
 * Wraps a univariate classifier so it can be applied to multivariate time
 * series: the dimensions of each instance are split and concatenated into one
 * long univariate series before delegating to the wrapped model.
 *
 * @author raj09hxu
 */
public class ConcatenateClassifier extends AbstractClassifier{

    // The wrapped univariate classifier all work is delegated to.
    Classifier original_model;
    // Concatenated (flattened) training data built in buildClassifier.
    Instances concat_train;
    // Lazily-built concatenated test data, cached on first call to
    // distributionForInstance.
    Instances concat_test;
    long seed;

    public ConcatenateClassifier(Classifier cla){
        original_model = cla;
    }

    /**
     * Propagates the seed to the wrapped classifier. Uses the typed setter when
     * the model is a RandomizableIteratedSingleClassifierEnhancer; otherwise
     * falls back to reflection, invoking any single-argument int/long method
     * whose (lower-cased) name contains "random" or "seed".
     *
     * @param sd the seed value (narrowed to int for int-typed setters)
     */
    public void setSeed(long sd){
        seed = sd;

        if(original_model instanceof RandomizableIteratedSingleClassifierEnhancer){
            RandomizableIteratedSingleClassifierEnhancer r = (RandomizableIteratedSingleClassifierEnhancer) original_model;
            r.setSeed((int) seed);
        }
        else{
            //check through reflection if the classifier has a method with seed in the name, that takes an int or a long.
            Method[] methods = original_model.getClass().getMethods();
            for (Method method : methods) {
                Class[] paras = method.getParameterTypes();

                //if the method contains the name seed, and takes in 1 parameter, thats a primitive. probably setRandomSeed.
                String name = method.getName().toLowerCase();
                if((name.contains("random") || name.contains("seed")) && paras.length == 1 && (paras[0] == int.class || paras[0] == long.class )){
                    try {
                        // Match the primitive parameter type exactly so invoke
                        // boxes the right wrapper.
                        if(paras[0] == int.class)
                            method.invoke(original_model, (int) seed);
                        else
                            method.invoke(original_model, seed);
                    } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException ex) {
                        // Best-effort: log and continue trying other candidates.
                        System.out.println(ex);
                        System.out.println("Tried to set the seed method name: " + method.getName());
                    }
                }
            }
        }
    }

    /**
     * Flattens the multivariate training data into a single concatenated
     * univariate dataset and trains the wrapped model on it.
     *
     * @param data multivariate training instances
     * @throws Exception if the wrapped classifier fails to build
     */
    @Override
    public void buildClassifier(Instances data) throws Exception {
        concat_train = concatinateInstances(splitMultivariateInstances(data));
        original_model.buildClassifier(concat_train);
    }

    /**
     * Classifies one multivariate instance by looking up its concatenated
     * counterpart and delegating to the wrapped model.
     *
     * NOTE(review): concat_test is built once from the first instance's
     * dataset and reused for all later calls — assumes every instance passed
     * here belongs to that same dataset; confirm against callers. The
     * instance's position is recovered via dataset().indexOf(instance), an
     * O(n) scan per call.
     *
     * @param instance multivariate test instance (must belong to a dataset)
     * @return class distribution from the wrapped model
     * @throws Exception if the wrapped classifier fails
     */
    @Override
    public double[] distributionForInstance(Instance instance) throws Exception {
        //split the multivariate test into
        if(concat_test == null){
            concat_test = concatinateInstances(splitMultivariateInstances(instance.dataset()));
        }

        //get the index of the text instance from the original and use that in the concatenated.
        double[] dist = original_model.distributionForInstance(concat_test.get(instance.dataset().indexOf(instance)));
        return dist;
    }
}
4,048
38.31068
124
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/multivariate/MultivariateAbstractClassifier.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.classifiers.multivariate; //import tsml.classifiers.distance_based.distances.old.DTW_D; import utilities.generic_storage.Pair; import utilities.multivariate_tools.MultivariateInstanceTools; import weka.classifiers.AbstractClassifier; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import static utilities.InstanceTools.findMinDistance; /** * * @author Alejandro Pasos Ruiz */ public abstract class MultivariateAbstractClassifier extends AbstractClassifier { public MultivariateAbstractClassifier(){ super(); } @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.enable(Capabilities.Capability.RELATIONAL_ATTRIBUTES); result.disable(Capabilities.Capability.MISSING_VALUES); return result; } protected void testWithFailRelationalInstances(Instances data) throws Exception { getCapabilities().testWithFail(data); for (Instance instance: data){ testWithFailRelationalInstance(instance); } } protected void testWithFailRelationalInstance(Instance data) throws Exception { Instances group = MultivariateInstanceTools.splitMultivariateInstanceOnInstances(data); getCapabilities().testWithFail(group); } }
2,117
31.090909
99
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/multivariate/MultivariateAbstractEnsemble.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package tsml.classifiers.multivariate;

import evaluation.evaluators.Evaluator;
import evaluation.storage.ClassifierResults;
import machine_learning.classifiers.ensembles.AbstractEnsemble;
import tsml.classifiers.EnhancedAbstractClassifier;
import utilities.ErrorReport;
import utilities.ThreadingUtilities;
import utilities.multivariate_tools.MultivariateInstanceTools;
import weka.classifiers.Classifier;
import weka.core.Instances;

import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

/**
 * Abstract ensemble for multivariate time series. buildClassifier splits each
 * multivariate dataset into one univariate dataset per dimension and trains
 * (or loads from results files) one ensemble module per dimension. Concrete
 * subclasses configure the modules via setupMultivariateEnsembleSettings.
 */
public abstract class MultivariateAbstractEnsemble extends AbstractEnsemble {

    public MultivariateAbstractEnsemble(){
    }

    // Intentionally empty: module setup is deferred to the dimension-aware
    // setupMultivariateEnsembleSettings(int), called from buildClassifier.
    @Override
    public void setupDefaultEnsembleSettings() {
    }

    /**
     * Either loads every module's results from file or trains every module on
     * its dimension's data, then finalises each module's train statistics.
     *
     * @param data       one Instances per dimension, aligned with modules
     * @param numClasses number of classes in the original dataset
     * @throws Exception if loading or training any module fails
     */
    protected void initialiseModules(Instances[] data, int numClasses) throws Exception {
        //currently will only have file reading ON or OFF (not load some files, train the rest)
        //having that creates many, many, many annoying issues, especially when classifying test cases
        if (readIndividualsResults) {
            if (!resultsFilesParametersInitialised)
                throw new Exception("Trying to load "+ensembleName+" modules from file, but parameters for results file reading have not been initialised");
            loadModules(); //will throw exception if a module cannot be loaded (rather than e.g training that individual instead)
        }
        else
            trainModules(data);

        for (EnsembleModule module : modules) {
            //in case train results didnt have probability distributions, hack for old hive cote results tony todo clean
            module.trainResults.setNumClasses(numClasses);
            if (fillMissingDistsWithOneHotVectors)
                module.trainResults.populateMissingDists();

            module.trainResults.findAllStatsOnce();
        }
    }

    /**
     * Builds every module on its own dimension's data and gathers train
     * results, either sequentially or via an executor service when
     * multiThread is set. Optionally writes per-module trainFold files.
     *
     * @param data one Instances per dimension, aligned with modules
     * @throws Exception if any module build or evaluation fails
     */
    protected synchronized void trainModules(Instances[] data) throws Exception {

        //define the operations to build and evaluate each module, as a function
        //that will build the classifier and return train results for it, either
        //generated by the classifier itself or the trainEstimator
        List<Callable<ClassifierResults>> moduleBuilds = new ArrayList<>();
        int i=0;
        for (EnsembleModule module : modules) {
            final Classifier classifier = module.getClassifier();
            final Evaluator eval = trainEstimator.cloneEvaluator();
            final int fi = i; // effectively-final copy for capture in the lambda

            Callable<ClassifierResults> moduleBuild = () -> {
                ClassifierResults trainResults = null;

                if (EnhancedAbstractClassifier.classifierIsEstimatingOwnPerformance(classifier)) {
                    // Classifier estimates its own train performance as part of building.
                    classifier.buildClassifier(data[fi]);
                    trainResults = ((EnhancedAbstractClassifier)classifier).getTrainResults();
                }
                else {
                    // Evaluate first (on a clone internally), then build the final model.
                    trainResults = eval.evaluate(classifier, data[fi]);
                    classifier.buildClassifier(data[fi]);
                }

                return trainResults;
            };

            moduleBuilds.add(moduleBuild);
            i++;
        }

        //complete the operations, either threaded via the executor service or
        //locally/sequentially
        List<ClassifierResults> results = new ArrayList<>();
        if (multiThread) {
            ExecutorService executor = ThreadingUtilities.buildExecutorService(numThreads);
            boolean shutdownAfter = true;
            results = ThreadingUtilities.computeAll(executor, moduleBuilds, shutdownAfter);
        }
        else {
            for (Callable<ClassifierResults> moduleBuild : moduleBuilds)
                results.add(moduleBuild.call());
        }

        //gather back the train results, write them if needed
        for (i = 0; i < modules.length; i++) {
            modules[i].trainResults = results.get(i);

            if (writeIndividualsResults) { //if we're doing trainFold# file writing
                String params = modules[i].getParameters();
                if (modules[i].getClassifier() instanceof EnhancedAbstractClassifier)
                    params = ((EnhancedAbstractClassifier)modules[i].getClassifier()).getParameters();
                writeResultsFile(modules[i].getModuleName(), params, modules[i].trainResults, "train"); //write results out
            }
        }
    }

    /**
     * Splits the multivariate data by dimension, configures one module per
     * dimension, trains or loads the modules, defines weighting/voting, and
     * records the build time (summing module-reported times when results were
     * read from file).
     *
     * @param data multivariate training data
     * @throws Exception on capability violations, misaligned results paths,
     *                   or module build/load failure
     */
    @Override
    public void buildClassifier(Instances data) throws Exception {
        printlnDebug("**MTSC ENSEMBLE TRAIN: " + ensembleName + "**");

        System.out.println("DATA NAME ="+data.relationName());
        Instances[] splitData = MultivariateInstanceTools.splitMultivariateInstances(data);

        // can classifier handle the data?
        for (int i=0;i<splitData.length;i++)
            getCapabilities().testWithFail(splitData[i]);

        long startTime = System.nanoTime();

        // One module per dimension; configured by the concrete subclass.
        this.setupMultivariateEnsembleSettings(splitData.length);

        //housekeeping
        if (resultsFilesParametersInitialised) {
            if (readResultsFilesDirectories.length > 1)
                if (readResultsFilesDirectories.length != modules.length)
                    throw new Exception("Ensemble, " + this.getClass().getSimpleName() + ".buildClassifier: " + "more than one results path given, but number given does not align with the number of classifiers/modules.");

            if (writeResultsFilesDirectory == null)
                writeResultsFilesDirectory = readResultsFilesDirectories[0];
        }

        //init
        this.numTrainInsts = data.numInstances();
        this.numClasses = data.numClasses();
        // NOTE(review): hard-coded to 2 — confirm this is the intended value
        // for numAttributes in the multivariate setting.
        this.numAttributes = 2;

        //set up modules
        initialiseModules(splitData,data.numClasses());

        //if modules' results are being read in from file, ignore the i/o overhead
        //of loading the results, we'll sum the actual buildtimes of each module as
        //reported in the files
        if (readIndividualsResults)
            startTime = System.nanoTime();

        //set up ensemble
        weightingScheme.defineWeightings(modules, numClasses);
        votingScheme.trainVotingScheme(modules, numClasses);

        buildTime = System.nanoTime() - startTime;
        if (readIndividualsResults) {
            //we need to sum the modules' reported build time as well as the weight
            //and voting definition time
            for (EnsembleModule module : modules) {
                buildTime += module.trainResults.getBuildTimeInNanos();

                //TODO see other todo in trainModules also. Currently working under
                //assumption that the estimate time is already accounted for in the build
                //time of TrainAccuracyEstimators, i.e. those classifiers that will
                //estimate their own accuracy during the normal course of training
                if (!EnhancedAbstractClassifier.classifierIsEstimatingOwnPerformance(module.getClassifier()))
                    buildTime += module.trainResults.getErrorEstimateTime();
            }
        }

        trainResults = new ClassifierResults();
        trainResults.setTimeUnit(TimeUnit.NANOSECONDS);

        if(getEstimateOwnPerformance())
            trainResults = estimateEnsemblePerformance(data); //combine modules to find overall ensemble trainpreds

        //HACK FOR CAWPE_EXTENSION PAPER:
        //since experiments expects us to make a train results object
        //and for us to record our build time, going to record it here instead of
        //editing experiments to record the buildTime at that level

        //buildTime does not include the ensemble's trainEstimator in any case, only the work required to be ready for testing
        //time unit has been set in estimateEnsemblePerformance(data);
        trainResults.turnOffZeroTimingsErrors();
        trainResults.setBuildTime(buildTime);
        trainResults.turnOnZeroTimingsErrors();

        this.testInstCounter = 0; //prep for start of testing
        this.prevTestInstance = null;
    }

    /**
     * Loads each module's train (and, when present, test) results from its
     * per-dimension results files. All missing/invalid files are collected
     * into a single ErrorReport which is thrown at the end, rather than
     * failing on the first problem.
     *
     * @throws Exception aggregating every file that could not be loaded
     */
    @Override
    protected void loadModules() throws Exception {
        //will look for all files and report all that are missing, instead of bailing on the first file not found
        //just helps debugging/running experiments a little
        ErrorReport errors = new ErrorReport("Errors while loading modules from file. Directories given: " + Arrays.toString(readResultsFilesDirectories));

        //for each module
        for(int m = 0; m < this.modules.length; m++){
            // Single shared directory, or one directory per module.
            String readResultsFilesDirectory = readResultsFilesDirectories.length == 1 ? readResultsFilesDirectories[0] : readResultsFilesDirectories[m];

            boolean trainResultsLoaded = false;
            boolean testResultsLoaded = false;

            //try and load in the train/test results for this module
            File moduleTrainResultsFile = findResultsFile(readResultsFilesDirectory, modules[m].getModuleName(), "train", (m+1));
            if (moduleTrainResultsFile != null) {
                printlnDebug(modules[m].getModuleName() + " train loading... " + moduleTrainResultsFile.getAbsolutePath());

                modules[m].trainResults = new ClassifierResults(moduleTrainResultsFile.getAbsolutePath());
                trainResultsLoaded = true;
            }

            File moduleTestResultsFile = findResultsFile(readResultsFilesDirectory, modules[m].getModuleName(), "test", (m+1));
            if (moduleTestResultsFile != null) {
                //of course these results not actually used at all during training,
                //only loaded for future use when classifying with ensemble
                printlnDebug(modules[m].getModuleName() + " test loading..." + moduleTestResultsFile.getAbsolutePath());

                modules[m].testResults = new ClassifierResults(moduleTestResultsFile.getAbsolutePath());

                numTestInsts = modules[m].testResults.numInstances();
                testResultsLoaded = true;
            }

            if (!trainResultsLoaded)
                errors.log("\nTRAIN results files for '" + modules[m].getModuleName() + "' on '" + datasetName + "' fold '" + seed + "' not found. ");
            else if (needIndividualTrainPreds() && modules[m].trainResults.getProbabilityDistributions().isEmpty())
                errors.log("\nNo pred/distribution for instance data found in TRAIN results file for '" + modules[m].getModuleName() + "' on '" + datasetName + "' fold '" + seed + "'. ");

            if (!testResultsLoaded)
                errors.log("\nTEST results files for '" + modules[m].getModuleName() + "' on '" + datasetName + "' fold '" + seed + "' not found. ");
            else if (modules[m].testResults.numInstances()==0)
                errors.log("\nNo prediction data found in TEST results file for '" + modules[m].getModuleName() + "' on '" + datasetName + "' fold '" + seed + "'. ");
        }

        errors.throwIfErrors();
    }

    /**
     * Resolves the results file for one classifier/dimension/split, using the
     * layout {dir}{classifier}/Predictions/{dataset}Dimension{d}/{train|test}Fold{seed}.csv.
     *
     * @param readResultsFilesDirectory base results directory
     * @param classifierName            module (classifier) name
     * @param trainOrTest               "train" or "test"
     * @param dimension                 1-based dimension index
     * @return the file, or null if it does not exist or is empty
     */
    protected File findResultsFile(String readResultsFilesDirectory, String classifierName, String trainOrTest, int dimension) {
        File file = new File(readResultsFilesDirectory+classifierName+"/Predictions/"+datasetName+"Dimension"+(dimension)+"/"+trainOrTest+"Fold"+seed+".csv");
        if(!file.exists() || file.length() == 0)
            return null;
        else
            return file;
    }

    /**
     * Configures this ensemble's modules for the given number of dimensions.
     *
     * @param instancesLength number of dimensions (one module per dimension)
     */
    protected abstract void setupMultivariateEnsembleSettings(int instancesLength);
}
12,380
44.351648
187
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/multivariate/MultivariateHiveCote.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.classifiers.multivariate; import evaluation.evaluators.CrossValidationEvaluator; import machine_learning.classifiers.ensembles.voting.MajorityConfidence; import machine_learning.classifiers.ensembles.weightings.EqualWeighting; import machine_learning.classifiers.ensembles.weightings.TrainAcc; import tsml.classifiers.hybrids.HIVE_COTE; import weka.classifiers.Classifier; import weka.core.Capabilities; public class MultivariateHiveCote extends MultivariateAbstractEnsemble { private String resultsPath; private String dataset; private int fold; public MultivariateHiveCote(String resultsPath, String dataset, int fold) { this.resultsPath = resultsPath; this.dataset = dataset; this.fold = fold; } @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes must be numeric // Here add in relational when ready result.enable(Capabilities.Capability.NUMERIC_ATTRIBUTES); // class result.enable(Capabilities.Capability.NOMINAL_CLASS); result.enable(Capabilities.Capability.MISSING_VALUES); // instances result.setMinimumNumberInstances(1); return result; } @Override protected void setupMultivariateEnsembleSettings(int instancesLength) { this.ensembleName = "MTSC_HC_I"; 
this.weightingScheme = new TrainAcc(4); this.votingScheme = new MajorityConfidence(); this.transform = null; CrossValidationEvaluator cv = new CrossValidationEvaluator(seed, false, false, false, false); cv.setNumFolds(10); this.trainEstimator = cv; Classifier[] classifiers = new Classifier[instancesLength]; String[] classifierNames = new String[instancesLength]; for (int i=0;i<instancesLength;i++){ String[] cls={"TSF","cBOSS","RISE","STC"}; classifiers[i] = new HIVE_COTE(); ((HIVE_COTE)classifiers[i]).setFillMissingDistsWithOneHotVectors(true); ((HIVE_COTE)classifiers[i]).setSeed(fold); ((HIVE_COTE)classifiers[i]).setBuildIndividualsFromResultsFiles(true); ((HIVE_COTE)classifiers[i]).setResultsFileLocationParameters(resultsPath, dataset+"Dimension"+(i+1), fold); ((HIVE_COTE)classifiers[i]).setClassifiersNamesForFileRead(cls); classifierNames[i] = "HC-Channel-" + (i+1); } setClassifiers(classifiers, classifierNames, null); } }
3,392
38.453488
123
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/multivariate/MultivariateShapeletTransformClassifier.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package tsml.classifiers.multivariate;

import tsml.classifiers.*;
import tsml.transformers.shapelet_tools.ShapeletTransformFactory;
import tsml.filters.shapelet_filters.ShapeletFilter;
import tsml.transformers.shapelet_tools.ShapeletTransformFactoryOptions;
import tsml.transformers.shapelet_tools.ShapeletTransformTimingUtilities;

import java.io.File;
import java.util.concurrent.TimeUnit;

import utilities.InstanceTools;
import machine_learning.classifiers.ensembles.CAWPE;
import weka.core.Instance;
import weka.core.Instances;
import tsml.transformers.shapelet_tools.search_functions.ShapeletSearch;
import tsml.transformers.shapelet_tools.search_functions.ShapeletSearch.SearchType;
import machine_learning.classifiers.ensembles.voting.MajorityConfidence;
import machine_learning.classifiers.ensembles.weightings.TrainAcc;
import tsml.transformers.shapelet_tools.DefaultShapeletOptions;
import evaluation.storage.ClassifierResults;
import experiments.data.DatasetLoading;
import weka.classifiers.Classifier;
import weka.classifiers.bayes.NaiveBayes;
import weka.classifiers.functions.SMO;
import weka.classifiers.functions.supportVector.PolyKernel;
import weka.classifiers.lazy.IBk;
import weka.classifiers.meta.RotationForest;
import weka.classifiers.trees.J48;
import weka.classifiers.trees.RandomForest;
import tsml.classifiers.TrainTimeContractable;

/**
 * Multivariate shapelet transform classifier.
 *
 * By default, performs a shapelet transform (dependent, independent, or
 * per-dimension, see {@link TransformType}) then classifies the transformed
 * data with the heterogeneous ensemble CAWPE (SVMQ, RandF, RotF, NN, NB, C45,
 * SVML). Can be contracted to a maximum run time for the shapelet search.
 *
 * @author raj09hxu
 */
public class MultivariateShapeletTransformClassifier extends EnhancedAbstractClassifier
        implements TrainTimeContractable, Checkpointable {

    //Minimum number of instances per class in the train set
    public static final int minimumRepresentation = 25;
    private static int MAXTRANSFORMSIZE = 1000; //Default number in transform

    private boolean preferShortShapelets = false;
    private String shapeletOutputPath;
    private CAWPE ensemble;
    private ShapeletFilter transform;
    private Instances format; // empty header of the transformed data, used at predict time
    int[] redundantFeatures;
    private boolean doTransform = true;
    private long transformBuildTime;
    protected ClassifierResults res = new ClassifierResults();
    int numShapeletsInTransform = MAXTRANSFORMSIZE;
    private SearchType searchType = SearchType.IMPROVED_RANDOM;
    private long numShapelets = 0;
    private long trainContractTimeNanos = Long.MAX_VALUE;
    private boolean trainTimeContract = false;
    private String checkpointFullPath; //location to check point
    private boolean checkpoint = false;

    /** How the multivariate shapelets are found: per-dimension independently,
     *  dependent (whole-vector) or independent multivariate. */
    enum TransformType { INDEP, MULTI_D, MULTI_I };

    TransformType type = TransformType.MULTI_D;

    public void setTransformType(TransformType t) {
        type = t;
    }

    /**
     * String-based setter for the transform type; unrecognised values leave
     * the current type unchanged.
     *
     * @param t case-insensitive type name, e.g. "shapeletD", "shapeletI", "indep"
     */
    public void setTransformType(String t) {
        t = t.toLowerCase();
        switch (t) {
            case "shapeletd":
            case "shapelet_d":
            case "dependent":
                type = TransformType.MULTI_D;
                break;
            case "shapeleti":
            case "shapelet_i":
                type = TransformType.MULTI_I;
                break;
            case "indep":
            case "shapelet_indep":
            case "shapeletindep":
                type = TransformType.INDEP;
                break;
        }
    }

    public MultivariateShapeletTransformClassifier() {
        super(CANNOT_ESTIMATE_OWN_PERFORMANCE);
        configureDefaultEnsemble();
    }

    //careful when setting search type as you could set a type that violates the contract.
    public void setSearchType(ShapeletSearch.SearchType type) {
        searchType = type;
    }

    @Override
    public ClassifierResults getTrainResults() {
        return ensemble.getTrainResults();
    }

    @Override
    public String getParameters() {
        String paras = transform.getParameters();
        String ens = this.ensemble.getParameters();
        return super.getParameters() + ",CVAcc," + res.getAcc() + ",TransformBuildTime," + transformBuildTime + ",timeLimit," + trainContractTimeNanos + ",TransformParas," + paras + ",EnsembleParas," + ens;
    }

    public double getTrainAcc() {
        return ensemble.getTrainResults().getAcc();
    }

    public double[] getTrainPreds() {
        return ensemble.getTrainResults().getPredClassValsAsArray();
    }

    /** Enable/disable the shapelet transform (disabled = train on raw data). */
    public void doSTransform(boolean b) {
        doTransform = b;
    }

    public long getTransformOpCount() {
        return transform.getCount();
    }

    /** Applies the fitted transform; returns null if it has not been fitted yet. */
    public Instances transformDataset(Instances data) {
        if (transform.isFirstBatchDone())
            return transform.process(data);
        return null;
    }

    @Override
    public void setTrainTimeLimit(long amount) {
        trainContractTimeNanos = amount;
        // FIX: was `trainTimeContract = false`, which disabled the contract the
        // caller just requested.
        trainTimeContract = true;
    }

    // NOTE(review): compares the passed value directly against the limit rather
    // than an elapsed duration — confirm callers pass elapsed nanos here.
    @Override
    public boolean withinTrainContract(long start) {
        return start < trainContractTimeNanos;
    }

    public void setNumberOfShapelets(long numS) {
        numShapelets = numS;
    }

    /**
     * Builds the (optional) shapelet transform, removes redundant attributes,
     * then trains the CAWPE ensemble on the transformed data.
     *
     * @param data training instances
     * @throws Exception if the transform or ensemble build fails
     */
    @Override
    public void buildClassifier(Instances data) throws Exception {
        if (checkpoint) {
            buildCheckpointClassifier(data);
        } else {
            long startTime = System.nanoTime();
            format = doTransform ? createTransformData(data, trainContractTimeNanos) : data;
            transformBuildTime = System.nanoTime() - startTime;
            if (seedClassifier)
                ensemble.setSeed((int) seed);
            redundantFeatures = InstanceTools.removeRedundantTrainAttributes(format);

            ensemble.buildClassifier(format);
            // Keep only the empty header for classifying new instances.
            format = new Instances(data, 0);
            res.setTimeUnit(TimeUnit.NANOSECONDS);
            // FIX: was `System.currentTimeMillis() - startTime`, mixing the
            // millisecond wall clock with a nanoTime() origin — the result was
            // meaningless. Both ends now use the monotonic nanosecond clock.
            res.setBuildTime(System.nanoTime() - startTime);
        }
    }

    // NOTE(review): incomplete checkpointing stub — `format` is never set
    // before the ensemble build here; confirm before enabling checkpointing.
    private void buildCheckpointClassifier(Instances data) throws Exception {
        //Load file if one exists
        //Set timer options
        //Sample shapelets until checkpoint time
        //Save to file
        //When finished, build classifier
        ensemble.buildClassifier(format);
        format = new Instances(data, 0);
    }

    /**
     * Configures the seven-member CAWPE ensemble used in the HIVE-COTE paper:
     * SVMQ, RandF, RotF, NN, NB, C45, SVML, weighted by train accuracy^4 with
     * majority-confidence voting.
     */
    public void configureDefaultEnsemble() {
        ensemble = new CAWPE();
        ensemble.setWeightingScheme(new TrainAcc(4));
        ensemble.setVotingScheme(new MajorityConfidence());
        Classifier[] classifiers = new Classifier[7];
        String[] classifierNames = new String[7];

        SMO smo = new SMO();
        smo.turnChecksOff();
        smo.setBuildLogisticModels(true);
        PolyKernel kl = new PolyKernel();
        kl.setExponent(2);
        smo.setKernel(kl);
        if (seedClassifier)
            smo.setRandomSeed((int) seed);
        classifiers[0] = smo;
        classifierNames[0] = "SVMQ";

        RandomForest r = new RandomForest();
        r.setNumTrees(500);
        if (seedClassifier)
            r.setSeed((int) seed);
        classifiers[1] = r;
        classifierNames[1] = "RandF";

        RotationForest rf = new RotationForest();
        rf.setNumIterations(100);
        if (seedClassifier)
            rf.setSeed((int) seed);
        classifiers[2] = rf;
        classifierNames[2] = "RotF";

        IBk nn = new IBk();
        classifiers[3] = nn;
        classifierNames[3] = "NN";

        NaiveBayes nb = new NaiveBayes();
        classifiers[4] = nb;
        classifierNames[4] = "NB";

        J48 c45 = new J48();
        classifiers[5] = c45;
        classifierNames[5] = "C45";

        SMO svml = new SMO();
        svml.turnChecksOff();
        svml.setBuildLogisticModels(true);
        PolyKernel k2 = new PolyKernel();
        k2.setExponent(1);
        // FIX: was `smo.setKernel(k2)`, a copy-paste slip that overwrote the
        // SVMQ member's quadratic kernel and left SVML on its default kernel.
        svml.setKernel(k2);
        classifiers[6] = svml;
        classifierNames[6] = "SVML";

        ensemble.setClassifiers(classifiers, classifierNames, null);
    }

    /**
     * Smaller three-member ensemble (SVMQ, RandF, RotF), used when working
     * within the time constraints of the problem.
     */
    public void configureEnsemble() {
        ensemble.setWeightingScheme(new TrainAcc(4));
        ensemble.setVotingScheme(new MajorityConfidence());

        Classifier[] classifiers = new Classifier[3];
        String[] classifierNames = new String[3];

        SMO smo = new SMO();
        smo.turnChecksOff();
        smo.setBuildLogisticModels(true);
        PolyKernel kl = new PolyKernel();
        kl.setExponent(2);
        smo.setKernel(kl);
        if (seedClassifier)
            smo.setRandomSeed((int) seed);
        classifiers[0] = smo;
        classifierNames[0] = "SVMQ";

        RandomForest r = new RandomForest();
        r.setNumTrees(500);
        if (seedClassifier)
            r.setSeed((int) seed);
        classifiers[1] = r;
        classifierNames[1] = "RandF";

        RotationForest rf = new RotationForest();
        rf.setNumIterations(100);
        if (seedClassifier)
            rf.setSeed((int) seed);
        classifiers[2] = rf;
        classifierNames[2] = "RotF";

        ensemble.setClassifiers(classifiers, classifierNames, null);
    }

    /**
     * Transforms the single test instance with the fitted shapelet transform,
     * drops the attributes removed at train time, and delegates to the ensemble.
     */
    @Override
    public double classifyInstance(Instance ins) throws Exception {
        format.add(ins);

        Instances temp = doTransform ? transform.process(format) : format;
        //Delete redundant
        for (int del : redundantFeatures)
            temp.deleteAttributeAt(del);

        Instance test = temp.get(0);
        format.remove(0);
        return ensemble.classifyInstance(test);
    }

    /**
     * As {@link #classifyInstance(Instance)} but returns the ensemble's class
     * distribution.
     */
    @Override
    public double[] distributionForInstance(Instance ins) throws Exception {
        format.add(ins);

        Instances temp = doTransform ? transform.process(format) : format;
        //Delete redundant
        for (int del : redundantFeatures)
            temp.deleteAttributeAt(del);

        Instance test = temp.get(0);
        format.remove(0);
        return ensemble.distributionForInstance(test);
    }

    public void setShapeletOutputFilePath(String path) {
        shapeletOutputPath = path;
    }

    public void preferShortShapelets() {
        preferShortShapelets = true;
    }

    /**
     * Fits the shapelet transform for the configured {@link TransformType} and
     * returns the transformed training data.
     *
     * NOTE(review): the `time` parameter is currently ignored — the factory
     * options are always built with a one-day budget
     * (ShapeletTransformTimingUtilities.dayNano); confirm whether the contract
     * time should be plumbed through instead.
     *
     * @param train training instances
     * @param time  intended time budget in nanoseconds (currently unused)
     * @return the shapelet-transformed training data
     */
    public Instances createTransformData(Instances train, long time) {
        int n = train.numInstances();
        int m = train.numAttributes() - 1;

        // Cap the transform size at one shapelet per (series, timepoint) pair.
        if (n * m < numShapeletsInTransform)
            numShapeletsInTransform = n * m;

        ShapeletTransformFactoryOptions options;
        switch (type) {
            case INDEP:
                options = DefaultShapeletOptions.TIMED_FACTORY_OPTIONS.get("INDEPENDENT").apply(train, ShapeletTransformTimingUtilities.dayNano, (long) seed);
                break;
            case MULTI_D:
                options = DefaultShapeletOptions.TIMED_FACTORY_OPTIONS.get("SHAPELET_D").apply(train, ShapeletTransformTimingUtilities.dayNano, (long) seed);
                break;
            case MULTI_I:
            default:
                options = DefaultShapeletOptions.TIMED_FACTORY_OPTIONS.get("SHAPELET_I").apply(train, ShapeletTransformTimingUtilities.dayNano, (long) seed);
                break;
        }

        transform = new ShapeletTransformFactory(options).getFilter();
        if (shapeletOutputPath != null)
            transform.setLogOutputFile(shapeletOutputPath);

        return transform.process(train);
    }

    public static void main(String[] args) throws Exception {
        String dataLocation = "C:\\Temp\\MTSC\\";
        String saveLocation = "C:\\Temp\\MTSC\\";
        String datasetName = "ERing";
        int fold = 0;

        Instances train = DatasetLoading.loadDataNullable(dataLocation + datasetName + File.separator + datasetName + "_TRAIN");
        Instances test = DatasetLoading.loadDataNullable(dataLocation + datasetName + File.separator + datasetName + "_TEST");
        String trainS = saveLocation + datasetName + File.separator + "TrainCV.csv";
        String testS = saveLocation + datasetName + File.separator + "TestPreds.csv";
        String preds = saveLocation + datasetName;

        MultivariateShapeletTransformClassifier st = new MultivariateShapeletTransformClassifier();
        st.doSTransform(true);
        st.setOneMinuteLimit();
        st.buildClassifier(train);
        double accuracy = utilities.ClassifierTools.accuracy(test, st);

        System.out.println("accuracy: " + accuracy);
    }

    /**
     * Checkpoint methods
     */
    public boolean setCheckpointPath(String path) {
        boolean validPath = Checkpointable.super.createDirectories(path);
        if (validPath) {
            this.checkpointFullPath = path;
        }
        return validPath;
    }

    /**
     * Copies the configuration of another (unbuilt) instance into this one.
     *
     * @param obj must be a MultivariateShapeletTransformClassifier
     * @throws Exception if obj is of the wrong type
     */
    public void copyFromSerObject(Object obj) throws Exception {
        if (!(obj instanceof MultivariateShapeletTransformClassifier))
            throw new Exception("Not a ShapeletTransformClassifier object");

        //Copy meta data
        MultivariateShapeletTransformClassifier st = (MultivariateShapeletTransformClassifier) obj;
        //We assume the classifiers have not been built, so are basically copying over the set up
        ensemble = st.ensemble;
        preferShortShapelets = st.preferShortShapelets;
        shapeletOutputPath = st.shapeletOutputPath;
        transform = st.transform;
        format = st.format;
        // FIX: was `int[] redundantFeatures = st.redundantFeatures;` — the local
        // declaration shadowed the field, so it was never actually copied.
        redundantFeatures = st.redundantFeatures;
        doTransform = st.doTransform;
        transformBuildTime = st.transformBuildTime;
        res = st.res;
        numShapeletsInTransform = st.numShapeletsInTransform;
        searchType = st.searchType;
        numShapelets = st.numShapelets;
        seed = st.seed;
        seedClassifier = st.seedClassifier;
        trainContractTimeNanos = st.trainContractTimeNanos;
    }
}
15,294
34.487239
188
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/multivariate/MultivariateSingleEnsemble.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.classifiers.multivariate; import evaluation.evaluators.CrossValidationEvaluator; import machine_learning.classifiers.ensembles.voting.MajorityConfidence; import machine_learning.classifiers.ensembles.weightings.TrainAcc; import tsml.classifiers.dictionary_based.cBOSS; import tsml.classifiers.interval_based.RISE; import tsml.classifiers.interval_based.TSF; import tsml.classifiers.shapelet_based.ShapeletTransformClassifier; import weka.classifiers.Classifier; public class MultivariateSingleEnsemble extends MultivariateAbstractEnsemble { private String resultsPath; private String dataset; private int fold; private String classifierName; public MultivariateSingleEnsemble(String classifierName, String resultsPath, String dataset, int fold) { this.classifierName = classifierName; this.resultsPath = resultsPath; this.dataset = dataset; this.fold = fold; this.setBuildIndividualsFromResultsFiles(true); this.setResultsFileLocationParameters(resultsPath,dataset, fold); setSeed(fold); setDebug(true); } @Override protected void setupMultivariateEnsembleSettings(int instancesLength) { this.ensembleName = "MTSC_"+ this.classifierName +"_I"; // this.weightingScheme = new EqualWeighting(); // this.votingScheme = new MajorityConfidence(); 
this.weightingScheme = new TrainAcc(4); this.votingScheme = new MajorityConfidence(); this.transform = null; CrossValidationEvaluator cv = new CrossValidationEvaluator(seed, false, false, false, false); cv.setNumFolds(10); this.trainEstimator = cv; Classifier[] classifiers = new Classifier[instancesLength]; String[] classifierNames = new String[instancesLength]; for (int i=0;i<instancesLength;i++){ classifiers[i] = getClassifierFromString(); classifierNames[i] = this.classifierName; } setClassifiers(classifiers, classifierNames, null); } private Classifier getClassifierFromString(){ switch (this.classifierName){ case "cBOSS": return new cBOSS(); case "RISE": return new RISE(); case "STC": return new ShapeletTransformClassifier(); case "TSF": return new TSF(); default: return new TSF(); } } }
3,163
36.666667
108
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/multivariate/NN_DTW_A.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.classifiers.multivariate; import tsml.classifiers.legacy.elastic_ensemble.distance_functions.DTW_D; import tsml.classifiers.legacy.elastic_ensemble.distance_functions.DTW_I; import java.util.ArrayList; import java.util.Collections; import java.util.List; import utilities.generic_storage.Pair; import weka.core.Instance; import weka.core.Instances; import tsml.classifiers.legacy.elastic_ensemble.distance_functions.DTW_DistanceBasic; import static utilities.InstanceTools.findMinDistance; /** * * @author ABostrom */ public class NN_DTW_A extends MultivariateAbstractClassifier{ Instances train; public double threshold; DTW_DistanceBasic I; DTW_DistanceBasic D; double R; public NN_DTW_A(){ I = new DTW_I(); D = new DTW_D(); } public void setR(double r){ R = r; I.setR(R); D.setR(R); } @Override public void buildClassifier(Instances data) throws Exception{ testWithFailRelationalInstances(data); train = data; threshold = learnThreshold(train); System.out.println("threshold = " + threshold); //build DTW_A. doesn't matter what function it uses for building as its' lazy. //default A to support a distance function of some kind. 
} @Override public double classifyInstance(Instance instance) throws Exception{ testWithFailRelationalInstance(instance); Pair<Instance, Double> minD = findMinDistance(train, instance, D); Pair<Instance, Double> minI = findMinDistance(train, instance, I); //System.out.println("minD = " + minD + "minI = " + minI); double S = minD.var2 / (minI.var2 + 0.000000001); double out = S > threshold ? minI.var1.classValue() : minD.var1.classValue(); //System.out.println("minD " + minD.var2 + " minI "+ minI.var2 + " S " + S); return out; } public double learnThreshold(Instances data){ Pair<List<Double>, List<Double>> scores = findScores(data); List<Double> S_dSuccess = scores.var1; List<Double> S_iSuccess = scores.var2; double output; if(S_iSuccess.isEmpty() && S_dSuccess.isEmpty()) output= 1; else if(!S_iSuccess.isEmpty() && S_dSuccess.isEmpty()) output = Collections.min(S_iSuccess) -0.1; //they take off 0.1 else if(S_iSuccess.isEmpty() && !S_dSuccess.isEmpty()) output = Collections.max(S_dSuccess) + 0.1; //they add on 0.1 else output = calculateThreshold(S_dSuccess, S_iSuccess); return output; } double calculateThreshold(List<Double> dSuccess, List<Double> iSuccess){ double output = 0; //trying to minimse common int common = iSuccess.size() + dSuccess.size(); for (int j = 0;j<dSuccess.size();j++){ int in = 0; int dp = 0; for (int i = 0;i<dSuccess.size();i++){ if (dSuccess.get(i) >= dSuccess.get(j)){ dp++; } } for (int i = 0;i<iSuccess.size();i++){ if (iSuccess.get(i) < dSuccess.get(j)){ in++; } } if (in+dp < common){ common = in+dp; output = dSuccess.get(j); } } for (int j = 0; j<iSuccess.size();j++){ int in = 0; int dp = 0; for (int i = 0;i<dSuccess.size();i++){ if (dSuccess.get(i) >= iSuccess.get(j)){ dp++; } } for (int i = 0;i<iSuccess.size();i++){ if (iSuccess.get(i) < iSuccess.get(j)){ in++; } } if (in+dp < common){ common = in+dp; output = iSuccess.get(j); } } return output; } Pair<List<Double>, List<Double>> findScores(Instances data){ List<Double> S_dSuccess = new 
ArrayList<>(); List<Double> S_iSuccess = new ArrayList<>(); for(int i=0; i<data.numInstances(); i++){ try { //LOOCV search for distances. Instances cv_train = data.trainCV(data.numInstances(), i); Instances cv_test = data.testCV(data.numInstances(), i); Instance test = cv_test.firstInstance(); Pair<Instance, Double> pair_D = findMinDistance(cv_train, test, D); Pair<Instance, Double> pair_I = findMinDistance(cv_train, test, I); //we know we only have one instance. double pred_d = pair_D.var1.classValue(); double pred_i = pair_I.var1.classValue(); double dist_d = pair_D.var2; double dist_i = pair_I.var2; double S = dist_d / (dist_i+0.000000001); //if d is correct and i is incorrect. if(test.classValue() == pred_d && test.classValue() != pred_i) S_dSuccess.add(S); //if d is incorrect and i is correct. if(test.classValue() != pred_d && test.classValue() == pred_i) S_iSuccess.add(S); } catch (Exception ex) { System.out.println(ex); } } return new Pair(S_dSuccess, S_iSuccess); } @Override public String toString(){ return "threshold="+threshold; } }
6,509
32.384615
86
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/multivariate/NN_DTW_D.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.classifiers.multivariate; import tsml.classifiers.distance_based.NN_CID; import tsml.classifiers.legacy.elastic_ensemble.distance_functions.DTW_D; import static utilities.InstanceTools.findMinDistance; import utilities.generic_storage.Pair; import weka.core.Instance; import weka.core.Instances; /** * * @author raj09hxu */ public class NN_DTW_D extends MultivariateAbstractClassifier{ Instances train; DTW_D D; public NN_DTW_D(){ D = new DTW_D(); } public void setR(double r){ D.setR(r); } @Override public void buildClassifier(Instances data) throws Exception { testWithFailRelationalInstances(data); train = data; } @Override public double classifyInstance(Instance instance) throws Exception{ testWithFailRelationalInstance(instance); Pair<Instance, Double> minD = findMinDistance(train, instance, D); return minD.var1.classValue(); } }
1,754
28.25
76
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/multivariate/NN_DTW_I.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.classifiers.multivariate; import tsml.classifiers.legacy.elastic_ensemble.distance_functions.DTW_I; import static utilities.InstanceTools.findMinDistance; import utilities.generic_storage.Pair; import weka.core.Instance; import weka.core.Instances; /** * * @author raj09hxu */ public class NN_DTW_I extends MultivariateAbstractClassifier{ Instances train; DTW_I I; public NN_DTW_I(){ I = new DTW_I(); } public void setR(double r){ I.setR(r); } @Override public void buildClassifier(Instances data) throws Exception { testWithFailRelationalInstances(data); train = data; } @Override public double classifyInstance(Instance instance) throws Exception{ testWithFailRelationalInstance(instance); Pair<Instance, Double> minD = findMinDistance(train, instance, I); return minD.var1.classValue(); } }
1,697
28.789474
76
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/multivariate/NN_ED_D.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.classifiers.multivariate; import tsml.classifiers.legacy.elastic_ensemble.distance_functions.EuclideanDistance_D; import static utilities.InstanceTools.findMinDistance; import utilities.generic_storage.Pair; import weka.core.Instance; import weka.core.Instances; /** * * @author Aaron */ public class NN_ED_D extends MultivariateAbstractClassifier{ Instances train; EuclideanDistance_D D; public NN_ED_D(){ D = new EuclideanDistance_D(); } @Override public void buildClassifier(Instances data) throws Exception { testWithFailRelationalInstances(data); train = data; } @Override public double classifyInstance(Instance instance) throws Exception{ testWithFailRelationalInstance(instance); Pair<Instance, Double> minD = findMinDistance(train, instance, D); return minD.var1.classValue(); } }
1,675
31.230769
87
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/multivariate/NN_ED_I.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.classifiers.multivariate; import tsml.classifiers.legacy.elastic_ensemble.distance_functions.EuclideanDistance_I; import static utilities.InstanceTools.findMinDistance; import utilities.generic_storage.Pair; import weka.core.Instance; import weka.core.Instances; /** * * @author Aaron */ public class NN_ED_I extends MultivariateAbstractClassifier{ Instances train; EuclideanDistance_I I; public NN_ED_I(){ I = new EuclideanDistance_I(); } @Override public void buildClassifier(Instances data) throws Exception { testWithFailRelationalInstances(data); train = data; } @Override public double classifyInstance(Instance instance) throws Exception{ testWithFailRelationalInstance(instance); Pair<Instance, Double> minD = findMinDistance(train, instance, I); return minD.var1.classValue(); } }
1,672
30.566038
87
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/multivariate/STC_D.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package tsml.classifiers.multivariate;

import evaluation.evaluators.CrossValidationEvaluator;
import experiments.data.DatasetLoading;
import machine_learning.classifiers.ensembles.ContractRotationForest;
import tsml.classifiers.EnhancedAbstractClassifier;
import tsml.data_containers.TimeSeriesInstance;
import tsml.data_containers.TimeSeriesInstances;
import tsml.data_containers.utilities.Converter;
import tsml.transformers.ShapeletTransform;
import tsml.transformers.shapelet_tools.ShapeletTransformFactory;
import tsml.transformers.shapelet_tools.ShapeletTransformFactoryOptions.ShapeletTransformOptions;
import tsml.transformers.shapelet_tools.ShapeletTransformTimingUtilities;
import tsml.transformers.shapelet_tools.distance_functions.ShapeletDistance;
import tsml.transformers.shapelet_tools.quality_measures.ShapeletQuality;
import tsml.transformers.shapelet_tools.search_functions.ShapeletSearch;
import tsml.transformers.shapelet_tools.search_functions.ShapeletSearch.SearchType;
import tsml.transformers.shapelet_tools.search_functions.ShapeletSearchOptions;
import utilities.ClassifierTools;
import utilities.InstanceTools;
import weka.core.Instance;
import weka.core.Instances;

import java.math.BigDecimal;
import java.math.BigInteger;
import java.math.MathContext;
import java.util.concurrent.TimeUnit;

import static tsml.transformers.shapelet_tools.ShapeletTransformTimingUtilities.nanoToOp;
import static utilities.InstanceTools.resampleTrainAndTestInstances;

/**
 * Shapelet Transform Classifier using the DEPENDENT multivariate shapelet
 * distance (STC_D): the data is shapelet-transformed (4-hour contract on the
 * transform) and the resulting tabular data is classified with a contracted
 * Rotation Forest (up to 200 trees).
 *
 * Optionally estimates its own train performance via cross-validation of the
 * Rotation Forest on the transformed data.
 *
 * @author Matthew Middlehurst
 */
public class STC_D extends EnhancedAbstractClassifier {

    // Final classifier applied to the shapelet-transformed data.
    private ContractRotationForest classifier;
    // Fitted shapelet transform; also reused at prediction time.
    private ShapeletTransform transform;
    // Attribute indices removed from the transformed training data; the same
    // indices must be deleted from transformed test instances before prediction.
    private int[] redundantFeatures;
    // Nanoseconds spent building the shapelet transform (reported in getParameters()).
    private long transformBuildTime;
    // Class labels captured at train time, needed to rebuild an ARFF header for prediction.
    private String[] classLabels;

    public STC_D(){
        super(CAN_ESTIMATE_OWN_PERFORMANCE);
        classifier = new ContractRotationForest();
        classifier.setMaxNumTrees(200);
    }

    /** Appends transform build time plus transform and ensemble parameter strings. */
    @Override
    public String getParameters(){
        String paras=transform.getParameters();
        String ens=classifier.getParameters();
        return super.getParameters()+",TransformBuildTime,"+transformBuildTime+ ",TransformParas,"+paras+",EnsembleParas,"+ens;
    }

    /**
     * Builds the shapelet transform, optionally cross-validates the Rotation
     * Forest on the transformed data for a train estimate, then trains the
     * Rotation Forest. Build-time bookkeeping excludes the error-estimate time
     * when an estimate is produced.
     */
    @Override
    public void buildClassifier(TimeSeriesInstances data) throws Exception {
        long startTime = System.nanoTime();

        classLabels = data.getClassLabels();

        Instances shapeletData = createTransformData(data);
        transformBuildTime = System.nanoTime()-startTime;
        // Drop constant/duplicate attributes from the transformed data; remember
        // the indices so test instances can be treated identically.
        redundantFeatures=InstanceTools.removeRedundantTrainAttributes(shapeletData);

        if(getEstimateOwnPerformance()){
            int numFolds = setNumberOfFolds(data);
            CrossValidationEvaluator cv = new CrossValidationEvaluator();
            cv.setSeed(seed * 12);
            cv.setNumFolds(numFolds);
            trainResults = cv.crossValidateWithStats(classifier, shapeletData);
        }

        if (seedClassifier)
            classifier.setSeed(seed);
        classifier.buildClassifier(shapeletData);

        trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
        if(getEstimateOwnPerformance()){
            // Exclude CV time from the reported build time.
            trainResults.setBuildTime(System.nanoTime()-startTime - trainResults.getErrorEstimateTime());
        }
        else{
            trainResults.setBuildTime(System.nanoTime()-startTime);
        }
        trainResults.setBuildPlusEstimateTime(trainResults.getBuildTime()+trainResults.getErrorEstimateTime());
        trainResults.setParas(getParameters());
    }

    /** ARFF entry point: converts to the time-series container and delegates. */
    @Override
    public void buildClassifier(Instances data) throws Exception {
        buildClassifier(Converter.fromArff(data));
    }

    /** Transforms the instance with the fitted shapelet transform, removes the
     *  redundant attributes, and classifies with the Rotation Forest. */
    @Override
    public double classifyInstance(TimeSeriesInstance ins) throws Exception{
        Instances temp = Converter.toArff(transform.transform(ins), classLabels).dataset();

        for(int del: redundantFeatures)
            temp.deleteAttributeAt(del);

        Instance test = temp.get(0);
        return classifier.classifyInstance(test);
    }

    /** ARFF entry point: converts and delegates. */
    @Override
    public double classifyInstance(Instance ins) throws Exception {
        return classifyInstance(Converter.fromArff(ins));
    }

    /** As {@link #classifyInstance(TimeSeriesInstance)} but returns the class distribution. */
    @Override
    public double[] distributionForInstance(TimeSeriesInstance ins) throws Exception{
        Instances temp = Converter.toArff(transform.transform(ins), classLabels).dataset();

        for(int del: redundantFeatures)
            temp.deleteAttributeAt(del);

        Instance test = temp.get(0);
        return classifier.distributionForInstance(test);
    }

    /** ARFF entry point: converts and delegates. */
    @Override
    public double[] distributionForInstance(Instance ins) throws Exception {
        return distributionForInstance(Converter.fromArff(ins));
    }

    /**
     * Configures and fits the shapelet transform under a 4-hour contract and
     * returns the transformed training data as ARFF Instances.
     *
     * Uses the DEPENDENT multivariate shapelet distance, information-gain
     * quality, normalisation rescaling, and (for multi-class problems) binary
     * class values with class balancing. The number of shapelets to evaluate is
     * budgeted from an estimate of how much of the full search fits in the
     * contract; a full search is used when everything fits.
     */
    public Instances createTransformData(TimeSeriesInstances train){
        int n = train.numInstances();
        int m = train.getMaxLength();
        int d = train.getMaxNumDimensions();

        //construct shapelet classifiers from the factory.
        ShapeletTransformOptions transformOptions=new ShapeletTransformOptions();
        transformOptions.setDistanceType(ShapeletDistance.DistanceType.DEPENDENT);
        transformOptions.setQualityMeasure(ShapeletQuality.ShapeletQualityChoice.INFORMATION_GAIN);
        transformOptions.setRescalerType(ShapeletDistance.RescalerType.NORMALISATION);
        transformOptions.setRoundRobin(true);
        transformOptions.setCandidatePruning(true);
        transformOptions.setMinLength(3);
        transformOptions.setMaxLength(m);

        if(train.numClasses() > 2) {
            // Multi-class: one-vs-all quality assessment with class balancing.
            transformOptions.setBinaryClassValue(true);
            transformOptions.setClassBalancing(true);
        }else{
            transformOptions.setBinaryClassValue(false);
            transformOptions.setClassBalancing(false);
        }

        // Keep at most 10 shapelets per train case, capped at 2000.
        int numShapeletsInTransform = Math.min(10 * train.numInstances(), 2000);
        // Fixed 4-hour contract for the transform.
        long transformContractTime = TimeUnit.NANOSECONDS.convert(4, TimeUnit.HOURS);
        SearchType searchType = SearchType.RANDOM;

        long numShapeletsInProblem = ShapeletTransformTimingUtilities.calculateNumberOfShapelets(n, m, 3, m);
        double proportionToEvaluate = estimatePropOfFullSearch(n, m, d, transformContractTime);
        long numShapeletsToEvaluate;

        if(proportionToEvaluate == 1.0) {
            // Whole search space fits inside the contract: do an exhaustive search.
            searchType = ShapeletSearch.SearchType.FULL;
            numShapeletsToEvaluate = numShapeletsInProblem;
        }
        else
            numShapeletsToEvaluate = (long) (numShapeletsInProblem * proportionToEvaluate);

        // Evaluate at least one shapelet per training instance.
        if(numShapeletsToEvaluate < n)
            numShapeletsToEvaluate = n;
        // Cannot keep more shapelets than are evaluated.
        numShapeletsInTransform = numShapeletsToEvaluate > numShapeletsInTransform ? numShapeletsInTransform : (int) numShapeletsToEvaluate;
        transformOptions.setKShapelets(numShapeletsInTransform);

        ShapeletSearchOptions.Builder searchBuilder = new ShapeletSearchOptions.Builder();
        if(seedClassifier)
            searchBuilder.setSeed(2 * seed);
        searchBuilder.setMin(transformOptions.getMinLength());
        searchBuilder.setMax(transformOptions.getMaxLength());
        searchBuilder.setSearchType(searchType);
        // Per-series evaluation budget.
        searchBuilder.setNumShapeletsToEvaluate(numShapeletsToEvaluate / train.numInstances());

        transformOptions.setSearchOptions(searchBuilder.build());
        transform = new ShapeletTransformFactory(transformOptions.build()).getTransform();
        transform.setContractTime(transformContractTime);
        transform.setAdaptiveTiming(true);
        transform.setTimePerShapelet((double) transformContractTime / numShapeletsToEvaluate);
        transform.setPruneMatchingShapelets(false);

        return Converter.toArff(transform.fitTransform(train));
    }

    // Aarons way of doing it based on time for a single operation
    /**
     * Estimates the proportion of the full shapelet search that can be
     * performed within {@code time} nanoseconds, as (ops affordable in the
     * contract) / (ops required for a full search scaled by dimensionality).
     * Returns 1 when the full search fits. BigInteger/BigDecimal arithmetic is
     * used because the op counts overflow long for realistic problem sizes.
     */
    private double estimatePropOfFullSearch(int n, int m, int d, long time){
        BigInteger opCountTarget = new BigInteger(Long.toString(time / nanoToOp));
        BigInteger opCount = ShapeletTransformTimingUtilities.calculateOps(n, m, 1, 1);
        // Scale the univariate op count by the number of dimensions.
        opCount = opCount.multiply(BigInteger.valueOf(d));
        double p = 1;
        if(opCount.compareTo(opCountTarget) > 0){
            BigDecimal oct = new BigDecimal(opCountTarget);
            BigDecimal oc = new BigDecimal(opCount);
            BigDecimal prop = oct.divide(oc, MathContext.DECIMAL64);
            p = prop.doubleValue();
        }
        return p;
    }

    /** Example usage on a local copy of the ERing dataset (hard-coded paths). */
    public static void main(String[] args) throws Exception {
        int fold = 0;
        String dataset = "ERing";
        Instances train = DatasetLoading.loadDataNullable("D:\\CMP Machine Learning\\Datasets\\" +
                "MultivariateARFF\\" + dataset + "\\" + dataset + "_TRAIN.arff");
        Instances test = DatasetLoading.loadDataNullable("D:\\CMP Machine Learning\\Datasets\\" +
                "MultivariateARFF\\" + dataset + "\\" + dataset + "_TEST.arff");
        Instances[] data = resampleTrainAndTestInstances(train, test, fold);
        train = data[0];
        test = data[1];

        STC_D stc = new STC_D();
        stc.setSeed(fold);
        stc.setEstimateOwnPerformance(true);
        stc.buildClassifier(train);
        double acc = ClassifierTools.accuracy(test, stc);

        System.out.println("Test Accuracy = " + acc);
        System.out.println("Train Accuracy = "+ stc.trainResults.getAcc());
    }
}
10,116
39.468
111
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/multivariate/WEASEL_MUSE.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.classifiers.multivariate; import com.carrotsearch.hppc.*; import com.carrotsearch.hppc.cursors.*; import de.bwaldvogel.liblinear.*; import edu.emory.mathcs.jtransforms.fft.DoubleFFT_1D; import experiments.data.DatasetLoading; import tsml.classifiers.EnhancedAbstractClassifier; import utilities.ClassifierTools; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; import java.io.Serializable; import java.util.*; import java.util.concurrent.atomic.AtomicInteger; import static utilities.multivariate_tools.MultivariateInstanceTools.*; /** * The WEASEL+MUSE classifier as published in * * Schäfer, P., Leser, U.: Multivariate Time Series Classification * with WEASEL+MUSE. 
arXiv 2017
 * http://arxiv.org/abs/1711.11343
 *
 * Code adapted from the tsml WEASEL code and WEASEL+MUSE implementation
 * in the SFA package by Patrick Schäfer
 * https://github.com/patrickzib/SFA
 *
 * Author: Matthew Middlehurst 29/07/2020
 */
public class WEASEL_MUSE extends EnhancedAbstractClassifier {

    // Grid-search bounds for the SFA word length (even values minF..maxF are tried).
    private static int maxF = 6;
    private static int minF = 2;
    // SFA alphabet size.
    private static int maxS = 4;
    // Both mean-normalised and non-normalised windows are tried during model selection.
    private static boolean[] NORMALIZATION = new boolean[]{true, false};

    private enum HistogramType {
        EQUI_FREQUENCY, EQUI_DEPTH
    }

    // Both binning strategies are tried during model selection.
    private static HistogramType[] histTypes =
            new HistogramType[]{HistogramType.EQUI_DEPTH, HistogramType.EQUI_FREQUENCY};

    // Chi-squared threshold used for feature filtering in MUSE.trainChiSquared.
    private static double chi = 2;

    // liblinear settings (bias term, solver, stopping criteria, cost).
    private static double bias = 1;
    private static SolverType solverType = SolverType.L2R_LR;
    private static int iterations = 5000;
    private static double p = 0.1;
    private static double c = 1;

    // Empty copy of the (derivative-augmented) training header, used to re-shape test instances.
    private Instances header;
    // If true, each dimension is augmented with its first-difference series (absolute differences).
    private boolean derivatives = true;

    // ten-fold cross validation
    private int folds = 10;

    private static int MIN_WINDOW_LENGTH = 2;
    private static int MAX_WINDOW_LENGTH = 450;

    // The fitted model (best parameters + trained liblinear classifier).
    private MUSEModel classifier;

    public WEASEL_MUSE() {
        super(CANNOT_ESTIMATE_OWN_PERFORMANCE);
    }

    /**
     * Fits the WEASEL+MUSE model: optionally augments each dimension with its
     * derivative series, then grid-searches histogram type x normalisation x word
     * length using liblinear cross-validation accuracy, and finally trains a
     * liblinear model with the best configuration.
     *
     * @param samples multivariate training data; class attribute must be last
     * @throws Exception if the class attribute is not the last attribute
     */
    @Override
    public void buildClassifier(final Instances samples) throws Exception {
        long t1 = System.nanoTime();
        if (samples.classIndex() != samples.numAttributes() - 1)
            throw new Exception("WEASEL_MUSE_BuildClassifier: Class attribute not set as last attribute in dataset");

        Instances newSamples;
        //get derivatives for the instances if enabled
        if (derivatives) {
            int dimensionality = numDimensions(samples);
            Instances[] split = splitMultivariateInstances(samples);
            Instances[] channels = new Instances[dimensionality * 2];

            for (int i = 0; i < dimensionality; i++) {
                Instances derivative = new Instances(split[i], 0);

                for (int n = 0; n < samples.numInstances(); n++) {
                    Instance inst = split[i].get(n);
                    double[] d = new double[inst.numAttributes()];
                    // absolute first differences; last slot reused for the class value below
                    for (int a = 1; a < inst.numAttributes() - 1; a++) {
                        d[a - 1] = Math.abs(inst.value(a) - inst.value(a - 1));
                    }
                    d[inst.numAttributes() - 1] = inst.classValue();
                    derivative.add(new DenseInstance(1, d));
                }

                channels[i] = split[i];
                channels[dimensionality + i] = derivative;
            }

            newSamples = mergeToMultivariateInstances(channels);
            header = new Instances(newSamples, 0);
        }
        else {
            newSamples = samples;
        }

        int dimensionality = numDimensions(newSamples);

        try {
            int maxCorrect = -1;
            int bestF = -1;
            boolean bestNorm = false;
            HistogramType bestHistType = null;

            // Model selection: pick the (histType, norm, wordLength) combination with the
            // highest CV accuracy; ties broken in favour of the smaller word length.
            optimize:
            for (final HistogramType histType : histTypes) {
                for (final boolean mean : NORMALIZATION) {
                    int[] windowLengths = getWindowLengths(newSamples, mean);
                    for (int f = minF; f <= maxF; f += 2) {
                        final MUSE model = new MUSE(f, maxS, histType, windowLengths, mean);

                        MUSE.BagOfBigrams[] bag = null;
                        for (int w = 0; w < model.windowLengths.length; w++) {
                            int[][] words = model.createWords(newSamples, w);
                            MUSE.BagOfBigrams[] bobForOneWindow = fitOneWindow(
                                    newSamples, windowLengths, mean, histType,
                                    model, words, f, dimensionality, w);
                            bag = mergeBobs(bag, bobForOneWindow);
                        }

                        // train liblinear
                        final Problem problem = initLibLinearProblem(bag, model.dict, bias);
                        int correct = trainLibLinear(problem, solverType, c, iterations, p, folds);

                        if (correct > maxCorrect || correct == maxCorrect && f < bestF) {
                            maxCorrect = correct;
                            bestF = f;
                            bestNorm = mean;
                            bestHistType = histType;

                            if (debug) {
                                System.out.println("New best model" + maxCorrect + " " + bestF + " "
                                        + bestNorm + " " + bestHistType);
                            }
                        }
                        // perfect CV accuracy: no better configuration possible, stop early
                        if (correct == newSamples.numInstances()) {
                            break optimize;
                        }
                    }
                }
            }

            // obtain the final matrix
            int[] windowLengths = getWindowLengths(newSamples, bestNorm);

            // obtain the final matrix
            MUSE model = new MUSE(bestF, maxS, bestHistType, windowLengths, bestNorm);
            MUSE.BagOfBigrams[] bob = null;
            for (int w = 0; w < model.windowLengths.length; w++) {
                int[][] words = model.createWords(newSamples, w);
                MUSE.BagOfBigrams[] bobForOneWindow = fitOneWindow(
                        newSamples, windowLengths, bestNorm, bestHistType,
                        model, words, bestF, dimensionality, w);
                bob = mergeBobs(bob, bobForOneWindow);
            }

            // train liblinear
            Problem problem = initLibLinearProblem(bob, model.dict, bias);
            Parameter par = new Parameter(solverType, c, iterations, p);
            //par.setThreadCount(Math.min(Runtime.getRuntime().availableProcessors(),10));
            de.bwaldvogel.liblinear.Model linearModel = Linear.train(problem, par);

            this.classifier = new MUSEModel(
                    bestNorm,
                    bestF,
                    bestHistType,
                    model,
                    linearModel);
        } catch (Exception e) {
            // NOTE(review): swallows any training failure and leaves `classifier` null,
            // which will NPE at prediction time — presumably intentional best-effort; verify.
            e.printStackTrace();
        }

        long t2 = System.nanoTime();
        trainResults.setEstimatorName(getClassifierName());
        // NOTE(review): setParas is called twice; the second call below overwrites this one — confirm intended.
        trainResults.setParas(classifierName);
        trainResults.setBuildTime(t2 - t1);
        trainResults.setParas(getParameters());
    }

    /** Predicts the class of a single instance via the trained liblinear model. */
    @Override
    public double classifyInstance(Instance instance) throws Exception {
        FeatureNode[] features = predictionTransform(instance);
        return Linear.predict(classifier.linearModel, features);
    }

    /**
     * Returns the liblinear class-probability estimates, re-indexed so that
     * position i corresponds to class value i of the instance's class attribute.
     */
    @Override
    public double[] distributionForInstance(Instance instance) throws Exception {
        FeatureNode[] features = predictionTransform(instance);
        double[] probabilities = new double[classifier.linearModel.getNrClass()];
        Linear.predictProbability(classifier.linearModel, features, probabilities);

        // liblinear orders probabilities by its internal label order; map back to class values
        double[] classHist = new double[instance.numClasses()];
        for (int i = 0; i < classifier.linearModel.getLabels().length; i++) {
            classHist[classifier.linearModel.getLabels()[i]] = probabilities[i];
        }
        return classHist;
    }

    /**
     * Transforms a single (multivariate) instance into a sparse liblinear feature
     * vector, applying the same derivative augmentation and chi-squared word
     * filtering used at train time.
     */
    private FeatureNode[] predictionTransform(Instance instance) {
        Instance newInstance;
        //get derivatives for the instance if enabled
        if (derivatives) {
            int dimensionality = numDimensions(instance);
            Instance[] split = splitMultivariateInstance(instance);
            double[][] channels = new double[dimensionality * 2][split[0].numAttributes()];

            for (int i = 0; i < dimensionality; i++) {
                for (int a = 1; a < split[i].numAttributes(); a++) {
                    channels[dimensionality + i][a - 1] = Math.abs(split[i].value(a) - split[i].value(a - 1));
                }
                channels[i] = split[i].toDoubleArray();
            }

            // rebuild a relational (multivariate) instance against the stored training header
            newInstance = new DenseInstance(2);
            Instances relational = createRelationFrom(header.attribute(0).relation(), channels);
            newInstance.setDataset(header);
            int index = newInstance.attribute(0).addRelation(relational);
            newInstance.setValue(0, index);
            newInstance.setValue(1, instance.classValue());;
        }
        else {
            newInstance = instance;
        }

        int dimensionality = numDimensions(newInstance);
        MUSE.BagOfBigrams[] bagTest = null;
        for (int w = 0; w < classifier.muse.windowLengths.length; w++) {
            int[][] wordsTest = classifier.muse.createWords(newInstance, w);
            MUSE.BagOfBigrams[] bopForWindow = new MUSE.BagOfBigrams[]{
                    classifier.muse.createBagOfPatterns(wordsTest, newInstance, w, dimensionality, classifier.features)};
            classifier.muse.dict.filterChiSquared(bopForWindow);
            bagTest = mergeBobs(bagTest, bopForWindow);
        }

        return initLibLinear(bagTest, classifier.muse.dict)[0];
    }

    /**
     * Builds the bag-of-bigrams for one window length, runs the chi-squared filter
     * on it, and merges the surviving words into the shared model dictionary.
     */
    private MUSE.BagOfBigrams[] fitOneWindow(
            Instances samples,
            int[] windowLengths,
            boolean mean,
            HistogramType histType,
            MUSE model,
            int[][] word, int f,
            int dimensionality, int w) {
        MUSE modelForWindow = new MUSE(f, maxS, histType, windowLengths, mean);
        MUSE.BagOfBigrams[] bopForWindow = modelForWindow.createBagOfPatterns(word, samples, w, dimensionality, f);
        modelForWindow.trainChiSquared(bopForWindow, chi);
        model.dict.dictChi.putAll(modelForWindow.dict.dictChi);
        return bopForWindow;
    }

    /** Merges per-window bags into the accumulated per-instance bags (union of word counts). */
    private MUSE.BagOfBigrams[] mergeBobs(
            MUSE.BagOfBigrams[] bop,
            MUSE.BagOfBigrams[] bopForWindow) {
        if (bop == null) {
            bop = bopForWindow;
        } else {
            for (int i = 0; i < bop.length; i++) {
                bop[i].bob.putAll(bopForWindow[i].bob);
            }
        }
        return bop;
    }

    /** Assembles a liblinear {@link Problem} (labels + sparse features) from the bags. */
    public static Problem initLibLinearProblem(
            final MUSE.BagOfBigrams[] bob,
            final MUSE.Dictionary dict,
            final double bias) {
        Linear.resetRandom();

        Problem problem = new Problem();
        problem.bias = bias;
        problem.y = getLabels(bob);

        final FeatureNode[][] features = initLibLinear(bob, dict);

        problem.n = dict.size() + 1;
        problem.l = features.length;
        problem.x = features;
        return problem;
    }

    /** @return the class label of each bag as a double array, in bag order */
    public static double[] getLabels(final MUSE.BagOfBigrams[] bagOfPatternsTestSamples) {
        double[] labels = new double[bagOfPatternsTestSamples.length];
        for (int i = 0; i < bagOfPatternsTestSamples.length; i++) {
            labels[i] = bagOfPatternsTestSamples[i].label;
        }
        return labels;
    }

    /**
     * Converts each bag into a liblinear sparse feature row (dictionary index,
     * count), sorted by feature index as liblinear requires.
     */
    protected static FeatureNode[][] initLibLinear(
            final MUSE.BagOfBigrams[] bob,
            final MUSE.Dictionary dict) {
        FeatureNode[][] featuresTrain = new FeatureNode[bob.length][];
        for (int j = 0; j < bob.length; j++) {
            MUSE.BagOfBigrams bop = bob[j];
            ArrayList<FeatureNode> features = new ArrayList<FeatureNode>(bop.bob.size());
            for (ObjectIntCursor<MUSE.MuseWord> word : bop.bob) {
                if (word.value > 0 ) {
                    features.add(new FeatureNode(dict.getWordChi(word.key), word.value));
                }
            }

            FeatureNode[] featuresArray = features.toArray(new FeatureNode[]{});
            // liblinear requires feature indices in ascending order
            Arrays.sort(featuresArray, new Comparator<FeatureNode>() {
                public int compare(FeatureNode o1, FeatureNode o2) {
                    return Integer.compare(o1.index, o2.index);
                }
            });

            featuresTrain[j] = featuresArray;
        }
        return featuresTrain;
    }

    /**
     * Runs nr_fold cross-validation with liblinear on the given problem (seeded
     * shuffle for reproducibility) and returns the total number of correctly
     * classified held-out instances.
     */
    @SuppressWarnings("static-access")
    protected static int trainLibLinear(
            final Problem prob, final SolverType solverType, double c,
            int iter, double p, int nr_fold) {
        final Parameter param = new Parameter(solverType, c, iter, p);

        ThreadLocal<Random> myRandom = new ThreadLocal<>();
        myRandom.set(new Random(1));
        Random random = myRandom.get();

        int k;
        final int l = prob.l;
        final int[] perm = new int[l];

        if (nr_fold > l) {
            nr_fold = l;
        }
        final int[] fold_start = new int[nr_fold + 1];

        for (k = 0; k < l; k++) {
            perm[k] = k;
        }
        // Fisher-Yates shuffle of the instance order, fixed seed above
        for (k = 0; k < l; k++) {
            int j = k + random.nextInt(l - k);
            swap(perm, k, j);
        }
        for (k = 0; k <= nr_fold; k++) {
            fold_start[k] = k * l / nr_fold;
        }

        final AtomicInteger correct = new AtomicInteger(0);
        final int fold = nr_fold;

        Linear myLinear = new Linear();
        myLinear.disableDebugOutput();
        myLinear.resetRandom(); // reset random component of liblinear for reproducibility

        for (int i = 0; i < fold; i++) {
            int begin = fold_start[i];
            int end = fold_start[i + 1];
            int j, kk;

            // build the training subproblem excluding fold [begin, end)
            Problem subprob = new Problem();
            subprob.bias = prob.bias;
            subprob.n = prob.n;
            subprob.l = l - (end - begin);
            subprob.x = new Feature[subprob.l][];
            subprob.y = new double[subprob.l];

            kk = 0;
            for (j = 0; j < begin; j++) {
                subprob.x[kk] = prob.x[perm[j]];
                subprob.y[kk] = prob.y[perm[j]];
                ++kk;
            }
            for (j = end; j < l; j++) {
                subprob.x[kk] = prob.x[perm[j]];
                subprob.y[kk] = prob.y[perm[j]];
                ++kk;
            }

            de.bwaldvogel.liblinear.Model submodel = myLinear.train(subprob, param);
            for (j = begin; j < end; j++) {
                correct.addAndGet(prob.y[perm[j]] == myLinear.predict(submodel, prob.x[perm[j]]) ? 1 : 0);
            }
        }
        return correct.get();
    }

    /** Swaps two elements of an int array in place. */
    private static void swap(int[] array, int idxA, int idxB) {
        int temp = array[idxA];
        array[idxA] = array[idxB];
        array[idxB] = temp;
    }

    /**
     * Returns every window length from MIN_WINDOW_LENGTH (at least 3 when
     * normalising) up to min(series length, MAX_WINDOW_LENGTH), inclusive.
     */
    public int[] getWindowLengths(final Instances samples, boolean norm) {
        int min = norm && MIN_WINDOW_LENGTH <= 2 ? Math.max(3, MIN_WINDOW_LENGTH) : MIN_WINDOW_LENGTH;
        int max = Math.min(channelLength(samples), MAX_WINDOW_LENGTH);

        int[] wLengths = new int[max - min + 1];
        for (int w = min, a = 0; w <= max; w++, a++) {
            wLengths[a] = w;
        }
        return wLengths;
    }

    /** Integer base-2 logarithm (floor) computed by successive shifts. */
    protected static int binlog(int bits) {
        int log = 0;
        if ((bits & 0xffff0000) != 0) {
            bits >>>= 16;
            log = 16;
        }
        if (bits >= 256) {
            bits >>>= 8;
            log += 8;
        }
        if (bits >= 16) {
            bits >>>= 4;
            log += 4;
        }
        if (bits >= 4) {
            bits >>>= 2;
            log += 2;
        }
        return log + (bits >>> 1);
    }

    /** @return number of attributes of the instance, excluding the class attribute if set */
    protected static int instanceLength(Instance inst) {
        int length = inst.numAttributes();
        if (inst.classIndex() >= 0)
            --length;
        return length;
    }

    /**
     * @return data of passed instance in a double array with the class value removed if present
     */
    protected static double[] toArrayNoClass(Instance inst) {
        int length = inst.numAttributes();
        if (inst.classIndex() >= 0)
            --length;

        double[] data = new double[length];

        for (int i = 0, j = 0; i < inst.numAttributes(); ++i)
            if (inst.classIndex() != i)
                data[j++] = inst.value(i);

        return data;
    }

    /** Container for the fitted model: chosen parameters, MUSE transform and liblinear model. */
    public static class MUSEModel {

        public MUSEModel() {
        }

        public MUSEModel(
                boolean normed,
                int features,
                HistogramType histType,
                MUSE model,
                de.bwaldvogel.liblinear.Model linearModel
        ) {
            this.normed = normed;
            this.features = features;
            this.muse = model;
            this.linearModel = linearModel;
            this.histType = histType;
        }

        // whether windows were mean-normalised
        public boolean normed;

        // the best number of Fourier values to be used
        public int features;

        // the trained MUSE transformation
        public MUSE muse;

        // the trained liblinear classifier
        public de.bwaldvogel.liblinear.Model linearModel;

        // binning strategy chosen during model selection
        public HistogramType histType;
    }

    /**
     * The WEASEL+MUSE-Model as published in
     *
     * Schäfer, P., Leser, U.: Multivariate Time Series Classification
     * with WEASEL+MUSE. arXiv 2017
     * http://arxiv.org/abs/1711.11343
     */
    public static class MUSE {
        public int alphabetSize;
        public int maxF;
        public HistogramType histogramType = null;
        public int[] windowLengths;
        public boolean normMean;
        // one SFA per (window length, dimension), lazily fitted in createWords
        public SFA[][] signature;
        public Dictionary dict;

        /** A (window index, dimension, word, bigram word) key identifying one SFA word/bigram. */
        public static class MuseWord {
            int w = 0;
            int dim = 0;
            int word = 0;
            int word2 = 0; // 0 for unigrams; the second word for bigrams

            public MuseWord(int w, int dim, int word, int word2) {
                this.w = w;
                this.dim = dim;
                this.word = word;
                this.word2 = word2;
            }

            // NOTE(review): no null/type check before the cast — throws ClassCastException/NPE for
            // foreign or null arguments rather than returning false; confirm callers only compare MuseWords.
            @Override
            public boolean equals(Object o) {
                if (this == o) return true;
                MuseWord museWord = (MuseWord) o;
                return w == museWord.w &&
                        dim == museWord.dim &&
                        word == museWord.word &&
                        word2 == museWord.word2;
            }

            @Override
            public int hashCode() {
                int result = 1;
                result = 31 * result + Integer.hashCode(word);
                result = 31 * result + Integer.hashCode(word2);
                result = 31 * result + Integer.hashCode(w);
                result = 31 * result + Integer.hashCode(dim);
                return result;
            }

            @Override
            public String toString() {
                return w + "-" + dim + "-" + word + "-" + word2;
            }
        }

        /**
         * The WEASEL-model: a histogram of SFA word and bi-gram frequencies
         */
        public static class BagOfBigrams {
            public ObjectIntHashMap<MuseWord> bob;
            public Double label;

            public BagOfBigrams(int size, Double label) {
                this.bob = new ObjectIntHashMap<>(size);
                this.label = label;
            }
        }

        /**
         * A dictionary that maps each SFA word to an integer.
         *
         * Condenses the SFA word space.
         */
        public static class Dictionary {
            public ObjectIntHashMap<MuseWord> dictChi;
            public ArrayList<MuseWord> inverseDict;

            public Dictionary() {
                this.dictChi = new ObjectIntHashMap<>();
                this.inverseDict = new ArrayList<>();
                this.inverseDict.add(new MuseWord(0, 0, 0, 0));
            }

            /** Clears the dictionary back to its freshly-constructed state. */
            public void reset() {
                this.dictChi = new ObjectIntHashMap<>();
                this.inverseDict = new ArrayList<>();
                this.inverseDict.add(new MuseWord(0, 0, 0, 0));
            }

            /**
             * Returns the integer id for a word, assigning the next free id (ids
             * start at 1) and recording it in the inverse dictionary if unseen.
             */
            public int getWordChi(MuseWord word) {
                int index = 0;
                if ((index = this.dictChi.indexOf(word)) > -1) {
                    return this.dictChi.indexGet(index);
                } else {
                    int newWord = this.dictChi.size() + 1;
                    this.dictChi.put(word, newWord);
                    inverseDict.add(/*newWord,*/ word);
                    return newWord;
                }
            }

            public int size() {
                return this.dictChi.size();
            }

            /** Drops from each bag every word that did not survive the chi-squared filter. */
            public void filterChiSquared(final BagOfBigrams[] bagOfPatterns) {
                for (int j = 0; j < bagOfPatterns.length; j++) {
                    ObjectIntHashMap<MuseWord> oldMap = bagOfPatterns[j].bob;
                    bagOfPatterns[j].bob = new ObjectIntHashMap<>();
                    for (ObjectIntCursor<MuseWord> word : oldMap) {
                        if (this.dictChi.containsKey(word.key) && word.value > 0) {
                            bagOfPatterns[j].bob.put(word.key, word.value);
                        }
                    }
                }
            }
        }

        /**
         * Create a WEASEL+MUSE model.
         *
         * @param maxF          Length of the SFA words
         * @param maxS          alphabet size
         * @param histogramType histogram types (EQUI-Depth and/or EQUI-Frequency) to use
         * @param windowLengths the set of window lengths to use for extracting SFA words from
         *                      time series.
         * @param normMean      set to true, if mean should be set to 0 for a window
         */
        public MUSE(
                int maxF, int maxS,
                HistogramType histogramType,
                int[] windowLengths,
                boolean normMean) {
            this.maxF = maxF + maxF % 2; // even number
            this.alphabetSize = maxS;
            this.windowLengths = windowLengths;
            this.normMean = normMean;
            this.dict = new Dictionary();
            this.signature = new SFA[windowLengths.length][];
            this.histogramType = histogramType;
        }

        /**
         * Create SFA words and bigrams for all samples
         *
         * @param samples training data; one word array is produced per (instance, dimension)
         * @param index   index into windowLengths selecting the window size
         * @return words laid out as [instance * numDimensions + dimension][window offset]
         */
        protected int[][] createWords(final Instances samples, final int index) {
            // SFA quantization
            if (this.signature[index] == null) {
                this.signature[index] = new SFA[numDimensions(samples)];
                for (int i = 0; i < this.signature[index].length; i++) {
                    this.signature[index][i] = new SFA(this.histogramType);
                    this.signature[index][i].fitWindowing(samples, this.windowLengths[index],
                            this.maxF, this.alphabetSize, this.normMean, i);
                }
            }

            // create words
            Instances[] split = splitMultivariateInstances(samples);
            final int[][] words = new int[samples.numInstances() * split.length][];
            int pos = 0;
            for (int i = 0; i < samples.numInstances(); i++) {
                for (int n = 0; n < split.length; n++) {
                    if (channelLength(samples) >= this.windowLengths[index]) {
                        words[pos] = this.signature[index][n].transformWindowingInt(split[n].get(i), this.maxF);
                    } else {
                        words[pos] = new int[]{};
                    }
                    pos++;
                }
            }

            return words;
        }

        /**
         * Create SFA words and bigrams for a single sample
         *
         * @param sample the instance to transform (one word array per dimension)
         * @param index  index into windowLengths selecting the window size
         * @return words laid out as [dimension][window offset]
         */
        private int[][] createWords(final Instance sample, final int index) {
            // create words
            Instance[] split = splitMultivariateInstance(sample);
            final int[][] words = new int[split.length][];
            for (int n = 0; n < split.length; n++) {
                if (channelLength(sample) >= this.windowLengths[index]) {
                    words[n] = this.signature[index][n].transformWindowingInt(split[n], this.maxF);
                } else {
                    words[n] = new int[]{};
                }
            }

            return words;
        }

        /**
         * Chi-squared feature filter: zeroes out, in every bag, the counts of words
         * whose chi-squared statistic against the class labels is below chi_limit.
         *
         * Implementation based on:
         * https://github.com/scikit-learn/scikit-learn/blob/c957249/sklearn/feature_selection/univariate_selection.py#L170
         */
        public void trainChiSquared(final BagOfBigrams[] bob, double chi_limit) {
            // Chi2 Test
            ObjectIntHashMap<MuseWord> featureCount = new ObjectIntHashMap<>(bob[0].bob.size());
            LongDoubleHashMap classProb = new LongDoubleHashMap(10);
            LongObjectHashMap<ObjectIntHashMap<MuseWord>> observed = new LongObjectHashMap<>(bob[0].bob.size());

            // count number of samples with this word
            for (BagOfBigrams bagOfPattern : bob) {
                long label = bagOfPattern.label.longValue();
                if (!observed.containsKey(label)) {
                    observed.put(label, new ObjectIntHashMap<>());
                }

                for (ObjectIntCursor<MuseWord> word : bagOfPattern.bob) {
                    if (word.value > 0) {
                        featureCount.putOrAdd(word.key, 1, 1);
                        observed.get(label).putOrAdd(word.key, 1, 1);
                    }
                }
            }

            // samples per class
            for (BagOfBigrams bagOfPattern : bob) {
                long label = bagOfPattern.label.longValue();
                classProb.putOrAdd(label, 1, 1);
            }

            // chi-squared: observed minus expected occurrence
            ObjectHashSet<MuseWord> chiSquare = new ObjectHashSet<>(featureCount.size());
            for (LongDoubleCursor classLabel : classProb) {
                classLabel.value /= bob.length;

                if (observed.get(classLabel.key) != null) {
                    ObjectIntHashMap<MuseWord> observe = observed.get(classLabel.key);
                    for (ObjectIntCursor<MuseWord> feature : featureCount) {
                        double expected = classLabel.value * feature.value;

                        double chi = observe.get(feature.key) - expected;
                        double newChi = chi * chi / expected;

                        if (newChi >= chi_limit && !chiSquare.contains(feature.key)) {
                            chiSquare.add(feature.key);
                        }
                    }
                }
            }

            // best elements above limit
            for (int j = 0; j < bob.length; j++) {
                for (ObjectIntCursor<MuseWord> cursor : bob[j].bob) {
                    if (!chiSquare.contains(cursor.key)) {
                        bob[j].bob.values[cursor.index] = 0;
                    }
                }
            }
        }

        /**
         * Create words and bi-grams for all window lengths
         */
        public BagOfBigrams createBagOfPatterns(
                final int[][] words,
                final Instance sample,
                final int w,    // index of used windowSize
                final int dimensionality,
                final int wordLength) {
            final byte usedBits = (byte) binlog(this.alphabetSize);
            // mask truncates each stored word down to wordLength characters
            final int mask = (1 << (usedBits * wordLength)) - 1;

            BagOfBigrams bop = new BagOfBigrams(100, sample.classValue());

            // create subsequences
            if (this.windowLengths[w] >= wordLength) {
                for (int dim = 0; dim < dimensionality; dim++) {
                    for (int offset = 0; offset < words[dim].length; offset++) {
                        MuseWord word = new MuseWord(w, dim, words[dim][offset] & mask, 0);
                        //int dict = this.dict.getWord(word);
                        bop.bob.putOrAdd(word, 1, 1);

                        // add bigrams
                        if (this.windowLengths[this.windowLengths.length - 1] < 200 // avoid for too large datasets
                                //&& useBigrams
                                && (offset - this.windowLengths[w] >= 0)) {
                            MuseWord bigram = new MuseWord(w, dim,
                                    (words[dim][offset - this.windowLengths[w]] & mask),
                                    words[dim][offset] & mask);
                            //int newWord = this.dict.getWord(bigram);
                            bop.bob.putOrAdd(bigram, 1, 1);
                        }
                    }
                }
            }
            return bop;
        }

        /**
         * Create words and bi-grams for all window lengths
         */
        public BagOfBigrams[] createBagOfPatterns(
                final int[][] wordsForWindowLength,
                final Instances samples,
                final int w,    // index of used windowSize
                final int dimensionality,
                final int wordLength) {
            List<BagOfBigrams> bagOfPatterns = new ArrayList<>(
                    samples.numInstances() * dimensionality);

            final byte usedBits = (byte) binlog(this.alphabetSize);
            // mask truncates each stored word down to wordLength characters
            final int mask = (1 << (usedBits * wordLength)) - 1;

            // iterate all samples and create a muse model for each
            for (int i = 0, j = 0; i < samples.numInstances(); i++, j += dimensionality) {
                BagOfBigrams bop = new BagOfBigrams(100, samples.get(i).classValue());

                // create subsequences
                if (this.windowLengths[w] >= wordLength) {
                    for (int dim = 0; dim < dimensionality; dim++) {
                        for (int offset = 0; offset < wordsForWindowLength[j + dim].length; offset++) {
                            MuseWord word = new MuseWord(w, dim, wordsForWindowLength[j + dim][offset] & mask, 0);
                            //int dict = this.dict.getWord(word);
                            bop.bob.putOrAdd(word, 1, 1);

                            // add bigrams
                            if (this.windowLengths[this.windowLengths.length - 1] < 200 // avoid for too large datasets
                                    //&& useBigrams
                                    && (offset - this.windowLengths[w] >= 0)) {
                                MuseWord bigram = new MuseWord(w, dim,
                                        (wordsForWindowLength[j + dim][offset - this.windowLengths[w]] & mask),
                                        wordsForWindowLength[j + dim][offset] & mask);
                                //int newWord = this.dict.getWord(bigram);
                                bop.bob.putOrAdd(bigram, 1, 1);
                            }
                        }
                    }
                }
                bagOfPatterns.add(bop);
            }
            return bagOfPatterns.toArray(new BagOfBigrams[]{});
        }
    }

    /**
     * SFA using the ANOVA F-statistic to determine the best Fourier coefficients
     * (those that best separate between class labels) as opposed to using the first
     * ones.
     *
     * NOTE(review): this variant only performs histogram binning of the first
     * coefficients (no ANOVA selection is visible in the code below) — the class
     * javadoc above is inherited from the SFA package and may overstate what this
     * implementation does; verify.
     */
    public static class SFA {
        // distribution of Fourier values
        public transient ArrayList<Double>[] orderLine;

        public HistogramType histogramType = HistogramType.EQUI_DEPTH;
        public int alphabetSize = 256;
        public byte neededBits = (byte) binlog(this.alphabetSize);
        public int wordLength = 0;
        public boolean initialized = false;
        public int maxWordLength;

        // The Momentary Fourier Transform
        public MFT transformation;

        // use binning / bucketing
        public double[][] bins;

        public SFA(HistogramType histogramType) {
            this.histogramType = histogramType;
        }

        /** Allocates bins/orderline for word length l and the given alphabet size. */
        @SuppressWarnings("unchecked")
        private void init(int l, int alphabetSize) {
            this.wordLength = l;
            this.maxWordLength = l;
            this.alphabetSize = alphabetSize;
            this.initialized = true;

            // l-dimensional bins
            this.alphabetSize = alphabetSize;
            this.neededBits = (byte) binlog(alphabetSize);

            this.bins = new double[l][alphabetSize - 1];
            for (double[] row : this.bins) {
                Arrays.fill(row, Double.MAX_VALUE);
            }

            this.orderLine = new ArrayList[l];
            for (int i = 0; i < this.orderLine.length; i++) {
                this.orderLine[i] = new ArrayList<>();
            }
        }

        /**
         * Extracts sliding windows from the multivariate time series and
         * trains SFA based on the sliding windows.
         * At the end of this call, the quantization bins are set.
         *
         * @param timeSeries   A set of multivariate sample time series
         * @param windowLength The queryLength of each sliding window
         * @param wordLength   the SFA word-queryLength
         * @param symbols      the SFA alphabet size
         * @param normMean     if set, the mean is subtracted from each sliding window
         * @param dim          the dimension of the multivariate time series to use
         */
        public void fitWindowing(Instances timeSeries, int windowLength, int wordLength, int symbols,
                                 boolean normMean, int dim) {
            ArrayList<double[]> sa = new ArrayList<>(timeSeries.numInstances() * numDimensions(timeSeries)
                    * channelLength(timeSeries) / windowLength);
            for (Instance t : timeSeries) {
                Collections.addAll(sa, getDisjointSequences(t.relationalValue(0).get(dim), windowLength, normMean));
            }

            double[][] allSamples = new double[sa.size()][];
            for (int i = 0; i < sa.size(); i++) {
                allSamples[i] = sa.get(i);
            }

            fitTransform(allSamples, wordLength, symbols, normMean);
        }

        /**
         * Extracts disjoint subsequences
         */
        public double[][] getDisjointSequences(Instance t, int windowSize, boolean normMean) {
            // extract subsequences
            int amount = instanceLength(t) / windowSize;
            double[][] subsequences = new double[amount][windowSize];

            double[] data = toArrayNoClass(t);
            for (int i = 0; i < amount; i++) {
                double[] subsequenceData = new double[windowSize];
                System.arraycopy(data, i * windowSize, subsequenceData, 0, windowSize);
                //TODO weird norm bit from SFA code, calls normalisation function but doesnt actually normalise.
                subsequences[i] = subsequenceData; //z_norm(subsequenceData, normMean);
            }

            return subsequences;
        }

        /**
         * In-place z-normalisation: subtracts the mean (if normMean) and divides by
         * the population standard deviation. Currently unused (see TODO above).
         */
        public double[] z_norm(double[] data, boolean normMean) {
            double mean = 0.0;
            double stddev = 0;

            // get mean +stddev values
            double var = 0;
            for (double value : data) {
                mean += value;
                var += value * value;
            }
            mean /= data.length;

            double norm = 1.0 / ((double) data.length);
            double buf = norm * var - mean * mean;
            if (buf > 0) {
                stddev = Math.sqrt(buf);
            }

            double inverseStddev = (stddev != 0) ? 1.0 / stddev : 1.0;

            if (normMean) {
                for (int i = 0; i < data.length; i++) {
                    data[i] = (data[i] - mean) * inverseStddev;
                }
            } else if (inverseStddev != 1.0) {
                for (int i = 0; i < data.length; i++) {
                    data[i] *= inverseStddev;
                }
            }

            return data;
        }

        /** Fits the quantization bins from the samples (fills and bins the orderline). */
        public void fitTransform(double[][] samples, int wordLength, int symbols, boolean normMean) {
            if (!this.initialized) {
                init(wordLength, symbols);

                if (this.transformation == null) {
                    this.transformation = new MFT(samples[0].length, normMean);
                }
            }

            fillOrderline(samples, wordLength);

            if (this.histogramType == HistogramType.EQUI_DEPTH) {
                divideEquiDepthHistogram();
            } else if (this.histogramType == HistogramType.EQUI_FREQUENCY) {
                divideEquiWidthHistogram();
            }

            // orderline no longer needed once bins are set; free the memory
            this.orderLine = null;;
        }

        /**
         * Use equi-width binning to divide the orderline
         */
        protected void divideEquiWidthHistogram() {
            int i = 0;
            for (List<Double> elements : this.orderLine) {
                if (!elements.isEmpty()) {
                    // apply the split
                    double first = elements.get(0);
                    double last = elements.get(elements.size() - 1);
                    double intervalWidth = (last - first) / (this.alphabetSize);

                    for (int c = 0; c < this.alphabetSize - 1; c++) {
                        this.bins[i][c] = intervalWidth * (c + 1) + first;
                    }
                }
                i++;
            }
        }

        /**
         * Use equi-depth binning to divide the orderline
         */
        protected void divideEquiDepthHistogram() {
            // For each real and imaginary part
            for (int i = 0; i < this.bins.length; i++) {
                // Divide into equi-depth intervals
                double depth = this.orderLine[i].size() / (double) (this.alphabetSize);

                int pos = 0;
                long count = 0;
                for (Double value : this.orderLine[i]) {
                    if (++count > Math.ceil(depth * (pos + 1))
                            && (pos == 0 || this.bins[i][pos - 1] != value)) {
                        this.bins[i][pos++] = value;
                    }
                }
            }
        }

        /**
         * Fills data in the orderline
         *
         * @param samples A set of samples
         */
        protected void fillOrderline(double[][] samples, int l) {
            double[][] transformedSamples = new double[samples.length][];

            for (int i = 0; i < samples.length; i++) {
                // approximation
                transformedSamples[i] = this.transformation.transform(samples[i], l);

                for (int j = 0; j < transformedSamples[i].length; j++) {
                    // round to 2 decimal places to reduce noise
                    double value = Math.round(transformedSamples[i][j] * 100.0) / 100.0;

                    this.orderLine[j].add(value);
                }
            }

            // Sort ascending by value
            for (List<Double> element : this.orderLine) {
                Collections.sort(element);
            }
        }

        /**
         * Quantization of a DFT approximation to its SFA word
         *
         * @param approximation the DFT approximation of a time series
         * @return one symbol (bin index) per Fourier value
         */
        public short[] quantization(double[] approximation) {
            int i = 0;
            short[] word = new short[approximation.length];
            for (double value : approximation) {
                // lookup character:
                short c = 0;
                for (; c < this.bins[i].length; c++) {
                    if (value < this.bins[i][c]) {
                        break;
                    }
                }
                word[i++] = c;
            }
            return word;
        }

        /**
         * Transforms a single time series to its SFA word
         *
         * @param timeSeries    a sample
         * @param approximation the DFT approximation, if available, else pass 'null'
         * @return the SFA word as one symbol per Fourier value
         */
        public short[] transform(double[] timeSeries, double[] approximation) {
            if (!this.initialized) {
                throw new RuntimeException("Please call fitTransform() first.");
            }
            if (approximation == null) {
                // get approximation of the time series
                approximation = this.transformation.transform(timeSeries, this.maxWordLength);
            }

            // use lookup table (bins) to get the word from the approximation
            return quantization(approximation);
        }

        /**
         * Transforms a set of time series to SFA words.
         *
         * @param samples       a set of samples
         * @param approximation the DFT approximations, if available, else pass 'null'
         * @return one SFA word per sample
         */
        public short[][] transform(double[][] samples, double[][] approximation) {
            if (!this.initialized) {
                throw new RuntimeException("Please call fitTransform() first.");
            }
            short[][] transform = new short[samples.length][];
            for (int i = 0; i < transform.length; i++) {
                transform[i] = transform(samples[i], approximation[i]);
            }

            return transform;
        }

        /**
         * Returns a long containing the values in bytes.
         */
        protected static long fromByteArrayOne(short[] bytes, int to, byte usedBits) {
            // at most 60 bits of the long are used, so this many symbols fit
            int shortsPerLong = 60 / usedBits;
            to = Math.min(bytes.length, to);

            long bits = 0;
            int start = 0;
            long shiftOffset = 1;
            for (int i = start, end = Math.min(to, shortsPerLong + start); i < end; i++) {
                for (int j = 0, shift = 1; j < usedBits; j++, shift <<= 1) {
                    if ((bytes[i] & shift) != 0) {
                        bits |= shiftOffset;
                    }
                    shiftOffset <<= 1;
                }
            }

            return bits;
        }

        /** Packs the first `features` symbols of a word into a single long. */
        protected static long createWord(short[] words, int features, byte usedBits) {
            return fromByteArrayOne(words, features, usedBits);
        }

        /** Transforms an instance into one packed int word per sliding window. */
        public int[] transformWindowingInt(Instance ts, int wordLength) {
            short[][] words = transformWindowing(ts);
            int[] intWords = new int[words.length];
            for (int i = 0; i < words.length; i++) {
                intWords[i] = (int) createWord(words[i], wordLength, this.neededBits);
            }
            return intWords;
        }

        /**
         * Extracts sliding windows from a time series and transforms it to its SFA
         * word.
         * <p>
         * Returns the SFA words as short[] (from Fourier transformed windows). Each
         * short corresponds to one character.
         *
         * @param timeSeries a sample
         * @return one SFA word per sliding window
         */
        public short[][] transformWindowing(Instance timeSeries) {
            double[][] mft = this.transformation.transformWindowing(timeSeries, this.maxWordLength);

            short[][] words = new short[mft.length][];
            for (int i = 0; i < mft.length; i++) {
                words[i] = quantization(mft[i]);
            }

            return words;
        }
    }

    /**
     * The Momentary Fourier Transform is alternative algorithm of
     * the Discrete Fourier Transform for overlapping windows. It has
     * a constant computational complexity for in the window queryLength n as
     * opposed to O(n log n) for the Fast Fourier Transform algorithm.
     * <p>
     * It was first published in:
     * Albrecht, S., Cumming, I., Dudas, J.: The momentary fourier transformation
     * derived from recursive matrix transformations. In: Digital Signal Processing
     * Proceedings, 1997., IEEE (1997)
     *
     */
    public static class MFT implements Serializable {
        private static final long serialVersionUID = 8508604292241736378L;

        private int windowSize = 0;
        // 2 when mean-normalising: skips the DC coefficient (real+imag parts)
        private int startOffset = 0;
        private transient DoubleFFT_1D fft = null;

        public MFT(int windowSize, boolean normMean) {
            this.windowSize = windowSize;
            this.fft = new DoubleFFT_1D(this.windowSize);

            // ignore DC value?
            this.startOffset = normMean ? 2 : 0;
        }

        /** DFT of a single window, truncated to wordLength values and sign-alternated. */
        public double[] transform(double[] series, int wordLength) {
            double[] data = new double[this.windowSize];
            System.arraycopy(series, 0, data, 0, Math.min(this.windowSize, series.length));

            this.fft.realForward(data);
            data[1] = 0; // DC-coefficient imaginary part

            // make it even length for uneven windowSize
            double[] copy = new double[wordLength];
            int length = Math.min(this.windowSize - this.startOffset, wordLength);
            System.arraycopy(data, this.startOffset, copy, 0, length);

            // norming
            int sign = 1;
            for (int i = 0; i < copy.length; i++) {
                copy[i] *= sign;
                sign *= -1;
            }

            return copy;
        }

        /**
         * Transforms a time series, extracting windows and using *momentary* fourier
         * transform for each window. Results in one Fourier transform for each
         * window. Returns only the first l/2 Fourier coefficients for each window.
         *
         * @param timeSeries the time series to be transformed
         * @param l          the number of Fourier values to use (equal to l/2 Fourier
         *                   coefficients). If l is uneven, l+1 Fourier values are returned. If
         *                   windowSize is smaller than l, only the first windowSize Fourier
         *                   values are set.
         * @return returns only the first l/2 Fourier coefficients for each window.
         */
        public double[][] transformWindowing(Instance timeSeries, int l) {
            int wordLength = Math.min(windowSize, l + this.startOffset);
            wordLength += wordLength % 2; // make it even

            double[] phis = new double[wordLength];

            for (int u = 0; u < phis.length; u += 2) {
                // NOTE(review): -u / 2 is integer division performed before widening —
                // for even u this is exact, but confirm the intent matches the MFT paper.
                double uHalve = -u / 2;
                phis[u] = realPartEPhi(uHalve, this.windowSize);
                phis[u + 1] = complexPartEPhi(uHalve, this.windowSize);
            }

            // means and stddev for each sliding window
            int end = Math.max(1, instanceLength(timeSeries) - this.windowSize + 1);
            double[] means = new double[end];
            double[] stds = new double[end];
            calcIncrementalMeanStddev(this.windowSize, toArrayNoClass(timeSeries), means, stds);

            double[][] transformed = new double[end][];

            // holds the DFT of each sliding window
            double[] mftData = new double[wordLength];
            double[] data = toArrayNoClass(timeSeries);

            for (int t = 0; t < end; t++) {
                // use the MFT
                if (t > 0) {
                    // incremental update: rotate the previous window's coefficients by phi
                    // after sliding one sample (constant cost per window)
                    for (int k = 0; k < wordLength; k += 2) {
                        double real1 = (mftData[k] + data[t + this.windowSize - 1] - data[t - 1]);
                        double imag1 = (mftData[k + 1]);

                        double real = complexMultiplyRealPart(real1, imag1, phis[k], phis[k + 1]);
                        double imag = complexMultiplyImagPart(real1, imag1, phis[k], phis[k + 1]);

                        mftData[k] = real;
                        mftData[k + 1] = imag;
                    }
                }
                // use the DFT for the first offset
                else {
                    double[] dft = new double[this.windowSize];
                    System.arraycopy(toArrayNoClass(timeSeries), 0, dft, 0,
                            Math.min(this.windowSize, data.length));

                    this.fft.realForward(dft);
                    dft[1] = 0; // DC-coefficient imag part

                    // if windowSize > mftData.queryLength, the remaining data should be 0 now.
                    System.arraycopy(dft, 0, mftData, 0, Math.min(mftData.length, dft.length));
                }

                // normalization for lower bounding
                double[] copy = new double[l];
                System.arraycopy(mftData, this.startOffset, copy, 0,
                        Math.min(l, mftData.length - this.startOffset));

                transformed[t] = normalizeFT(copy, stds[t]);
            }

            return transformed;
        }

        /**
         * Gets the means and stddevs for all sliding windows of a time series
         */
        public void calcIncrementalMeanStddev(
                int windowLength,
                double[] tsData,
                double[] means,
                double[] stds) {
            double sum = 0;
            double squareSum = 0;

            // it is faster to multiply than to divide
            double rWindowLength = 1.0 / (double) windowLength;

            for (int ww = 0; ww < Math.min(tsData.length, windowLength); ww++) {
                sum += tsData[ww];
                squareSum += tsData[ww] * tsData[ww];
            }

            // first window
            means[0] = sum * rWindowLength;
            double buf = squareSum * rWindowLength - means[0] * means[0];
            stds[0] = buf > 0 ? Math.sqrt(buf) : 0;

            // remaining windows
            for (int w = 1, end = tsData.length - windowLength + 1; w < end; w++) {
                sum += tsData[w + windowLength - 1] - tsData[w - 1];
                means[w] = sum * rWindowLength;

                squareSum += tsData[w + windowLength - 1] * tsData[w + windowLength - 1]
                        - tsData[w - 1] * tsData[w - 1];
                buf = squareSum * rWindowLength - means[w] * means[w];
                stds[w] = buf > 0 ? Math.sqrt(buf) : 0;
            }
        }

        /**
         * Calculate the real part of a multiplication of two complex numbers
         */
        private double complexMultiplyRealPart(double r1, double im1, double r2, double im2) {
            return r1 * r2 - im1 * im2;
        }

        /**
         * Calculate the imaginary part of a multiplication of two complex numbers
         */
        private double complexMultiplyImagPart(double r1, double im1, double r2, double im2) {
            return r1 * im2 + r2 * im1;
        }

        /**
         * Real part of e^(2*pi*u/M)
         */
        private double realPartEPhi(double u, double M) {
            return Math.cos(2 * Math.PI * u / M);
        }

        /**
         * Imaginary part of e^(2*pi*u/M)
         */
        private double complexPartEPhi(double u, double M) {
            return -Math.sin(2 * Math.PI * u / M);
        }

        /**
         * Apply normalization to the Fourier coefficients to allow lower bounding in Euclidean space
         */
        private double[] normalizeFT(double[] copy, double std) {
            double normalisingFactor = 1.0 ;//std > 0 ? 1.0 / std : 1.0; TODO another weird norm bit from SFA code
            int sign = 1;
            for (int i = 0; i < copy.length; i++) {
                copy[i] *= sign * normalisingFactor;
                sign *= -1;
            }
            return copy;
        }
    }

    public static void main(String[] args) throws Exception {
        int fold = 0;

        //Minimum working example
        String dataset = "RacketSports";
        Instances train = DatasetLoading.loadDataNullable("E:\\Datasets\\Multivariate_arff\\"+dataset+"\\"+dataset+"_TRAIN.arff");
        Instances test = DatasetLoading.loadDataNullable("E:\\Datasets\\Multivariate_arff\\"+dataset+"\\"+dataset+"_TEST.arff");
        Instances[] data = resampleMultivariateTrainAndTestInstances(train, test, fold);
        train = data[0];
        test = data[1];

        WEASEL_MUSE c;
        double accuracy;

        c = new WEASEL_MUSE();
        c.setSeed(fold);
        c.buildClassifier(train);
        accuracy = ClassifierTools.accuracy(test, c);

        System.out.println("WEASEL_MUSE accuracy on " + dataset + " fold " + fold + " = " + accuracy);
    }
}
53,341
36.670904
130
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/shapelet_based/FastShapelets.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.classifiers.shapelet_based; import experiments.data.DatasetLoading; import java.io.File; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Random; import java.util.concurrent.TimeUnit; import tsml.classifiers.EnhancedAbstractClassifier; import static utilities.GenericTools.cloneArrayList; import utilities.InstanceTools; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; /** * @inproceedings{rakthanmanon13fastshapelets, author="T. Rakthanmanon and E. Keogh ", title="Fast-Shapelets: A Fast Algorithm for Discovering Robust Time Series Shapelets", booktitle ="Proc. 13th {SDM}", year="2013" } * * @author Aaron Bostrom */ public class FastShapelets extends EnhancedAbstractClassifier implements TechnicalInformationHandler { @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(TechnicalInformation.Type.CONFERENCE); result.setValue(TechnicalInformation.Field.AUTHOR, "T. Rakthanmanon and E. 
Keogh"); result.setValue(TechnicalInformation.Field.TITLE, "Fast-Shapelets: A Fast Algorithm for Discovering Robust Time Series Shapelets"); result.setValue(TechnicalInformation.Field.JOURNAL, "Proc. 13th SDM"); result.setValue(TechnicalInformation.Field.YEAR, "2013"); return result; } static final int EXTRA_TREE_DEPTH = 2; static final float MIN_PERCENT_OBJ_SPLIT = 0.1f; static final float MAX_PURITY_SPLIT = 0.90f; static final int SH_MIN_LEN = 5; int MIN_OBJ_SPLIT; int numClass, numObj, subseqLength; int[] classFreq, orgClassFreq; ArrayList<ArrayList<Double>> orgData, data; ArrayList<Integer> Org_Label, Label; ArrayList<Integer> classifyList; ArrayList<Shapelet> finalSh; ArrayList<Pair<Integer, Double>> scoreList; //USAX_Map_Type is typedef unordered_map<SAX_word_type, USAX_elm_type> USAX_Map_type; //where a SAX_word_type is just an int. HashMap<Integer, USAX_elm_type> uSAXMap; Random rand; //Obj_list_type is a vector of ints. IE an ArrayList. // Node_Obj_set_type == vector<Obj_list_type> and Obj_list_type == vectorc<int>.. vector<vector<int>> ArrayList<ArrayList<Integer>> nodeObjList; double classEntropy; NN_ED nn; public FastShapelets() { super(CANNOT_ESTIMATE_OWN_PERFORMANCE); nn = new NN_ED(); } @Override public void buildClassifier(Instances data) throws Exception { long start=System.nanoTime(); train(data, 10, 10); trainResults.setTimeUnit(TimeUnit.NANOSECONDS); trainResults.setBuildTime(System.nanoTime()-start); } @Override public String getParameters() { StringBuilder sb = new StringBuilder(); sb.append(super.getParameters()); //Add other fast shapelet parameters, return sb.toString(); } public void train(Instances data, int R, int top_k) { int sax_max_len, sax_len, w; int max_len = data.numAttributes() - 1, min_len = 10, step = 1; //consider whole search space. 
double percent_mask; Shapelet sh; rand = new Random(seed); numClass = data.numClasses(); numObj = data.numInstances(); sax_max_len = 15; percent_mask = 0.25; //R = 10; //top_k = 10; readTrainData(data); //initialise our data structures. nodeObjList = new ArrayList<>(); finalSh = new ArrayList<>(); uSAXMap = new HashMap<>(); scoreList = new ArrayList<>(); classifyList = new ArrayList<>(); /// Find Shapelet for (int node_id = 1; (node_id == 1) || (node_id < nodeObjList.size()); node_id++) { Shapelet bsf_sh = new Shapelet(); if (node_id <= 1) { setCurData(node_id); } else if (classifyList.get(node_id) == -1) { /// non-leaf node (-1:body node, -2:unused node) setCurData(node_id); } else { continue; } //3 to series length. for (subseqLength = min_len; subseqLength <= max_len; subseqLength += step) { /// Shapelet cannot be too short, e.g. len=1. if (subseqLength < SH_MIN_LEN) { continue; } sax_len = sax_max_len; /// Make w and sax_len both integer w = (int) Math.ceil(1.0 * subseqLength / sax_len); sax_len = (int) Math.ceil(1.0 * subseqLength / w); createSAXList(subseqLength, sax_len, w); randomProjection(R, percent_mask, sax_len); scoreAllSAX(R); sh = findBestSAX(top_k); if (bsf_sh.lessThan(sh)) { bsf_sh = sh; } uSAXMap.clear(); scoreList.clear(); } if (bsf_sh.len > 0) { double[] query = new double[bsf_sh.len]; for (int i = 0; i < bsf_sh.len; i++) { query[i] = this.data.get(bsf_sh.obj).get(bsf_sh.pos + i); } bsf_sh.setTS(query); finalSh.add(bsf_sh); /// post-processing: create tree setNextNodeObj(node_id, bsf_sh); } } } /// From top-k-score SAX /// Calculate Real Infomation Gain // Shapelet findBestSAX(int top_k) { //init the ArrayList with nulls. 
ArrayList<Pair<Integer, Double>> Dist = new ArrayList<>(); for (int i = 0; i < numObj; i++) { Dist.add(null); } int word; double gain, dist_th, gap; int q_obj, q_pos; USAX_elm_type usax; int label, kk, total_c_in, num_diff; Shapelet sh = new Shapelet(), bsf_sh = new Shapelet(); if (top_k > 0) { Collections.sort(scoreList, new ScoreComparator()); } top_k = Math.abs(top_k); for (int k = 0; k < Math.min(top_k, scoreList.size()); k++) { word = scoreList.get(k).first; usax = uSAXMap.get(word); for (kk = 0; kk < Math.min(usax.sax_id.size(), 1); kk++) { int[] c_in = new int[numClass]; int[] c_out = new int[numClass]; //init the array list with 0s double[] query = new double[subseqLength]; q_obj = usax.sax_id.get(kk).first; q_pos = usax.sax_id.get(kk).second; for (int i = 0; i < numClass; i++) { c_in[i] = 0; c_out[i] = classFreq[i]; } for (int i = 0; i < subseqLength; i++) { query[i] = data.get(q_obj).get(q_pos + i); } double dist; int m = query.length; double[] Q = new double[m]; int[] order = new int[m]; for (int obj = 0; obj < numObj; obj++) { dist = nn.nearestNeighborSearch(query, data.get(obj), obj, Q, order); Dist.set(obj, new Pair<>(obj, dist)); } Collections.sort(Dist, new DistComparator()); total_c_in = 0; for (int i = 0; i < Dist.size() - 1; i++) { Pair<Integer, Double> pair_i = Dist.get(i); Pair<Integer, Double> pair_ii = Dist.get(i + 1); dist_th = (pair_i.second + pair_ii.second) / 2.0; //gap = Dist[i+1].second - dist_th; gap = ((double) (pair_ii.second - dist_th)) / Math.sqrt(subseqLength); label = Label.get(pair_i.first); c_in[label]++; c_out[label]--; total_c_in++; num_diff = Math.abs(numObj - 2 * total_c_in); //gain = CalInfoGain1(c_in, c_out); gain = calcInfoGain2(c_in, c_out, total_c_in, numObj - total_c_in); sh.setValueFew(gain, gap, dist_th); if (bsf_sh.lessThan(sh)) { bsf_sh.setValueAll(gain, gap, dist_th, q_obj, q_pos, subseqLength, num_diff, c_in, c_out); } } } } return bsf_sh; } double calcInfoGain2(int[] c_in, int[] c_out, int total_c_in, int 
total_c_out) { return classEntropy - ((double) (total_c_in) / numObj * entropyArray(c_in, total_c_in) + (double) (total_c_out) / numObj * entropyArray(c_out, total_c_out)); } /// Score each SAX void scoreAllSAX(int R) { int word; double score; USAX_elm_type usax; for (Map.Entry<Integer, USAX_elm_type> entry : uSAXMap.entrySet()) { word = entry.getKey(); usax = entry.getValue(); score = calcScore(usax, R); scoreList.add(new Pair<>(word, score)); } } /// ***Calc*** double calcScore(USAX_elm_type usax, int R) { double score = -1; int cid, count; double[] c_in = new double[numClass]; // Count object inside hash bucket double[] c_out = new double[numClass]; // Count object outside hash bucket /// Note that if no c_in, then no c_out of that object for (Map.Entry<Integer, Integer> entry : usax.obj_count.entrySet()) { cid = Label.get(entry.getKey()); count = entry.getValue(); c_in[cid] += (count); c_out[cid] += (R - count); } score = calcScoreFromObjCount(c_in, c_out); return score; } /// Score each sax in the matrix double calcScoreFromObjCount(double[] c_in, double[] c_out) { /// multi-class double diff, sum = 0, max_val = Double.NEGATIVE_INFINITY, min_val = Double.POSITIVE_INFINITY; for (int i = 0; i < numClass; i++) { diff = (c_in[i] - c_out[i]); if (diff > max_val) { max_val = diff; } if (diff < min_val) { min_val = diff; } sum += Math.abs(diff); } return (sum - Math.abs(max_val) - Math.abs(min_val)) + Math.abs(max_val - min_val); } /// Count the number of occurrences void randomProjection(int R, double percent_mask, int sax_len) { HashMap<Integer, HashSet<Integer>> Hash_Mark = new HashMap<>(); int word, mask_word, new_word; HashSet<Integer> obj_set, ptr; int num_mask = (int) Math.ceil(percent_mask * sax_len); for (int r = 0; r < R; r++) { mask_word = createMaskWord(num_mask, sax_len); /// random projection and mark non-duplicate object for (Map.Entry<Integer, USAX_elm_type> entry : uSAXMap.entrySet()) { word = entry.getKey(); obj_set = entry.getValue().obj_set; //put 
the new word and set combo in the hash_mark new_word = word | mask_word; ptr = Hash_Mark.get(new_word); if (ptr == null) { Hash_Mark.put(new_word, new HashSet<>(obj_set)); } else { //add onto our ptr, rather than overwrite. ptr.addAll(obj_set); } } /// hash again for keep the count for (Map.Entry<Integer, USAX_elm_type> entry : uSAXMap.entrySet()) { word = entry.getKey(); new_word = word | mask_word; obj_set = Hash_Mark.get(new_word); //increase the histogram for (Integer o_it : obj_set) { Integer count = entry.getValue().obj_count.get(o_it); count = count == null ? 1 : count + 1; entry.getValue().obj_count.put(o_it, count); } } Hash_Mark.clear(); } } /// create mask word (two random may give same position, we ignore it) int createMaskWord(int num_mask, int word_len) { int a, b; a = 0; for (int i = 0; i < num_mask; i++) { b = 1 << (rand.nextInt()%word_len); //generate a random number between 0 and the word_len a = a | b; } return a; } /// Set variables for next node. They are data, Label, classFreq, numObj void setCurData(int node_id) { if (node_id == 1) { //clone the arrayList data = new ArrayList<>(); for (ArrayList<Double> a : orgData) { data.add(cloneArrayList(a)); } Label = cloneArrayList(Org_Label); //clone the frequnecy array. classFreq = new int[orgClassFreq.length]; System.arraycopy(orgClassFreq, 0, classFreq, 0, orgClassFreq.length); } else { ArrayList<Integer> it = nodeObjList.get(node_id); numObj = it.size(); data.clear(); Label.clear(); for (int i = 0; i < numClass; i++) { classFreq[i] = 0; } int cur_class; //build our data structures based on the node and the labels and histogram. 
for (Integer in : it) { cur_class = Org_Label.get(in); data.add(orgData.get(in)); Label.add(cur_class); classFreq[cur_class]++; } } classEntropy = entropyArray(classFreq, numObj); } /// new function still in doubt (as in Mueen's paper) double entropyArray(int[] A, int total) { double en = 0; double a; for (int i = 0; i < numClass; i++) { a = (double) A[i] / (double) total; if (a > 0) { en -= a * Math.log(a); } } return en; } void readTrainData(Instances data) { orgData = InstanceTools.fromWekaInstancesList(data); orgClassFreq = new int[numClass]; Org_Label = new ArrayList<>(); for (Instance i : data) { Org_Label.add((int) i.classValue()); orgClassFreq[(int) i.classValue()]++; } } /// Fix card = 4 here !!! //create a sax word of size 4 here as an int. int createSAXWord(double[] sum_segment, int[] elm_segment, double mean, double std, int sax_len) { int word = 0, val = 0; double d = 0; for (int i = 0; i < sax_len; i++) { d = (sum_segment[i] / elm_segment[i] - mean) / std; if (d < 0) { if (d < -0.67) { val = 0; } else { val = 1; } } else if (d < 0.67) { val = 2; } else { val = 3; } word = (word << 2) | (val); } return word; } void createSAXList(int subseq_len, int sax_len, int w) { double ex, ex2, mean, std; double sum_segment[] = new double[sax_len]; int elm_segment[] = new int[sax_len]; int series, j, j_st, k, slot; double d; int word, prev_word; USAX_elm_type ptr; //init the element segments to the W value. 
for (k = 0; k < sax_len; k++) { elm_segment[k] = w; } elm_segment[sax_len - 1] = subseq_len - (sax_len - 1) * w; for (series = 0; series < data.size(); series++) { ex = ex2 = 0; prev_word = -1; for (k = 0; k < sax_len; k++) { sum_segment[k] = 0; } /// Case 1: Initial for (j = 0; (j < data.get(series).size()) && (j < subseq_len); j++) { d = data.get(series).get(j); ex += d; ex2 += d * d; slot = (int) Math.floor((j) / w); sum_segment[slot] += d; } /// Case 2: Slightly Update for (; (j <= (int) data.get(series).size()); j++) { j_st = j - subseq_len; mean = ex / subseq_len; std = Math.sqrt(ex2 / subseq_len - mean * mean); /// Create SAX from sum_segment word = createSAXWord(sum_segment, elm_segment, mean, std, sax_len); if (word != prev_word) { prev_word = word; //we're updating the reference so no need to re-add. ptr = uSAXMap.get(word); if (ptr == null) { ptr = new USAX_elm_type(); } ptr.obj_set.add(series); ptr.sax_id.add(new Pair<>(series, j_st)); uSAXMap.put(word, ptr); } /// For next update if (j < data.get(series).size()) { double temp = data.get(series).get(j_st); ex -= temp; ex2 -= temp * temp; for (k = 0; k < sax_len - 1; k++) { sum_segment[k] -= data.get(series).get(j_st + (k) * w); sum_segment[k] += data.get(series).get(j_st + (k + 1) * w); } sum_segment[k] -= data.get(series).get(j_st + (k) * w); sum_segment[k] += data.get(series).get(j_st + Math.min((k + 1) * w, subseq_len)); d = data.get(series).get(j); ex += d; ex2 += d * d; } } } } void setNextNodeObj(int node_id, Shapelet sh) { int q_obj = sh.obj; int q_pos = sh.pos; int q_len = sh.len; double dist_th = sh.dist_th; double[] query = new double[q_len]; int left_node_id = node_id * 2; int right_node_id = node_id * 2 + 1; int real_obj; /// Memory Allocation while (nodeObjList.size() <= right_node_id) { nodeObjList.add(new ArrayList<Integer>()); classifyList.add(-2); finalSh.add(new Shapelet()); if (nodeObjList.size() == 2) { /// Note that nodeObjList[0] is not used for (int i = 0; i < numObj; i++) { 
nodeObjList.get(1).add(i); } } } finalSh.set(node_id, sh); /// Use the shapelet on previous data for (int i = 0; i < q_len; i++) { query[i] = data.get(q_obj).get(q_pos + i); } double dist; int m = query.length; double[] Q = new double[m]; int[] order = new int[m]; for (int obj = 0; obj < numObj; obj++) { dist = nn.nearestNeighborSearch(query, data.get(obj), obj, Q, order); real_obj = nodeObjList.get(node_id).get(obj); int node = dist <= dist_th ? left_node_id : right_node_id; //left or right node? nodeObjList.get(node).add(real_obj); } /// If left/right is pure, or so small, stop spliting int max_c_in = -1, sum_c_in = 0; int max_c_out = -1, sum_c_out = 0; int max_ind_c_in = -1, max_ind_c_out = -1; for (int i = 0; i < sh.c_in.length; i++) { int c_in_i = sh.c_in[i]; int c_out_i = sh.c_out[i]; sum_c_in += c_in_i; if (max_c_in < c_in_i) { max_c_in = c_in_i; max_ind_c_in = i; } sum_c_out += c_out_i; if (max_c_out < c_out_i) { max_c_out = c_out_i; max_ind_c_out = i; } } boolean left_is_leaf = false; boolean right_is_leaf = false; MIN_OBJ_SPLIT = (int) Math.ceil((double) (MIN_PERCENT_OBJ_SPLIT * numObj) / (double) numClass); if ((sum_c_in <= MIN_OBJ_SPLIT) || ((double) max_c_in / (double) sum_c_in >= MAX_PURITY_SPLIT)) { left_is_leaf = true; } if ((sum_c_out <= MIN_OBJ_SPLIT) || ((double) max_c_out / (double) sum_c_out >= MAX_PURITY_SPLIT)) { right_is_leaf = true; } int max_tree_dept = (int) (EXTRA_TREE_DEPTH + Math.ceil(Math.log(numClass) / Math.log(2))); if (node_id >= Math.pow(2, max_tree_dept)) { left_is_leaf = true; right_is_leaf = true; } //set node. classifyList.set(node_id, -1); //set left child. int val = left_is_leaf ? max_ind_c_in : -1; classifyList.set(left_node_id, val); //set right child. val = right_is_leaf ? 
max_ind_c_out : -1; classifyList.set(right_node_id, val); } @Override public double classifyInstance(Instance instance) throws Exception { int node_id, m; double d, dist_th; double[] dArray = instance.toDoubleArray(); ArrayList<Double> data = new ArrayList<>(); //-1 off length so we don't add the classValue. for (int i = 0; i < dArray.length - 1; i++) { data.add(dArray[i]); } int tree_size = nodeObjList.size(); /// start at the top node node_id = 1; while ((classifyList.get(node_id) < 0) || (node_id > tree_size)) { Shapelet node = finalSh.get(node_id); m = node.len; double[] Q = new double[m]; int[] order = new int[m]; d = nn.nearestNeighborSearch(node.ts, data, 0, Q, order); dist_th = node.dist_th; if (d <= dist_th) { node_id = 2 * node_id; } else { node_id = 2 * node_id + 1; } } return (double) classifyList.get(node_id); } @Override public double[] distributionForInstance(Instance instance) throws Exception { double c=classifyInstance(instance); double[] r=new double[instance.numClasses()]; r[(int)c]=1; return r; } @Override public Capabilities getCapabilities() { throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates. } public static void main(String[] args) throws Exception { final String dotdotSlash = ".." 
+ File.separator; String datasetName = "ItalyPowerDemand"; String datasetLocation = dotdotSlash + dotdotSlash + "resampled data sets" + File.separator + datasetName + File.separator + datasetName; for (int i = 0; i < 100; i++) { Instances train = DatasetLoading.loadDataNullable(datasetLocation + i + "_TRAIN"); Instances test = DatasetLoading.loadDataNullable(datasetLocation + i + "_TEST"); FastShapelets fs = new FastShapelets(); try { fs.buildClassifier(train); double accuracy = utilities.ClassifierTools.accuracy(test, fs); System.out.println("fold " + i + " acc: " + accuracy); } catch (Exception ex) { System.out.println("Exception " + ex); } } } private class ScoreComparator implements Comparator<Pair<Integer, Double>> { @Override //if the left one is bigger put it closer to the top. public int compare(Pair<Integer, Double> t, Pair<Integer, Double> t1) { return Double.compare(t1.second, t.second); } } private class DistComparator implements Comparator<Pair<Integer, Double>> { @Override public int compare(Pair<Integer, Double> t, Pair<Integer, Double> t1) { return Double.compare(t.second, t1.second); } } private class Shapelet { public double gain; public double gap; public double dist_th; public int obj; public int pos; public int len; public int num_diff; int[] c_in; int[] c_out; double[] ts; public Shapelet() { gain = Double.NEGATIVE_INFINITY; gap = Double.NEGATIVE_INFINITY; dist_th = Double.POSITIVE_INFINITY; obj = -1; pos = -1; len = -1; num_diff = -1; } void setValueFew(double gain, double gap, double dist_th) { this.gain = gain; this.gap = gap; this.dist_th = dist_th; } void setValueAll(double gain, double gap, double dist_th, int obj, int pos, int len, int num_diff, int[] in, int[] out) { this.gain = gain; this.gap = gap; this.dist_th = dist_th; this.obj = obj; this.pos = pos; this.len = len; this.num_diff = num_diff; c_in = new int[in.length]; c_out = new int[out.length]; System.arraycopy(in, 0, c_in, 0, in.length); System.arraycopy(out, 0, c_out, 0, 
out.length); } void setTS(double[] ts) { this.ts = ts; } private boolean lessThan(Shapelet other) { if (gain > other.gain) { return false; } return ((gain < other.gain) || ((gain == other.gain) && (num_diff > other.num_diff)) || ((gain == other.gain) && (num_diff == other.num_diff) && (gap < other.gap))); } } private class USAX_elm_type { HashSet<Integer> obj_set; ArrayList<Pair<Integer, Integer>> sax_id; HashMap<Integer, Integer> obj_count; public USAX_elm_type() { obj_set = new HashSet<>(); sax_id = new ArrayList<>(); obj_count = new HashMap<>(); } } private class Pair<A, B> { public A first; public B second; Pair() { } Pair(A l, B r) { first = l; second = r; } } private class NN_ED { private class Index implements Comparable<Index> { double value; int index; public Index() { } @Override public int compareTo(Index t) { return Math.abs((int) this.value) - Math.abs((int) t.value); } } public NN_ED() { } double nearestNeighborSearch(double[] query, ArrayList<Double> data, int obj_id, double[] Q, int[] order) { double bsf; int m, M; double d; int i; int j; double ex, ex2, mean, std; int loc = 0; m = query.length; M = data.size(); bsf = Double.MAX_VALUE; i = 0; j = 0; ex = ex2 = 0; if (obj_id == 0) { for (i = 0; i < m; i++) { d = query[i]; ex += d; ex2 += d * d; Q[i] = d; } mean = ex / m; std = ex2 / m; std = Math.sqrt(std - mean * mean); for (i = 0; i < m; i++) { Q[i] = (Q[i] - mean) / std; } Index[] Q_tmp = new Index[m]; for (i = 0; i < m; i++) { Q_tmp[i] = new Index(); Q_tmp[i].value = Q[i]; Q_tmp[i].index = i; } Arrays.sort(Q_tmp); for (i = 0; i < m; i++) { Q[i] = Q_tmp[i].value; order[i] = Q_tmp[i].index; } } i = 0; j = 0; ex = ex2 = 0; double[] T = new double[2 * m]; double dist = 0; while (i < M) { d = data.get(i); ex += d; ex2 += d * d; T[i % m] = d; T[(i % m) + m] = d; if (i >= m - 1) { mean = ex / m; std = ex2 / m; std = Math.sqrt(std - mean * mean); j = (i + 1) % m; dist = distance(Q, order, T, j, m, mean, std, bsf); if (dist < bsf) { bsf = dist; loc = i - 
m + 1; } ex -= T[j]; ex2 -= T[j] * T[j]; } i++; } return bsf; } double distance(double[] Q, int[] order, double[] T, int j, int m, double mean, double std, double best_so_far) { int i; double sum = 0; double bsf2 = best_so_far * best_so_far; for (i = 0; i < m && sum < bsf2; i++) { double x = (T[(order[i] + j)] - mean) / std; sum += (x - Q[i]) * (x - Q[i]); } return Math.sqrt(sum); } double distance(double[] Q, int[] order, double[] T, int j, int m, double mean, double std) { return distance(Q, order, T, j, m, mean, std, Double.MAX_VALUE); } } }
30,424
31.298301
165
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/shapelet_based/LearnShapelets.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package tsml.classifiers.shapelet_based;

import experiments.data.DatasetLoading;
import java.io.File;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import tsml.classifiers.EnhancedAbstractClassifier;
import tsml.classifiers.ParameterSplittable;
import utilities.StatisticalUtilities;
import utilities.InstanceTools;
import static utilities.InstanceTools.fromWekaInstancesArray;
import static utilities.StatisticalUtilities.calculateSigmoid;
import weka.clusterers.SimpleKMeans;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;

/**
 * Learning Time-Series Shapelets (LS).
 *
 * Jointly learns K shapelets at R length scales together with per-class
 * logistic-regression weights via AdaGrad-style stochastic gradient descent,
 * following Grabocka et al., "Learning Time-Series Shapelets", KDD 2014.
 * Equation numbers cited in field comments refer to that paper.
 *
 * @author Original algorithm and adjustments Josef Grabocka, initial conversion
 * Aaron Bostrom
 */
public class LearnShapelets extends EnhancedAbstractClassifier
        implements ParameterSplittable, TechnicalInformationHandler {

    @Override
    public TechnicalInformation getTechnicalInformation() {
        TechnicalInformation result;
        result = new TechnicalInformation(TechnicalInformation.Type.ARTICLE);
        result.setValue(TechnicalInformation.Field.AUTHOR, "J. Grabocka, N. Schilling, M. Wistuba and L. Schmidt-Thieme");
        result.setValue(TechnicalInformation.Field.TITLE, "Learning Time-Series Shapelets");
        result.setValue(TechnicalInformation.Field.JOURNAL, "Proc. 20th SIGKDD");
        result.setValue(TechnicalInformation.Field.YEAR, "2014");
        return result;
    }

    // length of a time-series (number of attributes minus the class attribute)
    public int seriesLength;
    // shapelet length at each scale r: L[r] = (r+1) * L_min
    public int[] L;
    // number of latent patterns (shapelets) per scale
    public int K;
    // number of classes
    public int C;
    // number of sliding-window segments per scale: seriesLength - L[r]
    public int numberOfSegments[];
    // minimum shapelet length, derived from percentageOfSeriesLength
    int L_min;
    // learned shapelets, indexed [scale][shapelet][point]
    double[][][] shapelets;
    // classification weights, indexed [class][scale][shapelet]
    double W[][][];
    double biasW[];
    // AdaGrad accumulators (sums of squared gradients)
    double[][][] gradHistShapelets;
    double[][][] gradHistW;
    double[] gradHistBiasW;
    // the regularization parameter for W
    public double lambdaW = 0.01;
    // number of shapelet length scales
    public int R = 3;
    // minimum shapelet length as a fraction of the series length
    public double percentageOfSeriesLength = 0.2;
    // the learning rate
    public double eta = 0.1;
    // the soft-minimum sharpness parameter (more negative = closer to hard min)
    public double alpha = -30;
    // the number of SGD iterations
    public int maxIter = 300;
    public Instances trainSet;
    // time series data and one-vs-all label matrix
    public double[][] train, classValues_train;
    public List<Double> nominalLabels;
    // structures for storing the precomputed terms
    double D_train[][][][]; // mean squared error of each segment vs each shapelet. Formula 20
    double E_train[][][][]; // e^(alpha*D_r,i,k,j), part of Formula 23
    double M_train[][][];   // generalised soft minimum. Formula 19
    double Psi_train[][][]; // sum_j e^(alpha*D_r,i,k,j); denominator of Formula 23
    double sigY_train[][];
    double D_test[][][];
    double E_test[][][];
    double M_test[][];
    double Psi_test[][];
    double sigY_test[];
    // temporary variables useful for the derivatives of the shapelets
    double[][] tmp2;
    double regWConst, tmp1, tmp3, dLdY, gradW_crk, gradS_rkl, gradBiasW_c,
            eps = 0.000000000000000000001;
    Random rand = new Random();
    // indices of the positive and negative instances per class
    List<List<Integer>> posIdxs;
    List<List<Integer>> negIdxs;
    List<Integer> instanceIdxs;
    public boolean enableParallel = true;

    // Parameter search settings
    boolean paraSearch = false;
    double[] lambdaWRange = {0.01, 0.1};
    double[] percentageOfSeriesLengthRange = {0.15};
    int[] shapeletLengthScaleRange = {2, 4};
    double maxAcc;

    public void setParamSearch(boolean b) {
        paraSearch = b;
    }

    /** Set all hyper-parameters to the defaults recommended by the authors. */
    public void fixParameters() {
        lambdaW = 0.01;                 // regularization
        R = 3;                          // shapelet length scales
        percentageOfSeriesLength = 0.2;
        eta = 0.1;                      // learning rate
        alpha = -30;                    // softmax parameter
        maxIter = 300;                  // iterations
    }

    /**
     * The actual parameter values are set internally; this integer is just a
     * key to select one of the pre-defined parameter combinations. Range is 1-8.
     *
     * BUG FIX: the original indexed percentageOfSeriesLengthRange[1] although
     * the array holds a single element, throwing ArrayIndexOutOfBoundsException
     * for keys 3, 4, 7 and 8. The index is now clamped to the last element.
     */
    public void setParametersFromIndex(int x) {
        lambdaW = (x <= 4) ? lambdaWRange[0] : lambdaWRange[1];
        int pIdx = (x == 1 || x == 2 || x == 5 || x == 6) ? 0 : 1;
        // clamp so a one-element range cannot overflow
        pIdx = Math.min(pIdx, percentageOfSeriesLengthRange.length - 1);
        percentageOfSeriesLength = percentageOfSeriesLengthRange[pIdx];
        R = (x % 2 == 1) ? shapeletLengthScaleRange[0] : shapeletLengthScaleRange[1];
    }

    public String getParas() {
        return lambdaW + "," + percentageOfSeriesLength + "," + R;
    }

    public double getAcc() {
        return maxAcc;
    }

    public LearnShapelets() {
        super(CANNOT_ESTIMATE_OWN_PERFORMANCE);
    }

    @Override
    public void setSeed(int seed) {
        super.setSeed(seed);
        rand = new Random(seed);
    }

    /**
     * Initialize all model data structures: shapelet containers, precomputed
     * term placeholders, logistic weights and AdaGrad accumulators. Must be
     * called after {@code train}, {@code seriesLength} and {@code nominalLabels}
     * have been populated.
     */
    public void initialize() throws Exception {
        // avoid K=0
        if (K == 0) {
            K = 1;
        }
        L_min = (int) (percentageOfSeriesLength * seriesLength);
        // set the labels to be binary 0 and 1, needed for the logistic loss
        createOneVsAllTargets();
        // initialize the shapelets (completed during the clustering)
        shapelets = new double[R][][];
        numberOfSegments = new int[R];
        L = new int[R];
        // set the lengths of shapelets and the number of segments at each scale r
        int totalSegments = 0;
        for (int r = 0; r < R; r++) {
            L[r] = (r + 1) * L_min;
            numberOfSegments[r] = seriesLength - L[r];
            totalSegments += train.length * numberOfSegments[r];
        }
        // rule of thumb: number of shapelets per scale is the logarithm of the
        // total number of segments, scaled by the number of classes
        K = (int) (Math.log(totalSegments) * (C - 1));
        // initialize the terms for pre-computation
        D_train = new double[train.length][R][K][];
        E_train = new double[train.length][R][K][];
        for (int i = 0; i < train.length; i++) {
            for (int r = 0; r < R; r++) {
                for (int k = 0; k < K; k++) {
                    D_train[i][r][k] = new double[numberOfSegments[r]];
                    E_train[i][r][k] = new double[numberOfSegments[r]];
                }
            }
        }
        // placeholders for the precomputed values
        M_train = new double[train.length][R][K];
        Psi_train = new double[train.length][R][K];
        sigY_train = new double[train.length][C];
        // initialize the weights
        // NOTE(review): with eps = 1e-21 this sets every weight to ~-1 rather
        // than uniform in [-1, 1]; looks like "2*rand-1" was intended, but the
        // behavior is kept as-is to match the published implementation — confirm.
        W = new double[C][R][K];
        biasW = new double[C];
        for (int c = 0; c < C; c++) {
            for (int r = 0; r < R; r++) {
                for (int k = 0; k < K; k++) {
                    W[c][r][k] = 2 * eps * rand.nextDouble() - 1;
                }
            }
            biasW[c] = 2 * eps * rand.nextDouble() - 1;
        }
        // initialize gradient accumulators
        gradHistW = new double[C][R][K];
        gradHistBiasW = new double[C];
        gradHistShapelets = new double[R][][];
        for (int r = 0; r < R; r++)
            gradHistShapelets[r] = new double[K][L[r]];

        initializeShapeletsKMeans();

        print("Initialization completed: L_min=" + L_min + ", K=" + K + ", R=" + R
                + ", C=" + C + ", lambdaW=" + lambdaW);

        tmp2 = new double[R][];
        for (int r = 0; r < R; r++)
            tmp2[r] = new double[numberOfSegments[r]];
        // constant term for the regularization
        regWConst = ((double) 2.0 * lambdaW) / ((double) train.length);
        // initialize an array of the instance indices
        instanceIdxs = new ArrayList<>();
        for (int i = 0; i < train.length; i++) {
            instanceIdxs.add(i);
        }
    }

    /**
     * Create one-vs-all binary targets and cache, per class, the indices of
     * its positive and negative training instances.
     */
    public void createOneVsAllTargets() {
        C = nominalLabels.size();
        classValues_train = new double[train.length][C];
        for (int i = 0; i < train.length; i++) {
            // first set everything to zero
            for (int c = 0; c < C; c++) {
                classValues_train[i][c] = 0;
            }
            // then set the real label index to 1
            int indexLabel = nominalLabels.indexOf(trainSet.get(i).classValue());
            classValues_train[i][indexLabel] = 1.0;
        }
        posIdxs = new ArrayList<List<Integer>>();
        negIdxs = new ArrayList<List<Integer>>();
        for (int c = 0; c < C; c++) {
            List<Integer> posIdx_c = new ArrayList<Integer>();
            List<Integer> negIdx_c = new ArrayList<Integer>();
            for (int i = 0; i < train.length; i++)
                if (classValues_train[i][c] == 1.0)
                    posIdx_c.add(i);
                else
                    negIdx_c.add(i);
            posIdxs.add(posIdx_c);
            negIdxs.add(negIdx_c);
        }
    }

    /**
     * Initialize the shapelets from a file provided.
     *
     * NOTE(review): the original body was an exact duplicate of
     * {@link #initializeShapeletsKMeans()} (no file was ever read); both now
     * delegate to the same private helper, preserving behavior.
     */
    public void initializeShapeletsFromFile() throws Exception {
        clusterSegmentsToShapelets();
    }

    /** Initialize the shapelets as k-means centroids of the training segments. */
    public void initializeShapeletsKMeans() throws Exception {
        clusterSegmentsToShapelets();
    }

    /**
     * For each scale r (shapelet length L_min*(r+1)): slide a window over every
     * training series, z-normalize each segment, cluster the segments into K
     * groups with k-means and take the centroids as the initial shapelets.
     */
    private void clusterSegmentsToShapelets() throws Exception {
        for (int r = 0; r < R; r++) {
            double[][] segments_r = new double[train.length * numberOfSegments[r]][L[r]];
            // construct the segments from the train set
            for (int i = 0; i < train.length; i++)
                for (int j = 0; j < numberOfSegments[r]; j++)
                    for (int l = 0; l < L[r]; l++)
                        segments_r[i * numberOfSegments[r] + j][l] = train[i][j + l];
            // normalize segments
            for (int i = 0; i < train.length; i++)
                for (int j = 0; j < numberOfSegments[r]; j++)
                    segments_r[i * numberOfSegments[r] + j] =
                            StatisticalUtilities.normalize(segments_r[i * numberOfSegments[r] + j]);

            Instances ins = InstanceTools.toWekaInstances(segments_r);
            SimpleKMeans skm = new SimpleKMeans();
            skm.setNumClusters(K);
            skm.setMaxIterations(100);
            // skm.setInitializeUsingKMeansPlusPlusMethod(true);
            skm.setSeed((int) (rand.nextDouble() * 1000));
            skm.buildClusterer(ins);
            Instances centroidsWeka = skm.getClusterCentroids();

            shapelets[r] = InstanceTools.fromWekaInstancesArray(centroidsWeka, false);

            if (shapelets[r] == null)
                print("P not set");
        }
    }

    /** Predict the raw (pre-sigmoid) score for class c given soft-min matrix M. */
    public double predict_i(double[][] M, int c) {
        double Y_hat_ic = biasW[c];
        for (int r = 0; r < R; r++) {
            for (int k = 0; k < K; k++) {
                Y_hat_ic += M[r][k] * W[c][r][k];
            }
        }
        return Y_hat_ic;
    }

    /**
     * Precompute, for one series, the distance terms D, the exponentials E,
     * the soft-min normalizers Psi, the soft minima M and the per-class
     * sigmoid outputs sigY.
     */
    public void preCompute(double[][][] D, double[][][] E, double[][] Psi,
                           double[][] M, double[] sigY, double[] series) {
        for (int r = 0; r < R; r++) {
            // in most cases shapelets[r].length == K, but this is not always true
            for (int k = 0; k < shapelets[r].length; k++) {
                for (int j = 0; j < numberOfSegments[r]; j++) {
                    // precompute D: mean squared error between segment j and shapelet k
                    D[r][k][j] = 0;
                    double err = 0;
                    for (int l = 0; l < L[r]; l++) {
                        err = series[j + l] - shapelets[r][k][l];
                        D[r][k][j] += err * err;
                    }
                    D[r][k][j] /= (double) L[r];
                    // precompute E
                    E[r][k][j] = Math.exp(alpha * D[r][k][j]);
                }
                // precompute Psi
                Psi[r][k] = 0;
                for (int j = 0; j < numberOfSegments[r]; j++)
                    Psi[r][k] += Math.exp(alpha * D[r][k][j]);
                // precompute M (soft minimum over segments)
                M[r][k] = 0;
                for (int j = 0; j < numberOfSegments[r]; j++)
                    M[r][k] += D[r][k][j] * E[r][k][j];
                M[r][k] /= Psi[r][k];
            }
        }
        for (int c = 0; c < C; c++) {
            sigY[c] = calculateSigmoid(predict_i(M, c));
        }
    }

    /** Logistic loss of one instance for class c. */
    public double accuracyLoss(double[][] M, double[] classValues, int c) {
        double Y_hat_ic = predict_i(M, c);
        double sig_y_ic = calculateSigmoid(Y_hat_ic);
        double returnVal = -classValues[c] * Math.log(sig_y_ic)
                - (1 - classValues[c]) * Math.log(1 - sig_y_ic);
        return returnVal;
    }

    /** Mean logistic loss over the whole training set. */
    public double accuracyLossTrainSet() {
        double accuracyLoss = 0;
        for (int i = 0; i < train.length; i++) {
            preCompute(D_train[i], E_train[i], Psi_train[i], M_train[i], sigY_train[i], train[i]);
            for (int c = 0; c < C; c++) {
                accuracyLoss += accuracyLoss(M_train[i], classValues_train[i], c);
            }
        }
        return accuracyLoss / train.length;
    }

    /**
     * One AdaGrad SGD step for class c on training instance i: updates the
     * weights W, the shapelets and the bias from the logistic-loss gradient.
     */
    public void learnF(int c, int i) {
        preCompute(D_train[i], E_train[i], Psi_train[i], M_train[i], sigY_train[i], train[i]);
        dLdY = -(classValues_train[i][c] - sigY_train[i][c]);
        for (int r = 0; r < R; r++) {
            for (int k = 0; k < shapelets[r].length; k++) {
                // update the weights
                gradW_crk = dLdY * M_train[i][r][k] + regWConst * W[c][r][k];
                gradHistW[c][r][k] += gradW_crk * gradW_crk;
                W[c][r][k] -= (eta / (Math.sqrt(gradHistW[c][r][k]) + eps)) * gradW_crk;
                // update the shapelets
                tmp1 = (2.0 / ((double) L[r] * Psi_train[i][r][k]));
                // precompute the per-segment term for speed
                for (int j = 0; j < numberOfSegments[r]; j++)
                    tmp2[r][j] = E_train[i][r][k][j]
                            * (1 + alpha * (D_train[i][r][k][j] - M_train[i][r][k]));
                for (int l = 0; l < L[r]; l++) {
                    tmp3 = 0;
                    for (int j = 0; j < numberOfSegments[r]; j++)
                        tmp3 += tmp2[r][j] * (shapelets[r][k][l] - train[i][j + l]);
                    gradS_rkl = dLdY * W[c][r][k] * tmp1 * tmp3;
                    gradHistShapelets[r][k][l] += gradS_rkl * gradS_rkl;
                    shapelets[r][k][l] -=
                            (eta / (Math.sqrt(gradHistShapelets[r][k][l]) + eps)) * gradS_rkl;
                }
            }
        }
        gradBiasW_c = dLdY;
        gradHistBiasW[c] += gradBiasW_c * gradBiasW_c;
        biasW[c] -= (eta / (Math.sqrt(gradHistBiasW[c]) + eps)) * gradBiasW_c;
    }

    /** One SGD epoch: for each class, sample a random positive and negative instance. */
    public void learnF() {
        for (int c = 0; c < C; c++)
            for (int i = 0; i < train.length; i++) {
                // a random index from the positive instances of this class
                int posIdx = posIdxs.get(c).get(rand.nextInt(posIdxs.get(c).size()));
                // a random index from the negative instances of this class
                int negIdx = negIdxs.get(c).get(rand.nextInt(negIdxs.get(c).size()));
                // learn the model parameters according to the objective
                // of a random positive and negative instance
                learnF(c, posIdx);
                learnF(c, negIdx);
            }
    }

    /**
     * Build the classifier, optionally tuning {lambdaW, percentageOfSeriesLength, R}
     * by 2-fold cross-validation when paraSearch is enabled; otherwise the fixed
     * author-recommended parameters are used.
     */
    public void buildClassifier(Instances trainData) throws Exception {
        long startTime = System.nanoTime();

        if (paraSearch) {
            double[] paramsLambdaW = lambdaWRange;
            double[] paramsPercentageOfSeriesLength = percentageOfSeriesLengthRange;
            int[] paramsShapeletLengthScale = shapeletLengthScaleRange;

            int noFolds = 2;
            double bsfAccuracy = 0;
            int[] params = {0, 0, 0};
            double accuracy = 0;

            // randomize and stratify the data prior to cross validation
            trainData.randomize(rand);
            trainData.stratify(noFolds);

            int numHpsCombinations = 1;
            for (int i = 0; i < paramsLambdaW.length; i++) {
                for (int j = 0; j < paramsPercentageOfSeriesLength.length; j++) {
                    for (int k = 0; k < paramsShapeletLengthScale.length; k++) {
                        percentageOfSeriesLength = paramsPercentageOfSeriesLength[j];
                        R = paramsShapeletLengthScale[k];
                        lambdaW = paramsLambdaW[i];

                        print("HPS Combination #" + numHpsCombinations + ": {R=" + R
                                + ", L=" + percentageOfSeriesLength + ", lambdaW=" + lambdaW + "}");
                        print("--------------------------------------");
                        double sumAccuracy = 0;
                        // build our test and train sets for cross-validation
                        for (int l = 0; l < noFolds; l++) {
                            Instances trainCV = trainData.trainCV(noFolds, l);
                            Instances testCV = trainData.testCV(noFolds, l);
                            // fixed hyper-parameters
                            eta = 0.1;
                            alpha = -30;
                            maxIter = 300;
                            print("Learn model for Fold-" + l + ":");
                            train(trainCV);
                            // test on the remaining fold
                            accuracy = utilities.ClassifierTools.accuracy(testCV, this);
                            sumAccuracy += accuracy;
                            print("Accuracy-Fold-" + l + " = " + accuracy);
                            trainCV = null;
                            testCV = null;
                        }
                        sumAccuracy /= noFolds;
                        print("Accuracy-CV = " + sumAccuracy);
                        print("--------------------------------------");
                        if (sumAccuracy > bsfAccuracy) {
                            int[] p = {i, j, k};
                            params = p;
                            bsfAccuracy = sumAccuracy;
                        }
                        numHpsCombinations++;
                    }
                }
            }
            System.gc();

            maxAcc = bsfAccuracy;
            lambdaW = paramsLambdaW[params[0]];
            percentageOfSeriesLength = paramsPercentageOfSeriesLength[params[1]];
            R = paramsShapeletLengthScale[params[2]];
            eta = 0.1;
            alpha = -30;
            maxIter = 600;
            print("Learn final model with best hyper-parameters: R=" + R
                    + ", L=" + percentageOfSeriesLength + ", lambdaW=" + lambdaW);
        } else {
            fixParameters();
            print("Fixed parameters: R=" + R + ", L=" + percentageOfSeriesLength
                    + ", lambdaW=" + lambdaW);
        }

        train(trainData);
        trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
        trainResults.setBuildTime(System.nanoTime() - startTime);
    }

    /** Run the full learning procedure (initialization + SGD) on one data set. */
    private void train(Instances data) throws Exception {
        trainSet = data;
        // don't include the class label at the end
        seriesLength = trainSet.numAttributes() - 1;

        nominalLabels = readNominalTargets(trainSet);
        // guard: readNominalTargets returns null for an empty data set
        if (nominalLabels == null || nominalLabels.size() < 2) {
            System.err.println("Fatal error: Number of classes is "
                    + (nominalLabels == null ? 0 : nominalLabels.size()));
            return;
        }

        // convert the training set into a 2D matrix
        train = fromWekaInstancesArray(trainSet, true);

        // initialize the data structures
        initialize();

        // apply stochastic gradient descent in a series of iterations
        for (int iter = 0; iter <= maxIter; iter++) {
            // learn the latent matrices
            learnF();
            // measure the loss at 1/3, 2/3 and the end of training
            if ((iter % (maxIter / 3)) == 0 && iter > 0) {
                double lossTrain = accuracyLossTrainSet();
                print("Iter=" + iter + ", Loss=" + lossTrain);
                // if divergence is detected break
                if (Double.isNaN(lossTrain))
                    break;
            }
        }
    }

    @Override
    public double classifyInstance(Instance instance) throws Exception {
        double[] temp = instance.toDoubleArray();
        // remove the class value
        double[] test = new double[temp.length - 1];
        System.arraycopy(temp, 0, test, 0, temp.length - 1);
        // z-normalize the time series
        test = StatisticalUtilities.normalize(test);

        // initialize the terms for pre-computation
        D_test = new double[R][K][];
        E_test = new double[R][K][];
        for (int r = 0; r < R; r++) {
            for (int k = 0; k < K; k++) {
                D_test[r][k] = new double[numberOfSegments[r]];
                E_test[r][k] = new double[numberOfSegments[r]];
            }
        }
        // placeholders for the precomputed values
        M_test = new double[R][K];
        Psi_test = new double[R][K];
        sigY_test = new double[C];

        preCompute(D_test, E_test, Psi_test, M_test, sigY_test, test);

        // BUG FIX: was Double.MIN_VALUE (smallest *positive* double), which only
        // worked by accident because sigmoid outputs are strictly positive.
        double max_Y_hat_ic = Double.NEGATIVE_INFINITY;
        int label_i = 0;
        for (int c = 0; c < C; c++) {
            double Y_hat_ic = calculateSigmoid(predict_i(M_test, c));
            if (Y_hat_ic > max_Y_hat_ic) {
                max_Y_hat_ic = Y_hat_ic;
                label_i = c;
            }
        }
        return nominalLabels.get(label_i);
    }

    void print(String s) {
        if (debug)
            System.out.println(s);
    }

    @Override
    public Capabilities getCapabilities() {
        throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
    }

    /**
     * Collect the distinct class values of a data set, sorted ascending.
     * Returns null for an empty data set.
     */
    public static ArrayList<Double> readNominalTargets(Instances instances) {
        if (instances.size() <= 0)
            return null;
        ArrayList<Double> nominalLabels = new ArrayList<>();
        for (Instance ins : instances) {
            boolean alreadyAdded = false;
            for (Double nominalLabel : nominalLabels) {
                if (nominalLabel == ins.classValue()) {
                    alreadyAdded = true;
                    break;
                }
            }
            if (!alreadyAdded) {
                nominalLabels.add(ins.classValue());
            }
        }
        Collections.sort(nominalLabels);
        return nominalLabels;
    }

    public static void main(String[] args) throws Exception {
        if (args.length == 0)
            args = new String[]{"C:\\LocalData\\Dropbox\\TSC Problems", "OliveOil"};

        String dataset = args[1];
        String fileExtension = File.separator + dataset + File.separator + dataset;
        String samplePath = args[0] + fileExtension;

        // load the train and test sets
        Instances testSet = DatasetLoading.loadDataNullable(samplePath + "_TEST");
        Instances trainSet = DatasetLoading.loadDataNullable(samplePath + "_TRAIN");

        LearnShapelets ls = new LearnShapelets();
        ls.setSeed(0);
        ls.buildClassifier(trainSet);
        double accuracy = utilities.ClassifierTools.accuracy(testSet, ls);

        System.out.println(dataset + ", LS= " + (1 - accuracy));
    }
}
28,076
33.662963
135
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/shapelet_based/ShapeletTransformClassifier.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.classifiers.shapelet_based; import java.io.File; import java.math.BigDecimal; import java.math.BigInteger; import java.math.MathContext; import java.text.SimpleDateFormat; import java.util.*; import java.util.concurrent.TimeUnit; //import com.sun.scenario.effect.impl.sw.sse.SSEBlend_SRC_OUTPeer; import evaluation.evaluators.CrossValidationEvaluator; import evaluation.tuning.ParameterSpace; import experiments.data.DatasetLoading; import machine_learning.classifiers.ensembles.EnhancedRotationForest; import tsml.classifiers.Tuneable; import tsml.transformers.shapelet_tools.DefaultShapeletOptions; import tsml.transformers.shapelet_tools.ShapeletTransformFactoryOptions; import utilities.InstanceTools; import weka.core.*; import weka.classifiers.Classifier; import tsml.transformers.PCA; import tsml.transformers.ShapeletTransform; import tsml.transformers.shapelet_tools.ShapeletTransformFactory; import tsml.transformers.shapelet_tools.ShapeletTransformFactoryOptions.ShapeletTransformOptions; import tsml.transformers.shapelet_tools.ShapeletTransformTimingUtilities; import tsml.transformers.shapelet_tools.distance_functions.ShapeletDistance; import tsml.transformers.shapelet_tools.quality_measures.ShapeletQuality; import 
tsml.transformers.shapelet_tools.search_functions.ShapeletSearch; import tsml.transformers.shapelet_tools.search_functions.ShapeletSearch.SearchType; import tsml.transformers.shapelet_tools.search_functions.ShapeletSearchOptions; import tsml.classifiers.EnhancedAbstractClassifier; import tsml.classifiers.TrainTimeContractable; import fileIO.FullAccessOutFile; import fileIO.OutFile; /** * ShapeletTransformClassifier (STC) * * Builds a time series classifier by * 1. searching for shapelets in the train data, keeping the best numShapeletsInTransform * 2. creating a new train data were each attribute is the distance (sDist) to the shapelet for that attribute * 3. building a classifier * *STC performs a shapelet transform by searching randomly for shapelets for contractHours (default 1 hour) * or through full enumeration if this is possible in the contractHours. The best numShapeletsInTransform (default 1000) * shapelets are kept. It then classifies with a rotation forest of 200 trees. * STC is Contractable and Tuneable, but not Checkpointable yet. * * The transform can be configured with a rannge of ShapeletTransformOptions and the search can be performed with a number * search types. 
Only FULL and RANDOM are currently supported, but ShapeletSearch.SearchType contains a range of alternatives * */ public class ShapeletTransformClassifier extends EnhancedAbstractClassifier implements TrainTimeContractable, Tuneable { private ShapeletTransform transform; //Configurable ST private Instances shapeletData; //Transformed shapelets header info stored here private Classifier classifier; //Final classifier built on transformed shapelet data // *********************** TRANSFORM STRUCTURE SETTINGS *************************************/ /** Shapelet transform parameters that can be configured through the STC, stored here **/ private transient ShapeletTransformOptions transformOptions; private int numShapeletsInTransform = ShapeletTransform.MAXTRANSFORMSIZE; private ShapeletSearch.SearchType searchType = ShapeletSearch.SearchType.RANDOM;//FULL == enumeration, RANDOM =random sampled to train time contract // Redundant features in the shapelet space are removed prior to building the classifier int[] redundantFeatures; // PCA Option: not currently implemented, as it has not been debugged private boolean performPCA=false; private PCA pca; private int numPCAFeatures=100; //************************** CONTRACTING *************************************/ /* There are two elements to contracting: the shapelet transform contract and the classifier contract. This complicated by the fact the contract time for ST is a parameter to control overfitting. The ST contracting is controlled by the number of shapelets to evaluate. This can either be explicitly set by the user through setNumberOfShapeletsToEvaluate, or, if a contract time is set, it is estimated from the contract. */ private boolean trainTimeContract =false; private int transformContractHours =1;// Hours contract for ST, defaults to 1 hour. 
//Time limit assigned to transform, this is considered a parameter, as too long results in over fitting //however, if the classifier is contracted, then it is set as 50% of the contract time. private long trainContractTimeNanos = 0; //Time limit for transform + classifier, fixed by user. If <=0, no contract //TO DO: REFACTOR TO SPLIT CONTRACT train=search+transform+classifier private long searchContractTime = TimeUnit.NANOSECONDS.convert(transformContractHours, TimeUnit.HOURS); private long transformContractTime = TimeUnit.NANOSECONDS.convert(transformContractHours, TimeUnit.HOURS); private long classifierContractTime = 0;//Time limit assigned to classifier, based on contractTime, but fixed in buildClassifier in an adhoc way /**** Shapelet Transform Information *************/ private long numShapeletsInProblem = 0; //Number of shapelets in problem if we do a full enumeration private double singleShapeletTime=0; //Estimate of the time to evaluate a single shapelet private double proportionToEvaluate=1;// Proportion of total num shapelets to evaluate based on time contract private long numShapeletsToEvaluate = 0; //Total num shapelets to evaluate over all cases (NOT per case) private long transformBuildTime=0; transient private long finalBuildtrainContractTimeNanos = 0; private boolean multivariate=false; //Quick hack to test if I can get it to work. 
public void setTransformTime(long t){ transformContractTime=t; } public void setTransformTimeHours(long t){ transformContractHours =(int)t; transformContractTime=TimeUnit.NANOSECONDS.convert(t, TimeUnit.HOURS); } /************* CHECKPOINTING and SAVING ************ Could all move to transformOptions */ //Check pointing is not fully debugged private String checkpointFullPath=""; //location to check point private boolean checkpoint=false; //If these are set, the shapelet meta information is saved to <path>/Workspace/ and the transforms saved to <path>/Transforms private String shapeletOutputPath; private boolean saveShapelets=false; private boolean pruneMatchingShapelets=false; /** * @param pruneMatchingShapelets the pruneMatchingShapelets to set */ public void setPruneMatchingShapelets(boolean pruneMatchingShapelets) { this.pruneMatchingShapelets = pruneMatchingShapelets; } public ShapeletTransformClassifier(){ super(CAN_ESTIMATE_OWN_PERFORMANCE); //Data independent config set here, so user can change them after construction transformOptions=new ShapeletTransformOptions(); configureDefaultShapeletTransform(); EnhancedRotationForest rotf=new EnhancedRotationForest(); rotf.setMaxNumTrees(200); trainEstimateMethod=TrainEstimateMethod.OOB; classifier=rotf; } // Not debugged, doesnt currently work public void usePCA(){ setPCA(true); } public void setPCA(boolean b) { setPCA(b,numPCAFeatures); } public void setPCA(boolean b, int numberEigenvectorsToRetain) { performPCA = b; numPCAFeatures=numberEigenvectorsToRetain; pca=new PCA(numPCAFeatures); } @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.enable(Capabilities.Capability.RELATIONAL_ATTRIBUTES); return result; } @Override public void buildClassifier(Instances data) throws Exception { // can classifier handle the data? 
getCapabilities().testWithFail(data); //Add the requirement to test if there are at least one of each class long startTime=System.nanoTime(); //for AbstractClassifier copy serialisation if (transformOptions == null){ transformOptions=new ShapeletTransformOptions(); configureDefaultShapeletTransform(); } //Temp to test multivariate if(data.attribute(0).isRelationValued()) multivariate=true; //Give 2/3 time for transform, 1/3 for classifier. Need to only do this if its set to have one. //All in nanos // printDebug("Are we contracting? "+trainTimeContract+" transform contract time ="+trainContractTimeNanos); if(trainTimeContract) {//Always switch to OOB if contracting trainEstimateMethod=TrainEstimateMethod.OOB; transformContractTime = trainContractTimeNanos/2; classifierContractTime = trainContractTimeNanos - transformContractTime; //HACK: Allow 1/3 for the final transform transformContractTime =2*transformContractTime/3; } else{ classifierContractTime=0; } //Data independent parameters are set in the constructor. These are parameters of the data configureDataDependentShapeletTransform(data); //Contracting with the shapelet transform is handled by setting the number of shapelets per series to evaluate. 
//This is done by estimating the time to evaluate a single shapelet then extrapolating (not in aarons way) //It would be better to just search until the time limit is up, but the whole search structure is series //by series, so this is hopefully just an interim measure if(transformContractTime >0) { // printLineDebug(" Contract time limit = "+ transformContractTime); configureTrainTimeContract(data, transformContractTime); } transform= constructShapeletTransform(data); transform.setSuppressOutput(true); if(transformContractTime >0) { printLineDebug(" Shapelet search contract = "+transformContractTime/1000000000.0); printLineDebug(" Classifier contract = "+classifierContractTime/1000000000.0); double timePerShapelet= transformContractTime /numShapeletsToEvaluate; transform.setContractTime(transformContractTime); transform.setAdaptiveTiming(true); transform.setTimePerShapelet(timePerShapelet); printLineDebug(" time per shapelet = contract = "+timePerShapelet); } //Put this in the options rather than here transform.setPruneMatchingShapelets(pruneMatchingShapelets); printLineDebug(" Begin Transform with "+transform.getClass().getSimpleName()+" Use balanced classes = "+transform.getUseBalancedClass()); shapeletData = transform.fitTransform(data); transformBuildTime=System.nanoTime()-startTime; //Need to store this printLineDebug(" Transform build time = "+transformBuildTime/1000000000.0); redundantFeatures=InstanceTools.removeRedundantTrainAttributes(shapeletData); if(saveShapelets) saveShapeletData(data); if(classifier instanceof EnhancedAbstractClassifier) ((EnhancedAbstractClassifier)classifier).setDebug(debug); if(getEstimateOwnPerformance()) { EnhancedAbstractClassifier eac=((EnhancedAbstractClassifier) classifier); eac.setTrainEstimateMethod(trainEstimateMethod); if(eac.ableToEstimateOwnPerformance()) eac.setEstimateOwnPerformance(true); } if(classifierContractTime>0 && classifier instanceof TrainTimeContractable){ //HERE CHANGE TO ACTUAL TIME LEFT 
((TrainTimeContractable) classifier).setTrainTimeLimit(classifierContractTime); } classifier.buildClassifier(shapeletData); trainResults.setTimeUnit(TimeUnit.NANOSECONDS); long endTime=System.nanoTime(); if(getEstimateOwnPerformance()){ estimateOwnPerformance(data); } if(classifier instanceof EnhancedAbstractClassifier) { trainResults.setErrorEstimateTime(((EnhancedAbstractClassifier) classifier).getTrainResults().getErrorEstimateTime()); trainResults.setBuildTime(endTime - startTime-trainResults.getErrorEstimateTime()); } else trainResults.setBuildTime(endTime - startTime); trainResults.setParas(getParameters()); //To help garbage collection shapeletData=new Instances(data,0); trainResults.setBuildPlusEstimateTime(System.nanoTime()-startTime); printLineDebug("************** Finished STC Build with " + transform.getCount() + " shaplets searched, "+transform.getNumberOfShapelets()+" retained, in train time = "+ (trainResults.getBuildTime()/1000000000/60/60.0) + " hours, Train+Estimate time = "+(trainResults.getBuildPlusEstimateTime()/1000000000/60/60.0)+" hours ***************"); } @Override public double classifyInstance(Instance ins) throws Exception{ shapeletData.add(ins); Instances temp = transform.transform(shapeletData); //Delete redundant for(int del:redundantFeatures) temp.deleteAttributeAt(del); if(performPCA){ temp=pca.transform(temp); } Instance test = temp.get(0); shapeletData.remove(0); return classifier.classifyInstance(test); } @Override public double[] distributionForInstance(Instance ins) throws Exception{ shapeletData = new Instances(ins.dataset(),0); shapeletData.add(ins); Instances temp = transform.transform(shapeletData); //Delete redundant for(int del:redundantFeatures) temp.deleteAttributeAt(del); if(performPCA){ temp=pca.transform(temp); } Instance test = temp.get(0); shapeletData.remove(0); return classifier.distributionForInstance(test); } /** * estimating own performance for STC. 
If OOB is set to * * @param data * @throws Exception from distributionForInstance */ private void estimateOwnPerformance(Instances data) throws Exception { // if the classifier can estimate its own performance, do that. This is not yet in the time contract! boolean doExternalCV=false; doExternalCV=!((classifier instanceof EnhancedAbstractClassifier)&&((EnhancedAbstractClassifier)classifier).ableToEstimateOwnPerformance()); if(doExternalCV) { printLineDebug("Doing a CV with base to estimate accuracy"); int numFolds = setNumberOfFolds(data); CrossValidationEvaluator cv = new CrossValidationEvaluator(); cv.setSeed(seed * 12); cv.setNumFolds(numFolds); trainResults = cv.crossValidateWithStats(classifier, shapeletData); } else{ trainResults = ((EnhancedAbstractClassifier) classifier).getTrainResults(); } } private void copyParameters(ShapeletTransformClassifier other) { // this.numClassifiers = other.numClassifiers; // this.numIntervalsFinder = other.numIntervalsFinder; } public void setShapeletOutputFilePath(String path){ shapeletOutputPath = path; saveShapelets=true; } private void saveShapeletData(Instances data){ printDebug("Saving the transform as an arff file and the transform data in different files. 
The shapelets will also be saved by the transform in the same location."); //Write shapelet transform to arff file File f= new File(shapeletOutputPath+"ShapeletTransforms/"+data.relationName()); if(!f.exists()) f.mkdirs(); shapeletData.setRelationName(data.relationName()); DatasetLoading.saveDataset(shapeletData,shapeletOutputPath+"ShapeletTransforms/"+data.relationName()+"/"+data.relationName()+seed+"_TRAIN"); f= new File(shapeletOutputPath+"Workspace/"+data.relationName()); if(!f.exists()) f.mkdirs(); FullAccessOutFile of=new FullAccessOutFile(shapeletOutputPath+"Workspace/"+data.relationName()+"/shapleletInformation"+seed+".csv"); String str= getTransformParameters(); Date date = new Date(); SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); of.writeLine("Generated by ShapeletTransformClassifier.java on " + formatter.format(date)); of.writeLine(str); of.writeLine("NumShapelets,"+transform.getNumberOfShapelets()); of.writeLine("Operations count(not sure!),"+transform.getCount()); of.writeString("ShapeletLengths"); ArrayList<Integer> lengths=transform.getShapeletLengths(); for(Integer i:lengths) of.writeString(","+i); /* ArrayList<Shapelet> shapelets= transform.getShapelets(); of.writeLine("SHAPELETS:"); for(Shapelet s:shapelets){ double[] d=s.getUnivariateShapeletContent(); for(double x:d) of.writeString(x+","); of.writeString("\n"); */ of.closeFile(); } public ShapeletTransform constructShapeletTransform(Instances data){ //**** Builds the transform using transformOptions and a search builder ****/ // ShapeletSearchOptions.Builder searchBuilder; // searchBuilder= DefaultShapeletOptions.TIMED_FACTORY_OPTIONS.get("SHAPELET_D").apply(data, ShapeletTransformTimingUtilities.dayNano,(long)seed).getSearchOptions(); // else ShapeletSearchOptions.Builder searchBuilder = new ShapeletSearchOptions.Builder(); if(seedClassifier) searchBuilder.setSeed(2*seed); //For some reason stored twice in the transform options and the search builder. 
searchBuilder.setMin(transformOptions.getMinLength()); searchBuilder.setMax(transformOptions.getMaxLength()); searchBuilder.setSearchType(searchType); if(numShapeletsInProblem==0) numShapeletsInProblem=ShapeletTransformTimingUtilities.calculateNumberOfShapelets(data.numInstances(), data.numAttributes()-1, transformOptions.getMinLength(), transformOptions.getMaxLength()); transformOptions.setKShapelets(numShapeletsInTransform); searchBuilder.setNumShapeletsToEvaluate(numShapeletsToEvaluate/data.numInstances());//This is ignored if full search is performed transformOptions.setSearchOptions(searchBuilder.build()); //Finally, get the transform from a Factory with the options set by the builder if(multivariate){ System.out.println(" MUTIVARIATE TRUE"); transformOptions.setDistanceType(ShapeletDistance.DistanceType.DEPENDENT); } ShapeletTransformFactoryOptions options=transformOptions.build(); ShapeletTransform st = new ShapeletTransformFactory(options).getTransform(); if(saveShapelets && shapeletOutputPath != null) st.setLogOutputFile(shapeletOutputPath+"Workspace/"+data.relationName()+"/shapelets"+seed+".csv"); return st; } /*********** METHODS TO CONFIGURE TRANSFORM * Note there are two types of parameters: data independent and data dependent. They are now all set here, but * former are set in the constructor, the latter in buildClassifier. We could tidy this up with lambdas * Sets up this default parameters that are not data dependent. This is called in the constructor * and the user can reconfigure these prior to classifier build. These could also be tuned. 
*/ public void configureDefaultShapeletTransform(){ searchType=ShapeletSearch.SearchType.FULL; transformOptions.setDistanceType(ShapeletDistance.DistanceType.IMPROVED_ONLINE); transformOptions.setQualityMeasure(ShapeletQuality.ShapeletQualityChoice.INFORMATION_GAIN); transformOptions.setRescalerType(ShapeletDistance.RescalerType.NORMALISATION); transformOptions.setRoundRobin(true); transformOptions.setCandidatePruning(true); } /** * Sets up the parameters that require the data characteristics (series length, number of classes and number of cases */ public void configureDataDependentShapeletTransform(Instances train){ int n = train.numInstances(); int m = train.numAttributes()-1; if(multivariate) m = utilities.multivariate_tools.MultivariateInstanceTools.channelLength(train); transformOptions.setMinLength(3); transformOptions.setMaxLength(m); //DEtermine balanced or not, if(train.numClasses() > 2) { transformOptions.setBinaryClassValue(true); transformOptions.setClassBalancing(true); }else{ transformOptions.setBinaryClassValue(false); transformOptions.setClassBalancing(false); } if(numShapeletsInTransform==ShapeletTransform.MAXTRANSFORMSIZE)//It has not then been set by the user numShapeletsInTransform= 10*train.numInstances() < ShapeletTransform.MAXTRANSFORMSIZE ? 10*train.numInstances(): ShapeletTransform.MAXTRANSFORMSIZE; //Got to cap this surely! 
transformOptions.setKShapelets(numShapeletsInTransform); } /** * Specific set up for the DAWAK version (rename) described in * @param train */ public void configureDawakShapeletTransform(Instances train) { configureDefaultShapeletTransform(); if(train.numClasses() > 2) { transformOptions.setBinaryClassValue(true); transformOptions.setClassBalancing(true); }else{ transformOptions.setBinaryClassValue(false); transformOptions.setClassBalancing(false); } if(numShapeletsInTransform==ShapeletTransform.MAXTRANSFORMSIZE)//It has not then been set by the user numShapeletsInTransform= 10*train.numInstances() < ShapeletTransform.MAXTRANSFORMSIZE ? 10*train.numInstances(): ShapeletTransform.MAXTRANSFORMSIZE; //Got to cap this surely! transformOptions.setKShapelets(numShapeletsInTransform); } /** * configuring a ShapeletTransform to the original ST format used in the bakeoff * * @param train data set Work in progress */ public void configureBakeoffShapeletTransform(Instances train){ transformOptions.setDistanceType(ShapeletDistance.DistanceType.NORMAL); if(train.numClasses() <10) transformOptions.setCandidatePruning(true); else transformOptions.setCandidatePruning(false); transformOptions.setBinaryClassValue(false); transformOptions.setClassBalancing(false); if(numShapeletsInTransform==ShapeletTransform.MAXTRANSFORMSIZE)//It has not then been set by the user numShapeletsInTransform= 10*train.numInstances() < ShapeletTransform.MAXTRANSFORMSIZE ? 10*train.numInstances(): ShapeletTransform.MAXTRANSFORMSIZE; //Got to cap this surely! transformOptions.setKShapelets(numShapeletsInTransform); } /** * This method estimates how many shapelets per series (numShapeletsToEvaluate) can be evaluated given a specific time contract. * It should just return this value * It also calculates numShapeletsInTransform and proportionToEvaluate, both stored by the classifier. It can set searchType to FULL, if the proportion * is estimated to be full. 
* Note the user can set numShapeletsToEvaluate explicitly. The user can also set the contract time explicitly, thus invoking * this method in buildClassifier. If both numShapeletsToEvaluate and time have been set, we have a contradiction from the user. * We assume time take precedence, and overwrite numShapeletsToEvaluate * *NEED TO RECONFIGURE FOR USER SET numShapeletToEvaluate * @param train train data * @param time contract time in nanoseconds */ public void configureTrainTimeContract(Instances train, long time){ //Configure the search options if a contract has been ser // else int n = train.numInstances(); int m; if(multivariate){ int length=train.instance(0).relationalValue(0).instance(0).numAttributes(); int dimensions=train.instance(0).relationalValue(0).numInstances(); m=length; }else m=train.numAttributes() - 1; if(time>0){ searchType = SearchType.RANDOM; numShapeletsInProblem = ShapeletTransformTimingUtilities.calculateNumberOfShapelets(n, m, 3, m); //This is aarons way of doing it based on hard coded estimate of the time for a single operation proportionToEvaluate= estimatePropOfFullSearchAaron(n,m,time); if(proportionToEvaluate==1.0) { searchType = SearchType.FULL; numShapeletsToEvaluate=numShapeletsInProblem; } else numShapeletsToEvaluate = (long)(numShapeletsInProblem*proportionToEvaluate); if(numShapeletsToEvaluate<n)//Got to do 1 per series. Really should reduce if we do this. numShapeletsToEvaluate=n; numShapeletsInTransform = numShapeletsToEvaluate > numShapeletsInTransform ? 
numShapeletsInTransform : (int) numShapeletsToEvaluate; } } // Tony's way of doing it based on a timing model for predicting for a single shapelet //Point estimate to set prop, could use a hard coded //This is a bit unintuitive, should move full towards a time per shapelet model private double estimatePropOfFullSearchTony(int n, int m, int totalNumShapelets, long time){ double nPower=1.2; double mPower=1.3; double scaleFactor=Math.pow(2,26); singleShapeletTime=Math.pow(n,nPower)*Math.pow(m,mPower)/scaleFactor; long timeRequired=(long)(singleShapeletTime*totalNumShapelets); double p=1; if(timeRequired>time) p=timeRequired/(double)time; return p; } // Aarons way of doing it based on time for a single operation private double estimatePropOfFullSearchAaron(int n, int m, long time){ //nanoToOp is currently a hard coded to 10 nanosecs in ShapeletTransformTimingUtilities. This is a bit crap //HERE we can estimate it for this run long nanoTimeForOp=ShapeletTransformTimingUtilities.nanoToOp; // Operations contract BigInteger allowedNumberOfOperations = new BigInteger(Long.toString(time / nanoTimeForOp)); // Operations required BigInteger requiredNumberOfOperations = ShapeletTransformTimingUtilities.calculateOps(n, m, 1, 1); //Need more operations than we are allowed double p=1; if (requiredNumberOfOperations.compareTo(allowedNumberOfOperations) > 0) { BigDecimal oct = new BigDecimal(allowedNumberOfOperations); BigDecimal oc = new BigDecimal(requiredNumberOfOperations); BigDecimal prop = oct.divide(oc, MathContext.DECIMAL64); p= prop.doubleValue(); } return p; } /** * @return String, comma separated relevant variables, used in Experiment.java to write line 2 of results */ @Override public String getParameters(){ String paras=transform.getShapeletCounts(); //Build time info String result=super.getParameters(); //Shapelet numbers and contract info result+=",numberOfShapeletsInProblem,"+numShapeletsInProblem+",proportionToEvaluate,"+proportionToEvaluate; //transform config 
result+=",SearchType,"+searchType; result+=","+transformOptions.toString(); result+=","+paras; result+=",Classifier,"+classifier.getClass().getSimpleName(); String classifierParas="No Classifier Para Info"; if(classifier instanceof EnhancedAbstractClassifier) classifierParas=((EnhancedAbstractClassifier)classifier).getParameters(); result+=","+classifierParas; if(trainTimeContract) result+= ",TimeContract(ns), " +trainContractTimeNanos; else result+=",NoContract"; result+= ",TransformActualBuildTime,"+transformBuildTime+",trainContractTimeNanos,"+ trainContractTimeNanos +",transformContractTime,"+ transformContractTime; result+=",EstimateOwnPerformance,"+getEstimateOwnPerformance(); if(getEstimateOwnPerformance()) { result += ",trainEstimateMethod," + trainEstimateMethod; } return result; } /** * * @return a string containing just the transform parameters */ public String getTransformParameters(){ String paras=transform.getShapeletCounts(); String str= "TransformActualBuildTime,"+transformBuildTime+",totalTimeContract,"+ trainContractTimeNanos +",transformTimeContract,"+ transformContractTime; //Shapelet numbers and contract info str+=",numberOfShapeletsInProblem,"+numShapeletsInProblem+",proportionToEvaluate,"+proportionToEvaluate; //transform config str+=",SearchType,"+searchType; str+=","+transformOptions.toString(); str+=","+paras; return str; } public long getTransformOpCount(){ return transform.getCount(); } public void setTrainTimeLimit(long amount) { trainTimeContract=true; trainContractTimeNanos = amount; } @Override public boolean withinTrainContract(long start) { return start<trainContractTimeNanos; } public void setNumberOfShapeletsToEvaluate(long numS){ numShapeletsToEvaluate = numS; } public void setNumberOfShapeletsInTransform(int numS){ numShapeletsInTransform = numS; } /** * Checkpoint methods */ public void setSavePath(String path){ checkpointFullPath=path; } public void copyFromSerObject(Object obj) throws Exception{ if(!(obj instanceof 
ShapeletTransformClassifier)) throw new Exception("Not a ShapeletTransformClassifier object"); //Copy meta data ShapeletTransformClassifier st=(ShapeletTransformClassifier)obj; //We assume the classifiers have not been built, so are basically copying over the set up classifier=st.classifier; shapeletOutputPath=st.shapeletOutputPath; transform=st.transform; shapeletData=st.shapeletData; int[] redundantFeatures=st.redundantFeatures; transformBuildTime=st.transformBuildTime; trainResults =st.trainResults; numShapeletsInTransform =st.numShapeletsInTransform; searchType =st.searchType; numShapeletsToEvaluate =st.numShapeletsToEvaluate; seed =st.seed; seedClassifier=st.seedClassifier; trainContractTimeNanos =st.trainContractTimeNanos; } /*********** SETTERS AND GETTERS : Methods for manual configuration **********/ /** * Set how shapelets are assessed * @param qual Quality measure type, options are INFORMATION_GAIN,F_STAT,KRUSKALL_WALLIS,MOODS_MEDIAN */ public void setQualityMeasure(ShapeletQuality.ShapeletQualityChoice qual){ transformOptions.setQualityMeasure(qual); } public void setRescalerType(ShapeletDistance.RescalerType r){ transformOptions.setRescalerType(r); } /** * Set how shapelets are searched for in a given series. 
* @param type: Search type with valid values * SearchType {FULL, FS, GENETIC, RANDOM, LOCAL, MAGNIFY, TIMED_RANDOM, SKIPPING, TABU, * REFINED_RANDOM, IMP_RANDOM, SUBSAMPLE_RANDOM, SKEWED, BO_SEARCH}; */ public void setSearchType(ShapeletSearch.SearchType type) { searchType = type; } public void setClassifier(Classifier c){ classifier=c; } public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(TechnicalInformation.Type.ARTICLE); result.setValue(TechnicalInformation.Field.AUTHOR, "authors"); result.setValue(TechnicalInformation.Field.YEAR, "A shapelet transform for time series classification"); result.setValue(TechnicalInformation.Field.TITLE, "stuff"); result.setValue(TechnicalInformation.Field.JOURNAL, "places"); result.setValue(TechnicalInformation.Field.VOLUME, "vol"); result.setValue(TechnicalInformation.Field.PAGES, "pages"); return result; } /** * From the interface Tuneable * @return the range of parameters to tune over */ @Override public ParameterSpace getDefaultParameterSearchSpace(){ ParameterSpace ps=new ParameterSpace(); String[] maxNumShapelets={"100","200","300","400","500","600","700","800","900","1000"}; ps.addParameter("T", maxNumShapelets); return ps; } /** * Parses a given list of options to set the parameters of the classifier. 
* We use this for the tuning mechanism, setting parameters through setOptions <!-- options-start --> * Valid options are: <p/> * <pre> -S * Number of shapelets kept in the transform.</pre> * More to follow <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception{ String numShapeletsString= Utils.getOption('S', options); if (numShapeletsString.length() != 0) numShapeletsInTransform = Integer.parseInt(numShapeletsString); else throw new Exception("in setOptions Unable to read number of intervals, -T flag is not set"); } public static void main(String[] args) throws Exception { // String dataLocation = "C:\\Temp\\TSC\\"; String dataLocation = "E:\\Data\\TSCProblems2018\\"; String saveLocation = "C:\\Temp\\TSC\\"; String datasetName = "FordA"; int fold = 0; Instances train= DatasetLoading.loadDataNullable(dataLocation+datasetName+File.separator+datasetName+"_TRAIN"); Instances test= DatasetLoading.loadDataNullable(dataLocation+datasetName+File.separator+datasetName+"_TEST"); String trainS= saveLocation+datasetName+File.separator+"TrainCV.csv"; String testS=saveLocation+datasetName+File.separator+"TestPreds.csv"; String preds=saveLocation+datasetName; System.out.println("Data Loaded"); ShapeletTransformClassifier st= new ShapeletTransformClassifier(); //st.saveResults(trainS, testS); st.setShapeletOutputFilePath(saveLocation+datasetName+"Shapelets.csv"); st.setMinuteLimit(2); System.out.println("Start transform"); long t1= System.currentTimeMillis(); st.configureDefaultShapeletTransform(); st.configureTrainTimeContract(train,st.trainContractTimeNanos); Instances stTrain=st.transform.fitTransform(train); long t2= System.currentTimeMillis(); System.out.println("BUILD TIME "+((t2-t1)/1000)+" Secs"); OutFile out=new OutFile(saveLocation+"ST_"+datasetName+".arff"); out.writeString(stTrain.toString()); } }
36,099
44.812183
205
java
tsml-java
tsml-java-master/src/main/java/tsml/classifiers/shapelet_based/ShapeletTree.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.classifiers.shapelet_based; import java.io.File; import java.io.FileReader; import java.util.*; import experiments.data.DatasetLoading; import tsml.classifiers.EnhancedAbstractClassifier; import tsml.transformers.shapelet_tools.OrderLineObj; import tsml.transformers.shapelet_tools.class_value.NormalClassValue; import tsml.transformers.shapelet_tools.distance_functions.ShapeletDistance; import tsml.transformers.shapelet_tools.quality_measures.InformationGain; import tsml.transformers.shapelet_tools.quality_measures.ShapeletQuality; import utilities.ClassifierTools; import utilities.class_counts.ClassCounts; import weka.core.*; import tsml.transformers.shapelet_tools.Shapelet; public class ShapeletTree extends EnhancedAbstractClassifier implements TechnicalInformationHandler{ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(TechnicalInformation.Type.ARTICLE); result.setValue(TechnicalInformation.Field.AUTHOR, "L Ye, E Keogh"); result.setValue(TechnicalInformation.Field.TITLE, "Time series shapelets: a new primitive for data mining"); result.setValue(TechnicalInformation.Field.JOURNAL, "Proc. 
15th SIGKDD"); result.setValue(TechnicalInformation.Field.YEAR, "2009"); return result; } public ShapeletQuality getQuality() { return quality; } public void setQuality(ShapeletQuality.ShapeletQualityChoice qualityChoice) { this.quality = new ShapeletQuality(qualityChoice); } public ShapeletDistance getSubseqDistance() { return subseqDistance; } public void setSubseqDistance(ShapeletDistance subseqDistance) { this.subseqDistance = subseqDistance; } public void setCandidatePruning(boolean f) { this.useCandidatePruning = f; this.candidatePruningStartPercentage = f ? 10 : 100; } protected int candidatePruningStartPercentage; protected boolean useCandidatePruning; protected Comparator<Shapelet> shapeletComparator = new Shapelet.LongOrder(); //qualiyu/ distance and class value. protected ShapeletQuality quality; protected ShapeletDistance subseqDistance; protected NormalClassValue classValue; private ShapeletNode root; private String logFileName; private int minLength, maxLength; public ShapeletTree(){ super(CANNOT_ESTIMATE_OWN_PERFORMANCE); this.root = new ShapeletNode(); setQuality(ShapeletQuality.ShapeletQualityChoice.INFORMATION_GAIN); subseqDistance = new ShapeletDistance(); classValue = new NormalClassValue(); } public void setShapeletMinMaxLength(int minLength, int maxLength){ this.minLength = minLength; this.maxLength = maxLength; } @Override public void buildClassifier(Instances data) throws Exception{ if(minLength < 1 || maxLength < 1){ if(debug) System.out.println("Shapelet minimum or maximum length is incorrectly specified. 
Min = "+minLength+" max = "+maxLength+" setting to whole series"); minLength=3; maxLength=data.numAttributes()-1; } long t1=System.nanoTime(); root.initialiseNode(data, minLength, maxLength,0); trainResults.setBuildTime(System.nanoTime()-t1); trainResults.setParas(getParameters()); } @Override public double classifyInstance(Instance instance) { return root.classifyInstance(instance); } private Shapelet getRootShapelet(){ return this.root.shapelet; } /** * * @param classDist * @return */ protected void initQualityBound(ClassCounts classDist) { if (!useCandidatePruning) return; quality.initQualityBound(classDist, candidatePruningStartPercentage); } private class ShapeletNode { private ShapeletNode leftNode; private ShapeletNode rightNode; private double classDecision; private Shapelet shapelet; public ShapeletNode() { leftNode = null; rightNode = null; classDecision = -1; } public void initialiseNode(Instances data, int minShapeletLength, int maxShapeletLength, int level) throws Exception{ subseqDistance.init(data); classValue.init(data); if(debug) System.out.println(data.numInstances()); // 1. check whether this is a leaf node with only one class present double firstClassValue = classValue.getClassValue(data.instance(0)); boolean oneClass = true; for(int i = 1; i < data.numInstances(); i++){ if(classValue.getClassValue(data.instance(i)) != firstClassValue){ oneClass = false; break; } } if(oneClass){ this.classDecision = firstClassValue; // no need to find shapelet, base case } else { // recursively call method to create left and right children nodes try{ // 1. find the best shapelet to split the data this.shapelet = findBestShapelet(1,data,minShapeletLength, maxShapeletLength); // 2. split the data using the shapelet and create new data sets double dist; ArrayList<Instance> splitLeft = new ArrayList<Instance>(); ArrayList<Instance> splitRight = new ArrayList<Instance>(); subseqDistance.setShapelet(shapelet); //set the shapelet for the distance function. 
for(int i = 0; i < data.numInstances(); i++){ dist = subseqDistance.calculate(data.instance(i), i); if(debug) System.out.println(shapelet.splitThreshold + " " + dist); (dist < shapelet.splitThreshold ? splitLeft : splitRight).add(data.instance(i)); } // 5. initialise and recursively compute children nodes leftNode = new ShapeletNode(); rightNode = new ShapeletNode(); // System.out.println("SplitLeft:"); Instances leftInstances = new Instances(data, splitLeft.size()); leftInstances.addAll(splitLeft); Instances rightInstances = new Instances(data, splitRight.size()); rightInstances.addAll(splitRight); leftNode.initialiseNode(leftInstances, minShapeletLength, maxShapeletLength, (level+1)); // System.out.println("SplitRight:"); rightNode.initialiseNode(rightInstances, minShapeletLength, maxShapeletLength, (level+1)); }catch(Exception e){ System.out.println("Problem initialising tree node: "+e); e.printStackTrace(); } } } public double classifyInstance(Instance instance){ if (this.leftNode == null) { return this.classDecision; } else { double distance; subseqDistance.setShapelet(shapelet); distance = subseqDistance.calculate(instance, 0); if (distance < this.shapelet.splitThreshold) { return leftNode.classifyInstance(instance); } else { return rightNode.classifyInstance(instance); } } } } //# public double timingForSingleShapelet(Instances data, int minShapeletLength, int maxShapeletLength) { long startTime = System.nanoTime(); this.findBestShapelet(1, data, minShapeletLength, maxShapeletLength); long finishTime = System.nanoTime(); return (double)(finishTime - startTime) / 1000000000.0; } // edited from findBestKShapeletsCached private Shapelet findBestShapelet(int numShapelets, Instances data, int minShapeletLength, int maxShapeletLength){ ArrayList<Shapelet> kShapelets = new ArrayList<Shapelet>(); // store (upto) the best k shapelets overall ArrayList<Shapelet> seriesShapelets = new ArrayList<Shapelet>(); // temp store of all shapelets for each time series Shapelet 
bestShapelet = null; for(int i = 0; i < data.numInstances(); i++){ subseqDistance.setSeries(i); double[] wholeCandidate = data.instance(i).toDoubleArray(); for(int length = minShapeletLength; length <= maxShapeletLength; length++){ for(int start = 0; start <= wholeCandidate.length - length-1; start++){ //-1 = avoid classVal - handle later for series with no class val Shapelet candidateShapelet = checkCandidate(data, data.instance(i), i, start, length); if(bestShapelet == null){ bestShapelet = candidateShapelet; } if(shapeletComparator.compare(bestShapelet, candidateShapelet) > 0){ bestShapelet = candidateShapelet; } } } } if(debug) System.out.println("final.quality = " + bestShapelet.getQualityValue()); return bestShapelet; } /** * * @param shapelets the input Shapelets to remove self similar Shapelet objects from * @return a copy of the input ArrayList with self-similar shapelets removed */ private static ArrayList<Shapelet> removeSelfSimilar(ArrayList<Shapelet> shapelets){ // return a new pruned array list - more efficient than removing // self-similar entries on the fly and constantly reindexing ArrayList<Shapelet> outputShapelets = new ArrayList<Shapelet>(); boolean[] selfSimilar = new boolean[shapelets.size()]; // to keep tract of self similarity - assume nothing is similar to begin with for(int i = 0; i < shapelets.size(); i++){ selfSimilar[i] = false; } for(int i = 0; i < shapelets.size();i++){ if(!selfSimilar[i]){ outputShapelets.add(shapelets.get(i)); for(int j = i+1; j < shapelets.size(); j++){ if(!selfSimilar[i] && selfSimilarity(shapelets.get(i),shapelets.get(j))){ // no point recalc'ing if already self similar to something selfSimilar[j] = true; } } } } return outputShapelets; } /** * * @param k the maximum number of shapelets to be returned after combining the two lists * @param kBestSoFar the (up to) k best shapelets that have been observed so far, passed in to combine with shapelets from a new series * @param timeSeriesShapelets the shapelets taken 
from a new series that are to be merged in descending order of fitness with the kBestSoFar * @return an ordered ArrayList of the best k (or less) Shapelet objects from the union of the input ArrayLists */ private ArrayList<Shapelet> combine(int k, ArrayList<Shapelet> kBestSoFar, ArrayList<Shapelet> timeSeriesShapelets){ ArrayList<Shapelet> newBestSoFar = new ArrayList<Shapelet>(); kBestSoFar.addAll(timeSeriesShapelets); Collections.sort(kBestSoFar); if(kBestSoFar.size()<k) return kBestSoFar; // no need to return up to k, as there are not k shapelets yet for(int i = 0; i < k; i++){ newBestSoFar.add(kBestSoFar.get(i)); } return newBestSoFar; } protected Shapelet checkCandidate(Instances inputData, Instance series,int series_id, int start, int length) { //init qualityBound. initQualityBound(classValue.getClassDistributions()); //set the candidate. This is the instance, start and length. subseqDistance.setCandidate(series, start, length, 0); // create orderline by looping through data set and calculating the subsequence // distance from candidate to all data, inserting in order. ArrayList<OrderLineObj> orderline = new ArrayList<>(); int dataSize = inputData.numInstances(); for (int i = 0; i < dataSize; i++) { //Check if it is possible to prune the candidate if (quality.pruneCandidate()) { return null; } double distance = subseqDistance.calculate(inputData.instance(i), i); //this could be binarised or normal. double classVal = classValue.getClassValue(inputData.instance(i)); // without early abandon, it is faster to just add and sort at the end orderline.add(new OrderLineObj(distance, classVal)); //Update qualityBound - presumably each bounding method for different quality measures will have a different update procedure. quality.updateOrderLine(orderline.get(orderline.size() - 1)); } Shapelet shapelet = new Shapelet(subseqDistance.getCandidate(), series_id, start, quality.getQualityMeasure()); //this class distribution could be binarised or normal. 
shapelet.calculateQuality(orderline, classValue.getClassDistributions()); shapelet.classValue = classValue.getShapeletValue(); //set classValue of shapelet. (interesing to know). //as per the way. We select our Shapelet and assess it's quality through the various methods. But we then calculate a splitting threshold with IG. shapelet.splitThreshold = InformationGain.calculateSplitThreshold(orderline, classValue.getClassDistributions()); return shapelet; } private static double entropy(TreeMap<Double, Integer> classDistributions){ if(classDistributions.size() == 1){ return 0; } double thisPart; double toAdd; int total = 0; for(Double d : classDistributions.keySet()){ total += classDistributions.get(d); } // to avoid NaN calculations, the individual parts of the entropy are calculated and summed. // i.e. if there is 0 of a class, then that part would calculate as NaN, but this can be caught and // set to 0. ArrayList<Double> entropyParts = new ArrayList<Double>(); for(Double d : classDistributions.keySet()){ thisPart =(double) classDistributions.get(d) / total; toAdd = -thisPart * Math.log10(thisPart) / Math.log10(2); if(Double.isNaN(toAdd)) toAdd=0; entropyParts.add(toAdd); } double entropy = 0; for(int i = 0; i < entropyParts.size(); i++){ entropy += entropyParts.get(i); } return entropy; } /** * * @param fileName * @return */ public static Instances loadData(String fileName){ Instances data = null; try{ FileReader r; r = new FileReader(fileName); data = new Instances(r); data.setClassIndex(data.numAttributes() - 1); } catch(Exception e){ System.out.println(" Error =" + e + " in method loadData"); } return data; } private static boolean selfSimilarity(int seriesId, int startPos, int length, Shapelet[] selectedShapelets){ boolean selfSimilarity = false; for(Shapelet shapelet : selectedShapelets){ if(shapelet != null){ if(seriesId == shapelet.seriesId){ if(startPos >= shapelet.startPos && startPos <= shapelet.startPos + shapelet.getLength()) //candidate starts within 
exisiting shapelet { selfSimilarity = true; } if(shapelet.startPos >= startPos && shapelet.startPos <= startPos + length){ selfSimilarity = true; } } } } return selfSimilarity; } private static boolean selfSimilarity(Shapelet candidate, TreeSet<Shapelet> setOfShapelets){ boolean selfSimilarity = false; for(Shapelet shapelet : setOfShapelets){ if(shapelet != null){ if(candidate.seriesId == shapelet.seriesId){ if(candidate.startPos >= shapelet.startPos && candidate.startPos <= shapelet.startPos + shapelet.getLength()) //candidate starts within exisiting shapelet { selfSimilarity = true; } if(shapelet.startPos >= candidate.startPos && shapelet.startPos <= candidate.startPos + candidate.getLength()){ selfSimilarity = true; } } } } return selfSimilarity; } private static boolean selfSimilarity(Shapelet shapelet, Shapelet candidate){ if(candidate.seriesId == shapelet.seriesId){ if(candidate.startPos >= shapelet.startPos && candidate.startPos < shapelet.startPos + shapelet.getLength()){ //candidate starts within exisiting shapelet return true; } if(shapelet.startPos >= candidate.startPos && shapelet.startPos < candidate.startPos + candidate.getLength()){ return true; } } return false; } public static String getTime(){ Calendar calendar = new GregorianCalendar(); return calendar.get(Calendar.DAY_OF_MONTH)+"/"+calendar.get(Calendar.MONTH)+"/"+calendar.get(Calendar.YEAR)+" - "+calendar.get(Calendar.HOUR_OF_DAY)+":"+calendar.get(Calendar.MINUTE)+":"+calendar.get(Calendar.SECOND)+" AM"; } public static void main(String[] args) throws Exception { final String resampleLocation = "D:\\Research TSC\\Data\\TSCProblems2018"; final String dataset = "ItalyPowerDemand"; final String filePath = resampleLocation + File.separator + dataset + File.separator + dataset; System.out.println(filePath); Instances test, train; test = DatasetLoading.loadDataNullable(filePath + "_TEST"); train = DatasetLoading.loadDataNullable(filePath + "_TRAIN"); ShapeletTree stc = new ShapeletTree(); 
stc.setShapeletMinMaxLength(3, train.numAttributes()-1); stc.buildClassifier(train); double accuracy = ClassifierTools.accuracy(test, stc); System.out.println("ShapeletTree accuracy uis: " + accuracy); } }
19,525
38.606491
231
java
tsml-java
tsml-java-master/src/main/java/tsml/clusterers/EnhancedAbstractClusterer.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package tsml.clusterers;

import weka.clusterers.AbstractClusterer;
import weka.core.Instances;
import weka.core.Randomizable;

import java.util.ArrayList;

import static utilities.InstanceTools.deleteClassAttribute;

/**
 * Enhanced abstract clusterer class for time series and vector clusterers.
 * Extends the Weka AbstractClusterer class.
 *
 * Subclasses are expected to fill {@code assignments} and {@code clusters}
 * in their own {@code buildClusterer} implementations after calling
 * {@code super.buildClusterer(data)}.
 *
 * @author Matthew Middlehurst
 */
public abstract class EnhancedAbstractClusterer extends AbstractClusterer implements Randomizable {

    // Random seed; only used by subclasses when seedClusterer has been set.
    protected int seed = 0;
    protected boolean seedClusterer = false;
    // When true, buildClusterer works on a copy so the caller's Instances are not modified.
    protected boolean copyInstances = true;

    // Per-instance cluster index, populated by subclasses during buildClusterer.
    protected double[] assignments;
    // clusters[c] holds the train-set indices assigned to cluster c.
    protected ArrayList<Integer>[] clusters;

    // Training data with the class attribute removed.
    protected Instances train;

    /**
     * Stores the training data (optionally as a copy) and removes any class
     * attribute. Subclasses call this before running their clustering logic.
     *
     * @param data the training instances
     * @throws Exception passed through from subclass overrides
     */
    @Override
    public void buildClusterer(Instances data) throws Exception {
        if (copyInstances) {
            train = new Instances(data);
        } else {
            train = data;
        }
        deleteClassAttribute(train);
    }

    /** @return the per-instance cluster assignments from the last build */
    public double[] getAssignments() {
        return assignments;
    }

    /** @return the instance-index lists for each cluster from the last build */
    public ArrayList<Integer>[] getClusters() {
        return clusters;
    }

    @Override
    public int getSeed() {
        return seed;
    }

    /** Sets the random seed and marks this clusterer as seeded. */
    @Override
    public void setSeed(int newSeed) {
        seed = newSeed;
        seedClusterer = true;
    }

    /** Sets whether buildClusterer should copy its input before altering it. */
    public void setCopyInstances(boolean copy) {
        copyInstances = copy;
    }
}
2,069
27.356164
117
java
tsml-java
tsml-java-master/src/main/java/tsml/clusterers/KShape.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package tsml.clusterers;

import experiments.data.DatasetLoading;
import org.apache.commons.math3.complex.Complex;
import org.apache.commons.math3.transform.DftNormalization;
import org.apache.commons.math3.transform.FastFourierTransformer;
import org.apache.commons.math3.transform.TransformType;
import weka.clusterers.NumberOfClustersRequestable;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.matrix.EigenvalueDecomposition;
import weka.core.matrix.Matrix;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Random;

import static utilities.ClusteringUtilities.randIndex;
import static utilities.ClusteringUtilities.zNormalise;
import static utilities.GenericTools.indexOfMax;
import static utilities.InstanceTools.deleteClassAttribute;

/**
 * Class for the KShape clustering algorithm.
 *
 * Iteratively alternates between shape extraction (centroid refinement via an
 * eigenvector of a centred cross-correlation matrix) and reassignment of each
 * series to its closest centroid under shape-based distance (SBD), until the
 * assignments stop changing or maxIterations is reached.
 *
 * @author Matthew Middlehurst
 */
public class KShape extends EnhancedAbstractClusterer implements NumberOfClustersRequestable {

    //Paparrizos, John, and Luis Gravano.
    //"k-shape: Efficient and accurate clustering of time series."
    //Proceedings of the 2015 ACM SIGMOD International Conference on Management of Data. ACM, 2015.

    // Number of clusters to form.
    private int k = 2;
    // Cap on refinement iterations; the loop can also stop early on convergence.
    private int maxIterations = 100;

    // One centroid series per cluster, refreshed each iteration.
    private Instances centroids;

    public KShape() {
    }

    /** @return the configured number of clusters k */
    @Override
    public int numberOfClusters() {
        return k;
    }

    /** Sets the number of clusters k. */
    @Override
    public void setNumClusters(int numClusters) throws Exception {
        k = numClusters;
    }

    /** Sets the maximum number of refinement iterations. */
    public void setMaxIterations(int i) {
        maxIterations = i;
    }

    /**
     * Builds the KShape model: z-normalises the data, starts from random
     * assignments and zero centroids, then alternates shape extraction and
     * SBD-based reassignment until convergence or maxIterations.
     *
     * @param data the training instances (class attribute removed by the superclass)
     * @throws Exception passed through from the superclass
     */
    @Override
    public void buildClusterer(Instances data) throws Exception {
        super.buildClusterer(data);

        zNormalise(train);

        // Build an all-zero centroid set with the same number of attributes as the data.
        ArrayList<Attribute> atts = new ArrayList(train.numAttributes());
        for (int i = 0; i < train.numAttributes(); i++) {
            atts.add(new Attribute("att" + i));
        }
        centroids = new Instances("centroids", atts, k);
        for (int i = 0; i < k; i++) {
            centroids.add(new DenseInstance(1, new double[train.numAttributes()]));
        }

        Random rand;
        if (!seedClusterer) {
            rand = new Random();
        } else {
            rand = new Random(seed);
        }

        assignments = new double[train.numInstances()];

        //Randomly assign clusters
        for (int i = 0; i < assignments.length; i++) {
            assignments[i] = (int) Math.ceil(rand.nextDouble() * k) - 1;
        }

        SBD sbd = new SBD();
        int iterations = 0;
        double[] prevCluster = new double[train.numInstances()];
        // Force at least one iteration: -1 can never equal a valid assignment.
        prevCluster[0] = -1;

        //While clusters change and less than max iterations
        while (!Arrays.equals(assignments, prevCluster) && iterations < maxIterations) {
            prevCluster = Arrays.copyOf(assignments, assignments.length);

            //Select centroids
            for (int i = 0; i < k; i++) {
                centroids.set(i, shapeExtraction(train, centroids.get(i), i));
            }

            //Set each instance to the cluster of its closest centroid using shape based distance
            for (int i = 0; i < train.numInstances(); i++) {
                double minDist = Double.MAX_VALUE;

                for (int n = 0; n < k; n++) {
                    double dist = sbd.calculateDistance(centroids.get(n), train.get(i));

                    if (dist < minDist) {
                        minDist = dist;
                        assignments[i] = n;
                    }
                }
            }

            iterations++;
        }

        //Create and store an ArrayList for each cluster containing indexes of
        //points inside the cluster.
        clusters = new ArrayList[k];
        for (int i = 0; i < k; i++) {
            clusters[i] = new ArrayList();
        }
        for (int i = 0; i < train.numInstances(); i++) {
            clusters[(int) assignments[i]].add(i);
        }
    }

    /**
     * Assigns a single (possibly class-labelled) instance to the cluster of its
     * closest centroid under shape-based distance.
     *
     * @param inst the instance to cluster
     * @return the index of the closest centroid
     * @throws Exception declared by the Weka interface; not thrown here
     */
    @Override
    public int clusterInstance(Instance inst) throws Exception {
        Instance newInst = copyInstances ? new DenseInstance(inst) : inst;

        // classIndex is read from the original since the copy may have no dataset.
        int clsIdx = inst.classIndex();
        if (clsIdx >= 0){
            newInst.setDataset(null);
            newInst.deleteAttributeAt(clsIdx);
        }

        zNormalise(newInst);

        double minDist = Double.MAX_VALUE;
        int closestCluster = 0;
        for (int i = 0; i < centroids.size(); ++i) {
            SBD sbd = new SBD(newInst, centroids.get(i), false);

            if (sbd.dist < minDist) {
                minDist = sbd.dist;
                closestCluster = i;
            }
        }

        return closestCluster;
    }

    /**
     * Shape extraction step: refines one centroid from the SBD-aligned members
     * of its cluster by taking an eigenvector of the centred correlation matrix.
     *
     * @param data        the full (normalised) training set
     * @param centroid    the current centroid for this cluster
     * @param centroidNum index of the cluster this centroid belongs to
     * @return the refined, z-normalised centroid (all zeros if the cluster is empty)
     */
    private Instance shapeExtraction(Instances data, Instance centroid, int centroidNum) {
        Instances subsample = new Instances(data, 0);
        int seriesSize = centroid.numAttributes();

        double sum = 0;
        for (int i = 0; i < seriesSize; i++) {
            sum += centroid.value(i);
        }
        boolean sumZero = sum == 0;

        //Take subsample of instances in centroids cluster
        for (int i = 0; i < data.numInstances(); i++) {
            if (assignments[i] == centroidNum) {
                //If the centroid sums to 0 add full instance to the subsample
                if (sumZero) {
                    subsample.add(data.get(i));
                } else {
                    // Otherwise align the member to the centroid first (SBD shift).
                    SBD sbd = new SBD(centroid, data.get(i), true);
                    subsample.add(sbd.yShift);
                }
            }
        }

        //Return instances of 0s as centroid if subsample empty
        if (subsample.numInstances() == 0) {
            return new DenseInstance(1, new double[centroid.numAttributes()]);
        }

        zNormalise(subsample);

        double[][] subsampleArray = new double[subsample.numInstances()][];
        for (int i = 0; i < subsample.numInstances(); i++) {
            subsampleArray[i] = subsample.get(i).toDoubleArray();
        }

        //Calculate eigenvectors for subsample
        Matrix matrix = new Matrix(subsampleArray);
        Matrix matrixT = matrix.transpose();
        matrix = matrixT.times(matrix);

        // Centring: Q = I - (1/m) * ones; the matrix analysed is Q * (S^T S) * Q.
        Matrix identity = Matrix.identity(seriesSize, seriesSize);
        Matrix ones = new Matrix(seriesSize, seriesSize, 1);
        ones = ones.times(1.0 / seriesSize);
        identity = identity.minus(ones);

        matrix = identity.times(matrix).times(identity);

        // todo If we dont add a max iterations it can fail to converge
        // EigenvalueDecomposition.maxIter = 100000;
        EigenvalueDecomposition eig = matrix.eig();
        // EigenvalueDecomposition.maxIter = -1;
        Matrix v = eig.getV();

        double[] eigVector = new double[centroid.numAttributes()];
        double[] eigVectorNeg = new double[centroid.numAttributes()];
        double eigSum = 0;
        double eigSumNeg = 0;
        int col = 0;

        // NOTE(review): eigSum/eigSumNeg are NOT reset when moving to the next
        // column, so the sums accumulate across retries — confirm intended.
        while (true) {
            // Score the candidate eigenvector and its negation against the
            // first subsample member; keep whichever is closer.
            for (int i = 0; i < seriesSize; i++) {
                eigVector[i] = v.get(i, col);
                eigVectorNeg[i] = -eigVector[i];

                double firstVal = subsample.get(0).value(i);
                eigSum += (firstVal - eigVector[i]) * (firstVal - eigVector[i]);
                eigSumNeg += (firstVal - eigVectorNeg[i]) * (firstVal - eigVectorNeg[i]);
            }

            //Hack to move to next column if the correct values dont appear on the first one for some reason
            //I have no idea why this happens or which datasets this may happen in
            if (Math.round(eigSum) == subsample.get(0).numAttributes()
                    && Math.round(eigSumNeg) == subsample.get(0).numAttributes()) {
                col++;
            } else {
                break;
            }
        }

        Instance newCent;
        if (eigSum < eigSumNeg) {
            newCent = new DenseInstance(1, eigVector);
        } else {
            newCent = new DenseInstance(1, eigVectorNeg);
        }

        //Normalise and return eigenvector as new centroid
        zNormalise(newCent);

        return newCent;
    }

    /** Example usage: clusters a local dataset and prints the rand index. */
    public static void main(String[] args) throws Exception {
        String dataset = "Trace";
        Instances inst = DatasetLoading.loadDataNullable("D:\\CMP Machine Learning\\Datasets\\UnivariateARFF\\"
                + dataset + "/" + dataset + "_TRAIN.arff");
        Instances inst2 = DatasetLoading.loadDataNullable("D:\\CMP Machine Learning\\Datasets\\UnivariateARFF\\"
                + dataset + "/" + dataset + "_TEST.arff");
        inst.setClassIndex(inst.numAttributes() - 1);
        inst.addAll(inst2);

        KShape k = new KShape();
        k.setSeed(0);
        k.k = inst.numClasses();
        k.buildClusterer(inst);

        System.out.println(k.clusters.length);
        System.out.println(Arrays.toString(k.assignments));
        System.out.println(Arrays.toString(k.clusters));
        System.out.println(randIndex(k.assignments, inst));
    }

    //Class for calculating Shape Based Distance
    public static class SBD {

        // Last computed SBD value: 1 - max normalised cross-correlation.
        private double dist;
        // Aligned copy of the second series, only filled when calcShift is true.
        private Instance yShift;

        private FastFourierTransformer fft;

        public SBD() {
        }

        private SBD(Instance first, Instance second, boolean calcShift) {
            calculateDistance(first, second, calcShift);
        }

        /** Builds the full pairwise SBD matrix for a dataset. */
        public double[][] createDistanceMatrix(Instances data){
            double[][] distMatrix = new double[data.numInstances()][];

            for (int i = 0; i < data.numInstances(); i++){
                distMatrix[i] = new double[data.numInstances()];
                Instance first = data.get(i);

                for (int n = 0; n < data.numInstances(); n++){
                    distMatrix[i][n] = calculateDistance(first, data.get(n));
                }
            }

            return distMatrix;
        }

        /** Builds only the lower triangle (plus diagonal slot) of the SBD matrix. */
        public double[][] createBottomHalfDistanceMatrix(Instances data){
            double[][] distMatrix = new double[data.numInstances()][];

            for (int i = 0; i < data.numInstances(); i++){
                distMatrix[i] = new double[i + 1];
                Instance first = data.get(i);

                for (int n = 0; n < i; n++){
                    distMatrix[i][n] = calculateDistance(first, data.get(n));
                }
            }

            return distMatrix;
        }

        /**
         * Computes the shape-based distance between two series.
         *
         * @return 1 minus the maximum normalised cross-correlation
         */
        public double calculateDistance(Instance first, Instance second) {
            calculateDistance(first, second, false);
            return dist;
        }

        /**
         * Core SBD computation via FFT cross-correlation; optionally also
         * builds the shifted/zero-padded alignment of the second series.
         */
        private void calculateDistance(Instance first, Instance second, boolean calcShift) {
            int oldLength = first.numAttributes();
            int oldLengthY = second.numAttributes();
            int maxLength = Math.max(oldLength, oldLengthY);
            // Next power of two >= maxLength, as required by the FFT.
            int nfft = (int) Math.pow(2.0, (int) Math.ceil(Math.log(maxLength) / Math.log(2)));

            //FFT and IFFT
            fft = new FastFourierTransformer(DftNormalization.STANDARD);
            Complex[] firstC = fft(first, oldLength, nfft);
            Complex[] secondC = fft(second, oldLengthY, nfft);

            // Cross-correlation in the frequency domain: F(x) * conj(F(y)).
            for (int i = 0; i < nfft; i++) {
                firstC[i] = firstC[i].multiply(secondC[i].conjugate());
            }

            firstC = fft.transform(firstC, TransformType.INVERSE);

            //Calculate NCCc values
            double firstNorm = sumSquare(first);
            double secondNorm = sumSquare(second);
            double norm = Math.sqrt(firstNorm * secondNorm);

            double[] ncc = new double[oldLength * 2 - 1];
            int idx = 0;

            // Unwrap the circular correlation into lag order (negative lags first).
            for (int i = nfft - oldLength + 1; i < nfft; i++) {
                ncc[idx++] = firstC[i].getReal() / norm;
            }
            for (int i = 0; i < oldLength; i++) {
                ncc[idx++] = firstC[i].getReal() / norm;
            }

            double maxValue = 0;
            int shift = -1;

            //Largest NCCc value and index
            for (int i = 0; i < ncc.length; i++) {
                if (ncc[i] > maxValue) {
                    maxValue = ncc[i];
                    shift = i;
                }
            }

            dist = 1 - maxValue;

            //Create y', shifting the second instance in a direction and padding with 0s
            if (calcShift) {
                shift -= maxLength - 1;
                yShift = new DenseInstance(1, new double[oldLengthY]);

                if (shift >= 0) {
                    for (int i = 0; i < oldLengthY - shift; i++) {
                        yShift.setValue(i + shift, second.value(i));
                    }
                } else {
                    for (int i = 0; i < oldLengthY + shift; i++) {
                        yShift.setValue(i, second.value(i - shift));
                    }
                }
            }
        }

        //Run FFT and return array of complex numbers
        private Complex[] fft(Instance inst, int oldLength, int nfft) {
            Complex[] complex = new Complex[nfft];

            // Copy the series into the real parts, zero-pad up to nfft.
            for (int i = 0; i < oldLength; i++) {
                complex[i] = new Complex(inst.value(i), 0);
            }
            for (int i = oldLength; i < nfft; i++) {
                complex[i] = new Complex(0, 0);
            }

            return fft.transform(complex, TransformType.FORWARD);
        }

        // Sum of squared attribute values, used for correlation normalisation.
        private double sumSquare(Instance inst) {
            double sum = 0;

            for (int i = 0; i < inst.numAttributes(); i++) {
                sum += inst.value(i) * inst.value(i);
            }

            return sum;
        }
    }
}
32.741784
130
java
tsml-java
tsml-java-master/src/main/java/tsml/clusterers/TTC.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package tsml.clusterers;

import experiments.data.DatasetLoading;
import machine_learning.clusterers.CAST;
import machine_learning.clusterers.KMedoids;
import tsml.classifiers.legacy.elastic_ensemble.distance_functions.DTW;
import weka.clusterers.NumberOfClustersRequestable;
import weka.core.DenseInstance;
import weka.core.EuclideanDistance;
import weka.core.Instance;
import weka.core.Instances;

import java.util.ArrayList;
import java.util.Arrays;

import static utilities.ClusteringUtilities.*;
import static utilities.InstanceTools.deleteClassAttribute;

/**
 * Class for a dictionary based clustering algorithm, very experimental.
 *
 * Two-stage hybrid: CAST first forms fine-grained subclusters under Euclidean
 * distance, then KMedoids with DTW groups the subcluster prototypes into the
 * final k clusters.
 *
 * @author Matthew Middlehurst
 */
public class TTC extends EnhancedAbstractClusterer implements NumberOfClustersRequestable {

    //Aghabozorgi, Saeed, et al.
    //"A hybrid algorithm for clustering of time series data based on affinity search technique."
    //The Scientific World Journal 2014 (2014).

    // Affinity threshold passed to the CAST subclustering stage.
    private double affinityThreshold = 0.01;
    // Final number of clusters produced by the KMedoids stage.
    private int k = 2;

    // Pairwise Euclidean distances over the normalised training data.
    private double[][] distanceMatrix;
    // CAST output: fine-grained subclusters of training indices.
    private ArrayList<Integer>[] subclusters;
    // Fitted KMedoids model, reused by clusterInstance.
    private KMedoids kmedoids;

    public TTC() {
    }

    /** @return the configured number of clusters k */
    @Override
    public int numberOfClusters() throws Exception {
        return k;
    }

    /** Sets the number of clusters k. */
    @Override
    public void setNumClusters(int numClusters) throws Exception {
        k = numClusters;
    }

    /**
     * Builds the two-stage model: CAST subclusters -> affinity-weighted
     * prototypes -> KMedoids (DTW) over the prototypes, then maps each
     * training instance to the final cluster of its subcluster.
     *
     * @param data the training instances (class attribute removed by the superclass)
     * @throws Exception passed through from the underlying clusterers
     */
    @Override
    public void buildClusterer(Instances data) throws Exception {
        super.buildClusterer(data);

        zNormalise(train);

        EuclideanDistance ed = new EuclideanDistance();
        ed.setDontNormalize(true);
        distanceMatrix = createDistanceMatrix(train, ed);

        //Cluster using the CAST algorithm
        CAST cast = new CAST(distanceMatrix);
        cast.setAffinityThreshold(affinityThreshold);
        cast.buildClusterer(train);
        subclusters = cast.getClusters();
        ArrayList<double[]> affinities = cast.getClusterAffinities();

        double[][] prototypes = new double[subclusters.length][train.numAttributes()];

        //Take average of each cluster
        // Members are weighted by (1 - affinity) before averaging.
        for (int i = 0; i < subclusters.length; i++) {
            for (int n = 0; n < train.numAttributes(); n++) {
                for (int g = 0; g < subclusters[i].size(); g++) {
                    prototypes[i][n] += train.get(subclusters[i].get(g)).value(n)
                            * (1 - affinities.get(i)[g]);
                }

                prototypes[i][n] /= subclusters[i].size();
            }
        }

        Instances cl = new Instances(train, subclusters.length);
        for (int i = 0; i < subclusters.length; i++) {
            cl.add(new DenseInstance(1, prototypes[i]));
        }

        //Use KMedoids using DTW distance to cluster discretised data
        kmedoids = new KMedoids();
        kmedoids.setDistanceFunction(new DTW());
        kmedoids.setNumClusters(k);
        kmedoids.setNormaliseData(false);
        kmedoids.setCopyInstances(false);
        if (seedClusterer)
            kmedoids.setSeed(seed);
        kmedoids.buildClusterer(cl);

        ArrayList<Integer>[] ptClusters = kmedoids.getClusters();
        assignments = new double[train.size()];

        //Assign each instance to the cluster assigned it its subcluster using KMedoids
        // Loop starts at 1: assignments default to 0, so cluster 0 needs no pass.
        for (int i = 1; i < k; i++) {
            for (int n = 0; n < ptClusters[i].size(); n++) {
                ArrayList<Integer> subcluster = subclusters[ptClusters[i].get(n)];

                for (int g = 0; g < subcluster.size(); g++) {
                    assignments[subcluster.get(g)] = i;
                }
            }
        }

        clusters = new ArrayList[k];
        for (int i = 0; i < k; i++) {
            clusters[i] = new ArrayList();
        }
        for (int i = 0; i < train.size(); i++) {
            clusters[(int) assignments[i]].add(i);
        }
    }

    /**
     * Assigns a single (possibly class-labelled) instance to a cluster using
     * the fitted KMedoids model.
     *
     * @param inst the instance to cluster
     * @return the KMedoids cluster index
     * @throws Exception passed through from KMedoids
     */
    @Override
    public int clusterInstance(Instance inst) throws Exception {
        Instance newInst = copyInstances ? new DenseInstance(inst) : inst;

        // classIndex is read from the original since the copy may have no dataset.
        int clsIdx = inst.classIndex();
        if (clsIdx >= 0){
            newInst.setDataset(null);
            newInst.deleteAttributeAt(clsIdx);
        }

        zNormalise(newInst);

        return kmedoids.clusterInstance(newInst);
    }

    /** Example usage: clusters a local dataset and prints the rand index. */
    public static void main(String[] args) throws Exception {
        String dataset = "Trace";
        Instances inst = DatasetLoading.loadDataNullable("Z:\\ArchiveData\\Univariate_arff\\"
                + dataset + "/" + dataset + "_TRAIN.arff");
        Instances inst2 = DatasetLoading.loadDataNullable("Z:\\ArchiveData\\Univariate_arff\\"
                + dataset + "/" + dataset + "_TEST.arff");
        inst.setClassIndex(inst.numAttributes() - 1);
        inst.addAll(inst2);

        TTC k = new TTC();
        k.setSeed(0);
        k.k = inst.numClasses();
        k.buildClusterer(inst);

        System.out.println(k.clusters.length);
        System.out.println(Arrays.toString(k.clusters));
        System.out.println(randIndex(k.assignments, inst));
    }
}
5,770
33.147929
112
java
tsml-java
tsml-java-master/src/main/java/tsml/clusterers/UnsupervisedShapelets.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.clusterers; import experiments.data.DatasetLoading; import machine_learning.clusterers.KMeans; import org.apache.commons.math3.complex.Complex; import org.apache.commons.math3.transform.DftNormalization; import org.apache.commons.math3.transform.FastFourierTransformer; import org.apache.commons.math3.transform.TransformType; import weka.clusterers.NumberOfClustersRequestable; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Random; import static utilities.ArrayUtilities.*; import static utilities.ClusteringUtilities.randIndex; import static utilities.GenericTools.min; import static utilities.InstanceTools.deleteClassAttribute; import static utilities.InstanceTools.toWekaInstances; /** * Class for the UnsupervisedShapelets clustering algorithm. * * @author Matthew Middlehurst */ public class UnsupervisedShapelets extends EnhancedAbstractClusterer implements NumberOfClustersRequestable { //Zakaria, Jesin, Abdullah Mueen, and Eamonn Keogh. //"Clustering time series using unsupervised-shapelets." //2012 IEEE 12th International Conference on Data Mining. IEEE, 2012. 
private int k = 2; private boolean useKMeans = true; private int numKMeansFolds = 20; private int[] shapeletLengths = {50}; private boolean exhaustiveSearch = false; private double randomSearchProportion = -1; private boolean allowUnassignedCluster = false; private ArrayList<UShapelet> shapelets; private KMeans shapeletClusterer; private Instances header; private int numShapeletsToUse; private int numInstances; private double firstGap; public UnsupervisedShapelets() { } @Override public int numberOfClusters() { return k; } @Override public void setNumClusters(int numClusters) throws Exception { k = numClusters; } public void setUseKMeans(boolean b){ useKMeans = b; } public void setNumKMeansFolds(int i) { numKMeansFolds = i; } public void setShapeletLengths(int[] arr){ shapeletLengths = arr; } public void setExhaustiveSearch(boolean b){ exhaustiveSearch = b; } public void setRandomSearchProportion(double d){ randomSearchProportion = d; } private void setAllowUnassignedCluster(boolean b) { allowUnassignedCluster = b; } @Override public void buildClusterer(Instances data) throws Exception { super.buildClusterer(data); extractUShapelets(train); clusterData(train); } @Override public int clusterInstance(Instance inst) throws Exception { Instance newInst = copyInstances ? 
new DenseInstance(inst) : inst; int clsIdx = inst.classIndex(); if (clsIdx >= 0){ newInst.setDataset(null); newInst.deleteAttributeAt(clsIdx); } if (useKMeans) { Instance shapeletDists = new DenseInstance(numShapeletsToUse); for (int i = 0; i < numShapeletsToUse; i++) { shapeletDists.setValue(i, shapelets.get(i).computeDistance(inst)); } shapeletDists.setDataset(header); return shapeletClusterer.clusterInstance(shapeletDists); } else { double minDist = Double.MAX_VALUE; int minIdx = -1; for (int i = 0; i < shapelets.size(); i++) { double dist = shapelets.get(i).computeDistance(inst); if (dist < minDist){ minDist = dist; minIdx = i; } } return minIdx; } } public Instance shapeletTransform(Instance inst) throws Exception { Instance shapeletDists = new DenseInstance(numShapeletsToUse); for (int i = 0; i < numShapeletsToUse; i++) { shapeletDists.setValue(i, shapelets.get(i).computeDistance(inst)); } shapeletDists.setDataset(header); return shapeletDists; } private void extractUShapelets(Instances data) { if (data.numAttributes() / 2 < min(shapeletLengths)) { shapeletLengths = new int[]{data.numAttributes() / 2}; } ArrayList<Integer> indicies = null; if (!useKMeans) { assignments = new double[data.numInstances()]; indicies = new ArrayList<>(assignments.length); for (int i = assignments.length - 1; i >= 0; i--){ indicies.add(i); } } Random rand; if (!seedClusterer) { rand = new Random(); } else { rand = new Random(seed); } shapelets = new ArrayList(); numInstances = data.size(); Instance inst = data.firstInstance(); boolean finished = false; int iteration = 0; while (!finished) { ArrayList<UShapelet> shapeletCandidates = new ArrayList(); if (exhaustiveSearch){ //Finds all candidate shapelets on all instances for (int shapeletLength : shapeletLengths) { for (int j = 0; j < data.numInstances(); j++) { inst = data.get(j); for (int n = 0; n < inst.numAttributes() - shapeletLength; n++) { UShapelet candidate = new UShapelet(n, shapeletLength, inst); 
candidate.computeGap(data); shapeletCandidates.add(candidate); } } } } else if (randomSearchProportion > 0){ //Finds all candidate shapelets on a random selection of instances int seriesToSelect = (int) Math.ceil(data.numInstances() * randomSearchProportion); ArrayList<Integer> randomIndicies = new ArrayList<>(data.numInstances()); for (int i = 0; i < data.numInstances(); i++){ randomIndicies.add(i); } for (int shapeletLength : shapeletLengths) { for (int j = 0; j < seriesToSelect; j++) { inst = data.get(randomIndicies.remove(rand.nextInt(randomIndicies.size()))); for (int n = 0; n < inst.numAttributes() - shapeletLength; n++) { UShapelet candidate = new UShapelet(n, shapeletLength, inst); candidate.computeGap(data); shapeletCandidates.add(candidate); } } } } else { //Finds all candidate shapelets on the selected instance for (int shapeletLength : shapeletLengths) { for (int n = 0; n < inst.numAttributes() - shapeletLength; n++) { UShapelet candidate = new UShapelet(n, shapeletLength, inst); candidate.computeGap(data); shapeletCandidates.add(candidate); } } } double maxGap = -1; int maxGapIndex = -1; //Finds the shapelet with the highest gap value for (int i = 0; i < shapeletCandidates.size(); i++) { if (shapeletCandidates.get(i).gap > maxGap) { maxGap = shapeletCandidates.get(i).gap; maxGapIndex = i; } } if (!useKMeans && iteration == 0){ firstGap = maxGap; } else if (!useKMeans && maxGap < firstGap / 2){ break; } //Adds the shapelet with the best gap value to the pool of shapelets UShapelet best = shapeletCandidates.get(maxGapIndex); shapelets.add(best); double[] distances = best.computeDistances(data); ArrayList<Double> lesserDists = new ArrayList(); double maxDist = -1; int maxDistIndex = -1; //Finds the instance with the max dist to the shapelet and all with a dist lower than the distance used //to generate the gap value for (int i = 0; i < distances.length; i++) { if (distances[i] < best.dt) { lesserDists.add(distances[i]); } else if (distances[i] > maxDist) 
{ maxDist = distances[i]; maxDistIndex = i; } } //Use max dist instance to generate new shapelet and remove low distance instances if (lesserDists.size() == 1) { finished = true; } else { inst = data.get(maxDistIndex); double mean = mean(lesserDists); double cutoff = mean + standardDeviation(lesserDists, mean); Instances newData = new Instances(data, 0); for (int i = 0; i < data.numInstances(); i++) { if (distances[i] >= cutoff) { newData.add(data.get(i)); } else if (!useKMeans){ assignments[indicies.remove(data.numInstances() - i)] = iteration; } } data = newData; if (data.size() <= 1) { finished = true; } else{ iteration++; } } } if (!useKMeans){ for (int idx: indicies){ assignments[idx] = allowUnassignedCluster ? -1 : iteration; } } } private void clusterData(Instances data) throws Exception { if (useKMeans) { Instances distanceMap; double[][] foldClusters = new double[shapelets.size()][]; double[][] distanceMatrix = new double[numInstances][1]; double minRandIndex = Double.MAX_VALUE; KMeans bestClusterer = null; //Create a distance matrix by calculating the distance of shapelet i and previous shapelets to each time //series for (int i = 0; i < shapelets.size(); i++) { UShapelet shapelet = shapelets.get(i); double[] distances = shapelet.computeDistances(data); double minDist = Double.MAX_VALUE; for (int n = 0; n < numInstances; n++) { distanceMatrix[n] = Arrays.copyOf(distanceMatrix[n], i + 1); distanceMatrix[n][i] = distances[n]; } distanceMap = toWekaInstances(distanceMatrix); //Build multiple kmeans clusterers using the one with the smallest squared distance for (int n = 0; n < numKMeansFolds; n++) { KMeans kmeans = new KMeans(); kmeans.setNumClusters(k); kmeans.setNormaliseData(false); kmeans.setCopyInstances(false); if (seedClusterer) kmeans.setSeed(seed + (n + 7) * (i + 7)); kmeans.buildClusterer(distanceMap); double dist = kmeans.clusterSquaredDistance(distanceMap); if (dist < minDist) { minDist = dist; foldClusters[i] = kmeans.getAssignments(); 
bestClusterer = kmeans; } } //If the rand index of this output of clusters compared to the previous one is greater than the current //best use this output of clusters double randIndex = 1; if (i > 0) { randIndex = 1 - randIndex(foldClusters[i - 1], foldClusters[i]); } if (randIndex < minRandIndex) { minRandIndex = randIndex; shapeletClusterer = bestClusterer; header = new Instances(distanceMap, 0); numShapeletsToUse = i; } } assignments = foldClusters[numShapeletsToUse]; clusters = new ArrayList[k]; for (int i = 0; i < k; i++) { clusters[i] = new ArrayList(); } for (int i = 0; i < numInstances; i++) { clusters[(int) assignments[i]].add(i); } } else{ List<Double> u = unique(assignments); clusters = new ArrayList[u.size()]; for (int i = 0; i < clusters.length; i++) { clusters[i] = new ArrayList(); } for (int i = 0; i < numInstances; i++) { clusters[(int) assignments[i]].add(i); } } } private double mean(ArrayList<Double> dists) { double meanSum = 0; for (Double dist : dists) { meanSum += dist; } return meanSum / dists.size(); } private double standardDeviation(ArrayList<Double> dists, double mean) { double sum = 0; double temp; for (Double dist : dists) { temp = dist - mean; sum += temp * temp; } double meanOfDiffs = sum / dists.size(); return Math.sqrt(meanOfDiffs); } public static void main(String[] args) throws Exception { String dataset = "Earthquakes"; Instances inst = DatasetLoading.loadDataNullable("D:\\CMP Machine Learning\\Datasets\\UnivariateARFF\\" + dataset + "/" + dataset + "_TRAIN.arff"); Instances inst2 = DatasetLoading.loadDataNullable("D:\\CMP Machine Learning\\Datasets\\UnivariateARFF\\" + dataset + "/" + dataset + "_TEST.arff"); inst.setClassIndex(inst.numAttributes() - 1); inst.addAll(inst2); UnsupervisedShapelets us = new UnsupervisedShapelets(); us.setSeed(0); us.k = inst.numClasses(); us.buildClusterer(inst); System.out.println(us.clusters.length); System.out.println(Arrays.toString(us.assignments)); 
System.out.println(Arrays.toString(us.clusters)); System.out.println(randIndex(us.assignments, inst)); } //Class for a single Unsupervised Shapelet with methods to calculate distance to time series and the gap value private class UShapelet { //Where the shapelet starts int startPoint; //Length of the shapelet int length; //Series the shapelet is extracted from double[] series; double gap = 0; double dt = 0; UShapelet(int startPoint, int length, Instance inst) { this.startPoint = startPoint; this.length = length; this.series = inst.toDoubleArray(); } //finds the highest gap value and corresponding distance for this shapelet on the input dataset void computeGap(Instances data) { double[] sortedDistances = computeDistances(data); Arrays.sort(sortedDistances); for (int i = 0; i < sortedDistances.length - 1; i++) { double dist = (sortedDistances[i] + sortedDistances[i + 1]) / 2; ArrayList<Double> lesserDists = new ArrayList(); ArrayList<Double> greaterDists = new ArrayList(); //separate instance distances based on whether they are greater or less than the current dist for (double sortedDistance : sortedDistances) { if (sortedDistance < dist) { lesserDists.add(sortedDistance); } else { greaterDists.add(sortedDistance); } } double ratio = (double) lesserDists.size() / greaterDists.size(); if (1.0 / k < ratio) { double lesserMean = mean(lesserDists); double greaterMean = mean(greaterDists); double lesserStdev = standardDeviation(lesserDists, lesserMean); double greaterStdev = standardDeviation(greaterDists, greaterMean); //gap value for this distance double gap = greaterMean - greaterStdev - (lesserMean + lesserStdev); if (gap > this.gap) { this.gap = gap; this.dt = dist; } } } } //Lowest euclidean distance of the shapelet to each instance in the input dataset double[] computeDistances(Instances data) { double[] distances = new double[data.numInstances()]; double[] shapelet = zNormalise(); double sumy = sum(shapelet); double sumy2 = sumPow2(shapelet); int nfft = (int) 
Math.pow(2.0, (int) Math.ceil(Math.log(data.numAttributes()) / Math.log(2))); Complex[] yfft = new Complex[nfft]; for (int n = 0; n < nfft; n++) { if (n < length) yfft[n] = new Complex(shapelet[length - n - 1], 0); else yfft[n] = new Complex(0, 0); } FastFourierTransformer fft = new FastFourierTransformer(DftNormalization.STANDARD); yfft = fft.transform(yfft, TransformType.FORWARD); for (int i = 0; i < data.numInstances(); i++) { distances[i] = computeDistance(data.get(i).toDoubleArray(), sumy, sumy2, nfft, fft, yfft); } return distances; } double computeDistance(Instance data) { double[] shapelet = zNormalise(); double sumy = sum(shapelet); double sumy2 = sumPow2(shapelet); int nfft = (int) Math.pow(2.0, (int) Math.ceil(Math.log(data.numAttributes()) / Math.log(2))); Complex[] yfft = new Complex[nfft]; for (int n = 0; n < nfft; n++) { if (n < length) yfft[n] = new Complex(shapelet[length - n - 1], 0); else yfft[n] = new Complex(0, 0); } FastFourierTransformer fft = new FastFourierTransformer(DftNormalization.STANDARD); yfft = fft.transform(yfft, TransformType.FORWARD); return computeDistance(data.toDoubleArray(), sumy, sumy2, nfft, fft, yfft); } double computeDistance(double[] inst, double sumy, double sumy2, int nfft, FastFourierTransformer fft, Complex[] yfft) { Complex[] xfft = new Complex[nfft]; for (int n = 0; n < nfft; n++) { if (n < inst.length) xfft[n] = new Complex(inst[n], 0); else xfft[n] = new Complex(0, 0); } xfft = fft.transform(xfft, TransformType.FORWARD); Complex[] zfft = new Complex[nfft]; for (int n = 0; n < nfft; n++) { zfft[n] = xfft[n].multiply(yfft[n]); } zfft = fft.transform(zfft, TransformType.INVERSE); double[] cumsumx = cumsum(inst); double[] cumsumx2 = cumsumPow2(inst); double[] dists = new double[inst.length - length]; for (int i = 0; i < dists.length; i++){ double sumx = cumsumx[i + length] - cumsumx[i]; double sumx2 = cumsumx2[i + length] - cumsumx2[i]; double meanx = sumx / length; double sigmax2 = sumx2 / length - Math.pow(meanx, 
2); double sigmax = Math.sqrt(sigmax2); dists[i] = (sumx2 - 2 * sumx * meanx + length * Math.pow(meanx, 2)) / sigmax2 - 2 * (zfft[i + length].getReal() - sumy * meanx) / sigmax + sumy2; } return Math.sqrt(min(dists)) / Math.sqrt(length); } //return the shapelet using the series, start point and shapelet length double[] zNormalise() { double meanSum = 0; for (int i = startPoint; i < startPoint + length; i++) { meanSum += series[i]; } double mean = meanSum / length; double stdevSum = 0; double temp; for (int i = startPoint; i < startPoint + length; i++) { temp = series[i] - mean; stdevSum += temp * temp; } double stdev = Math.sqrt(stdevSum / length); double[] output = new double[length]; if (stdev != 0) { for (int i = startPoint; i < startPoint + length; i++) { output[i - startPoint] = (series[i] - mean) / stdev; } } return output; } } }
21,860
35.495826
130
java
tsml-java
tsml-java-master/src/main/java/tsml/data_containers/TSCapabilities.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package tsml.data_containers;

import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 * Capabilities mechanism similar to that used by Weka: a set of
 * {@link TSCapability} flags declared by an estimator, which can then be
 * tested against a dataset or a single instance.
 *
 * @author Aaron Bostrom, 2020
 */
public class TSCapabilities {

    /** the object that owns this capabilities instance */
    protected TSCapabilitiesHandler owner;

    /** the set for storing the active capabilities */
    protected Set<TSCapability> capabilities;

    /** Returns the number of capabilities currently enabled. */
    public int numCapabilities() {
        return capabilities.size();
    }

    public TSCapabilities() {
        capabilities = new HashSet<>();
    }

    public TSCapabilities(final TSCapabilitiesHandler owner) {
        this();
        this.owner = owner;
    }

    /** Enables a capability; returns this for chaining. */
    public TSCapabilities enable(TSCapability capability) {
        capabilities.add(capability);
        return this;
    }

    /** Enables the disjunction of two capabilities; returns this for chaining. */
    public TSCapabilities enableOr(TSCapability either, TSCapability or) {
        capabilities.add(new Or(either, or));
        return this;
    }

    /** Enables the conjunction of two capabilities; returns this for chaining. */
    public TSCapabilities enableAnd(TSCapability either, TSCapability and) {
        capabilities.add(new And(either, and));
        return this;
    }

    /** Disables a capability; returns this for chaining. */
    public TSCapabilities disable(TSCapability capability) {
        capabilities.remove(capability);
        return this;
    }

    /**
     * Returns whether the given capability has been enabled. Relies on the
     * capability's equals/hashCode; the built-in capabilities are shared
     * singletons, so identity comparison suffices for them.
     */
    public boolean handles(TSCapability capability) {
        return capabilities.contains(capability);
    }

    /** Tests every enabled capability against the dataset. */
    public boolean test(TimeSeriesInstances data) {
        return this.capabilities.stream().allMatch(e -> e.test(data));
    }

    /** Tests every enabled capability against a single instance. */
    public boolean test(TimeSeriesInstance inst) {
        return this.capabilities.stream().allMatch(e -> e.test(inst));
    }

    /** A single testable property of time series data. */
    public static abstract class TSCapability {
        public abstract boolean test(TimeSeriesInstances data);

        public abstract boolean test(TimeSeriesInstance inst);
    }

    public static TSCapability EQUAL_LENGTH = new EqualLength();
    public static TSCapability UNEQUAL_LENGTH = new Not(new EqualLength());
    public static TSCapability UNIVARIATE = new Not(new Multivariate());
    public static TSCapability MULTIVARIATE = new Multivariate();
    public static TSCapability NO_MISSING_VALUES = new Not(new MissingValues());
    public static TSCapability MISSING_VALUES = new MissingValues();
    public static TSCapability MULTI_OR_UNIVARIATE = new Or(UNIVARIATE, MULTIVARIATE);
    public static TSCapability EQUAL_OR_UNEQUAL_LENGTH = new Or(EQUAL_LENGTH, UNEQUAL_LENGTH);

    /** Capability requiring a minimum series length. */
    public static TSCapability MIN_LENGTH(int length) {
        return new MinLength(length);
    }

    //adapter wrapper to flip a boolean to simplify capabilities objects.
    private static final class Not extends TSCapability {
        TSCapability cap;

        private Not(TSCapability capability) {
            cap = capability;
        }

        @Override
        public boolean test(TimeSeriesInstances data) {
            return !cap.test(data);
        }

        @Override
        public boolean test(TimeSeriesInstance inst) {
            return !cap.test(inst);
        }

        @Override
        public int hashCode() {
            return cap.hashCode();
        }
    }

    /** Disjunction: passes if any wrapped capability passes. */
    private static final class Or extends TSCapability {
        Set<TSCapability> caps;

        private Or(TSCapability... capabilites) {
            caps = Stream.of(capabilites).collect(Collectors.toSet());
        }

        @Override
        public boolean test(TimeSeriesInstances data) {
            return caps.stream().anyMatch(cap -> cap.test(data));
        }

        @Override
        public boolean test(TimeSeriesInstance inst) {
            return caps.stream().anyMatch(cap -> cap.test(inst));
        }

        @Override
        public int hashCode() {
            return caps.hashCode();
        }
    }

    /** Conjunction: passes only if all wrapped capabilities pass. */
    private static final class And extends TSCapability {
        Set<TSCapability> caps;

        private And(TSCapability... capabilites) {
            caps = Stream.of(capabilites).collect(Collectors.toSet());
        }

        @Override
        public boolean test(TimeSeriesInstances data) {
            //BUG FIX: the accumulator previously started at false and used &=,
            //so an And capability could never pass regardless of its members.
            return caps.stream().allMatch(cap -> cap.test(data));
        }

        @Override
        public boolean test(TimeSeriesInstance inst) {
            //BUG FIX: same false-start &= defect as the dataset overload.
            return caps.stream().allMatch(cap -> cap.test(inst));
        }

        @Override
        public int hashCode() {
            return caps.hashCode();
        }
    }

    /** Passes when all series are of equal length. */
    protected static final class EqualLength extends TSCapability {
        @Override
        public boolean test(TimeSeriesInstances data) {
            return data.isEqualLength();
        }

        @Override
        public boolean test(TimeSeriesInstance inst) {
            return inst.isEqualLength();
        }
    }

    /** Passes when the data is multivariate. */
    protected static final class Multivariate extends TSCapability {
        @Override
        public boolean test(TimeSeriesInstances data) {
            return data.isMultivariate();
        }

        @Override
        public boolean test(TimeSeriesInstance inst) {
            return inst.isMultivariate();
        }
    }

    /** Passes when the data contains missing values. */
    protected static final class MissingValues extends TSCapability {
        @Override
        public boolean test(TimeSeriesInstances data) {
            return data.hasMissing();
        }

        @Override
        public boolean test(TimeSeriesInstance inst) {
            return inst.hasMissing();
        }
    }

    /** Passes when the shortest series is at least minL long. */
    protected static final class MinLength extends TSCapability {
        int minL;

        protected MinLength(int minL) {
            this.minL = minL;
        }

        @Override
        public boolean test(TimeSeriesInstances data) {
            return data.getMinLength() >= minL;
        }

        @Override
        public boolean test(TimeSeriesInstance inst) {
            return inst.getMinLength() >= minL;
        }
    }
}
6,934
27.657025
94
java
tsml-java
tsml-java-master/src/main/java/tsml/data_containers/TSCapabilitiesHandler.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package tsml.data_containers;

/**
 * Interface for components that expose a {@link TSCapabilities} object
 * describing the kinds of time series data they can handle.
 *
 * @author Aaron Bostrom, 2020
 */
public interface TSCapabilitiesHandler {

    /**
     * Returns the capabilities of this handler.
     *
     * @return the set of supported time series capabilities
     */
    public TSCapabilities getTSCapabilities();

}
929
34.769231
76
java
tsml-java
tsml-java-master/src/main/java/tsml/data_containers/TimeSeries.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.data_containers; import java.io.Serializable; import java.util.*; import java.util.stream.Collectors; import java.util.stream.DoubleStream; import java.util.stream.Stream; /** * Class to store a time series. The series can have different indices (time stamps) and store missing values (NaN). * * The model for the indexes is the first is always zero the other indexes are in units of md.increment * Hopefully most of this can be encapsulated, so if the data has equal increments then indices is null and the user * * @author Aaron Bostrom, 2020 */ public class TimeSeries implements Iterable<Double>, Serializable { public final static double DEFAULT_VALUE = Double.NaN; private final static List<Double> EMPTY_INDICES = Collections.emptyList(); private List<Double> series; private List<Double> indices = EMPTY_INDICES; // just for internal use private TimeSeries() {} /** * Create a TimeSeries object from an array of time series data. * * @param data time series raw data */ public TimeSeries(double[] data){ series = new ArrayList<Double>(); for(double value : data) series.add(value); } /** * Create a TimeSeries object from a list of time series data. 
* * @param data time series raw data */ public TimeSeries(List<Double> data) { series = new ArrayList<>(data); } /** * Create a TimeSeries object from another TimeSeries object. * * @param other TimeSeries object */ public TimeSeries(TimeSeries other) { this(other.series); } /** * Returns the length of the series. * * @return int length of series */ public int getSeriesLength() { return series.size(); } /** * Returns whether there is a valid value at the index passed. * i.e. if index is out of range or NaN, returns false. * * @param index to check * @return true if valid, false if not */ public boolean hasValidValueAt(int index) { // test whether its out of range, or NaN return index < series.size() && Double.isFinite(series.get(index)); } /** * Returns value at passed index. * * @param index to get value from * @return value at index */ public double getValue(int index){ return series.get(index); } /** * Returns a value at a specific index in the time series. This method conducts unboxing so use getValue if you care about performance. * * @param index to get value from * @return value at index */ public Double get(int index) { return series.get(index); } /** * Returns value at index passed, or default value if no valid value at index. * * @param index to get value from * @return value at index, or default value if not valid at index */ public double getOrDefault(int index) { return hasValidValueAt(index) ? getValue(index) : DEFAULT_VALUE; } /** * Returns a DoubleStream of values in series. * * @return stream of values in series */ public DoubleStream streamValues() { return series.stream().mapToDouble(Double::doubleValue); } /** * Returns Stream of doubles for values in series. * * @return stream of doubles in series */ public Stream<Double> stream() { return series.stream(); } /** * Returns all values in series. 
* * @return values in series */ public List<Double> getSeries() { return series; } /** * @return List<Double> */ public List<Double> getIndices() { return indices; } /** * Returns the series, separated by commas. * * @return series, comma separated */ @Override public String toString(){ StringBuilder sb = new StringBuilder(); for(double val : series) { sb.append(val).append(','); } return sb.substring(0, sb.length() - 1); } /** * Returns all values in the series. * * @return values in series */ public double[] toValueArray() { return getSeries().stream().mapToDouble(Double::doubleValue).toArray(); } /** * Returns a new TimeSeries object containing only values at indexes passed. * * @param indexesToKeep from series * @return a new TimeSeries object */ public TimeSeries getVSlice(int[] indexesToKeep) { return new TimeSeries(getVSliceArray(indexesToKeep)); } /** * Returns a new TimeSeries object containing only the value at the index * passed. * * @param indexToKeep from series * @return a new TimeSeries object */ public TimeSeries getVSlice(int indexToKeep) { return getVSlice(new int[] {indexToKeep}); } /** * Returns a new TimeSeries object containing only values at indexes passed. * * @param indexesToKeep from series * @return a new TimeSeries object */ public TimeSeries getVSlice(List<Integer> indexesToKeep) { return getVSlice(indexesToKeep.stream().mapToInt(Integer::intValue).toArray()); } /** * Returns a new TimeSeries object containing all values apart from index * passed. * * @param indexToRemove from series * @return a new TimeSeries object */ public TimeSeries getVSliceComplement(int indexToRemove) { return getVSliceComplement(new int[] {indexToRemove}); } /** * Returns a new TimeSeries object containing all values apart from indexes * passed. 
* * @param indexesToRemove from series * @return a new TimeSeries object */ public TimeSeries getVSliceComplement(int[] indexesToRemove) { return new TimeSeries(getVSliceComplementArray(indexesToRemove)); } /** * Returns a new TimeSeries object containing all values apart from indexes * passed. * * @param indexesToRemove from series * @return a new TimeSeries object */ public TimeSeries getVSliceComplement(List<Integer> indexesToRemove) { return getVSliceComplement(indexesToRemove.stream().mapToInt(Integer::intValue).toArray()); } /** * Returns a list of series containing all values apart from indexes passed. * * This is useful if you want to delete a column/truncate the array, but * without modifying the original dataset. * * @param indexesToRemove from series * @return a list of new series */ public List<Double> getVSliceComplementList(List<Integer> indexesToRemove){ //if the current index isn't in the removal list, then copy across. List<Double> out = new ArrayList<>(this.getSeriesLength() - indexesToRemove.size()); for(int i=0; i<this.getSeriesLength(); ++i){ if(!indexesToRemove.contains(i)) out.add(getOrDefault(i)); } return out; } /** * Returns a list of series containing all values apart from indexes passed. * * @param indexesToRemove from series * @return a list of new series */ public List<Double> getVSliceComplementList(int[] indexesToRemove) { return getVSliceComplementList(Arrays.stream(indexesToRemove).boxed().collect(Collectors.toList())); } /** * Returns a list of series containing all values apart from index passed. * * @param indexToRemove from series * @return a list of new series */ public List<Double> getVSliceComponentList(int indexToRemove) { return getVSliceComplementList(new int[] {indexToRemove}); } /** * Returns an array of series containing all values apart from indexes passed. 
* * @param indexesToRemove from series * @return an array of new series */ public double[] getVSliceComplementArray(int[] indexesToRemove){ return getVSliceComplementArray(Arrays.stream(indexesToRemove).boxed().collect(Collectors.toList())); } /** * Returns an array of series containing all values apart from indexes passed. * * @param indexesToRemove from series * @return an array of new series */ public double[] getVSliceComplementArray(List<Integer> indexesToRemove){ return getVSliceComplementList(indexesToRemove).stream().mapToDouble(Double::doubleValue).toArray(); } /** * Returns an array of series containing all values apart from index passed. * * @param indexToRemove from series * @return an array of new series */ public double[] getVSliceComplementArray(int indexToRemove) { return getVSliceComplementArray(new int[] {indexToRemove}); } /** * Returns a list of series containing only values at indexes passed. * * @param indexesToKeep from series * @return a list of new series */ public List<Double> getVSliceList(List<Integer> indexesToKeep){ //if the current index isn't in the removal list, then copy across. List<Double> out = new ArrayList<>(indexesToKeep.size()); for(int i=0; i<this.getSeriesLength(); ++i){ if(indexesToKeep.contains(i)) out.add(getOrDefault(i)); } return out; } /** * Returns a list of series containing only values at indexes passed. * * @param indexesToKeep from series * @return a list of new series */ public List<Double> getVSliceList(int[] indexesToKeep) { return getVSliceList(Arrays.stream(indexesToKeep).boxed().collect(Collectors.toList())); } /** * Returns a list of series containing only the value at index passed. * * @param indexToKeep from series * @return a list of new series */ public List<Double> getVSliceList(int indexToKeep) { return getVSliceList(new int[] {indexToKeep}); } /** * Returns an array of series containing only the value at index passed. 
* * @param indexToKeep from series * @return a list of new series */ public double[] getVSliceArray(int indexToKeep) { return getVSliceArray(new int[] {indexToKeep}); } /** * Returns an array of series containing only values at indexes passed. * * @param indexesToKeep from series * @return a list of new series */ public double[] getVSliceArray(int[] indexesToKeep) { return getVSliceArray(Arrays.stream(indexesToKeep).boxed().collect(Collectors.toList())); } /** * Returns an array of series containing only values at indexes passed. * * @param indexesToKeep from series * @return a list of new series */ public double[] getVSliceArray(List<Integer> indexesToKeep) { return getVSliceList(indexesToKeep).stream().mapToDouble(Double::doubleValue).toArray(); } /** * Returns an iterator, iterating over the series. * * @return series iterator */ @Override public Iterator<Double> iterator() { return series.iterator(); } /** * Returns a list of the portion of the series between the specified start, * inclusive, and end, exclusive. * * @param startInclusive index to start from (inclusive) * @param endExclusive index to end from (exclusive) * @return Sliding window of series */ public List<Double> getVSliceList(int startInclusive, int endExclusive) { return series.subList(startInclusive, endExclusive); } /** * Returns an array of the portion of the series between the specified start, * inclusive, and end, exclusive. * * @param startInclusive index to start from (inclusive) * @param endExclusive index to end from (exclusive) * @return Sliding window of series */ public double[] getVSliceArray(int startInclusive, int endExclusive) { return getVSliceList(startInclusive, endExclusive).stream().mapToDouble(d -> d).toArray(); } /** * Returns a new TimeSeries object containing a portion of the series between * the specified start, inclusive, and end, exclusive. 
* * @param startInclusive index to start from (inclusive) * @param endExclusive index to end from (exclusive) * @return Sliding window of series */ public TimeSeries getVSlice(int startInclusive, int endExclusive) { final TimeSeries ts = new TimeSeries(); ts.series = getVSliceList(startInclusive, endExclusive); return ts; } /** * Returns whether a TimeSeries object is equal to another based if the series * are exactly the same. * * @param other object * @return true if equal, false if not */ @Override public boolean equals(final Object other) { if (!(other instanceof TimeSeries)) { return false; } final TimeSeries that = (TimeSeries) other; return Objects.equals(series, that.series); } /** * Returns an int of the hash code based on the series. * * @return hash code */ @Override public int hashCode() { return Objects.hash(series); } /** * Example */ public static void main(String[] args) { TimeSeries ts = new TimeSeries(new double[]{1,2,3,4}); } }
14,190
29.584052
139
java
tsml-java
tsml-java-master/src/main/java/tsml/data_containers/TimeSeriesInstance.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.data_containers; import java.io.Serializable; import java.util.*; import java.util.stream.Collectors; import java.util.stream.Stream; /** * Data structure able to store a time series instance. it can be standard * (univariate, no missing, equally sampled series) or complex (multivariate, * unequal length, unequally spaced, univariate or multivariate time series). * * Should Instances be immutable after creation? Meta data is calculated on * creation, mutability can break this * * @author Aaron Bostrom, 2020 */ public class TimeSeriesInstance implements Iterable<TimeSeries>, Serializable { /* Meta Information */ private boolean isMultivariate; private boolean isEquallySpaced; // todo compute whether timestamps are equally spaced private boolean hasMissing; private boolean isEqualLength; private int minLength; private int maxLength; /** * Returns whether data is multivariate. * * @return true if multivariate, false if not */ public boolean isMultivariate() { return isMultivariate; } /** * Returns whether data is equally spaced. * * @return true if equally spaced, false if not */ public boolean isEquallySpaced() { return isEquallySpaced; } /** * Returns whether data has missing values in. 
* * @return true if missing values, false if not */ public boolean hasMissing() { return hasMissing; } /** * Returns whether data is equal length. * * @return true if equal length, false if not */ public boolean isEqualLength() { return isEqualLength; } /** * Returns the minimum length of the data. * * @return minimum length */ public int getMinLength() { return minLength; } /** * Returns the maximum length of the data. * * @return maximum length */ public int getMaxLength() { return maxLength; } /* End Meta Information */ /* Data */ private List<TimeSeries> seriesDimensions; private int labelIndex = -1; private double targetValue = Double.NaN; /** * Create a TimeSeriesInstance object from a target value and a list of time * series data. For regression. * * @param targetValue value * @param series raw data */ public TimeSeriesInstance(double targetValue, List<? extends TimeSeries> series) { this.seriesDimensions = new ArrayList<>(series); this.targetValue = targetValue; dataChecks(); } /** * Create a TimeSeriesInstance object from a label index and a list of time * series data. For classification. * * @param labelIndex of class label * @param series raw data */ public TimeSeriesInstance(int labelIndex, List<? extends TimeSeries> series) { this.seriesDimensions = new ArrayList<>(series); this.labelIndex = labelIndex; dataChecks(); } /** * Construct a labelled instance from raw data. * * @param series raw data * @param label target */ public TimeSeriesInstance(List<? extends List<Double>> series, int label) { this(series, Double.NaN); targetValue = labelIndex = label; dataChecks(); } public TimeSeriesInstance(List<? extends List<Double>> series, double targetValue) { // process the input list to produce TimeSeries Objects. // this allows us to pad if need be, or if we want to squarify the data etc. 
seriesDimensions = new ArrayList<TimeSeries>(); for (List<Double> ts : series) { seriesDimensions.add(new TimeSeries(ts)); } this.targetValue = targetValue; dataChecks(); } /** * Construct an regressed instance from raw data. * * @param data series * @param targetValue */ public TimeSeriesInstance(double[][] data, double targetValue) { seriesDimensions = new ArrayList<TimeSeries>(); for (double[] in : data) { seriesDimensions.add(new TimeSeries(in)); } this.targetValue = targetValue; dataChecks(); } public TimeSeriesInstance(double[] singleDimension) { this(new double[][] {singleDimension}); } /** * Construct an labelled instance from raw data. * * @param data series * @param labelIndex * @param classLabels */ public TimeSeriesInstance(double[][] data, int labelIndex, String[] classLabels) { seriesDimensions = new ArrayList<TimeSeries>(); for (double[] in : data) { seriesDimensions.add(new TimeSeries(in)); } targetValue = this.labelIndex = labelIndex; dataChecks(); } /** * Construct an regressed instance from raw data. * * @param data * @param labelIndex */ public TimeSeriesInstance(double[][] data, int labelIndex) { seriesDimensions = new ArrayList<TimeSeries>(); for (double[] in : data) { seriesDimensions.add(new TimeSeries(in)); } this.labelIndex = labelIndex; dataChecks(); } /** * Retarget the class label, shallow copying the data from another inst. * @param other * @param classLabelIndex */ public TimeSeriesInstance(TimeSeriesInstance other, int classLabelIndex) { seriesDimensions = other.seriesDimensions; labelIndex = classLabelIndex; targetValue = classLabelIndex; dataChecks(); } /** * Returns a discretised label index. * * @param labelIndex to discretise * @return discretised label index */ public static int discretiseLabelIndex(double labelIndex) { final int i; if (Double.isNaN(labelIndex)) { i = -1; } else { i = (int) labelIndex; /* Check the given double is an integer, i.e. 3.0 == 3. 
Protects against abuse through implicit label indexing integer casting, i.e. 3.3 --> 3. The user should do this themselves, otherwise it's safest to assume a non-integer value (e.g. 7.4) is an error and raise exception. */ if (labelIndex != i) { throw new IllegalArgumentException("cannot discretise " + labelIndex + " to an int: " + i); } } return i; } /** * Construct a labelled instance from raw data with label in double form * (but should be an integer value). * * @param data * @param labelIndex * @param classLabels */ public TimeSeriesInstance(double[][] data, double labelIndex, String[] classLabels) { this(data, discretiseLabelIndex(labelIndex), classLabels); } /** * Construct an instance from raw data. Copies over regression target / * labelling variables. This is only intended for internal use in avoiding * copying the data again after a vslice / hslice. * * @param data series * @param other TimeSeriesInstance */ private TimeSeriesInstance(double[][] data, TimeSeriesInstance other) { this(data, Double.NaN); labelIndex = other.labelIndex; targetValue = other.targetValue; dataChecks(); } /** * Create a TimeSeriesInstance object from raw data. * * @param data series */ public TimeSeriesInstance(double[][] data) { this(data, Double.NaN); } /** * Create a TimeSeriesInstance object from raw data. * * @param data series */ public TimeSeriesInstance(List<? extends List<Double>> data) { this(data, Double.NaN); } private TimeSeriesInstance() {} public TimeSeriesInstance(double targetValue, TimeSeries[] data) { this(targetValue, Arrays.asList(data)); } public TimeSeriesInstance(int labelIndex, TimeSeries[] data) { this(labelIndex, Arrays.asList(data)); } /** * Performs data checks to calculate what types of data are inside. */ private void dataChecks() { if (seriesDimensions == null) { throw new NullPointerException("no series dimensions"); } calculateIfMultivariate(); calculateLengthBounds(); calculateIfMissing(); } /** * Calculates whether the data is multivariate. 
*/ private void calculateIfMultivariate() { isMultivariate = seriesDimensions.size() > 1; } /** * Calculates the length bounds of the data. * (Minimum length, maximum length and equal length) */ private void calculateLengthBounds() { minLength = seriesDimensions.stream().mapToInt(TimeSeries::getSeriesLength).min().getAsInt(); maxLength = seriesDimensions.stream().mapToInt(TimeSeries::getSeriesLength).max().getAsInt(); isEqualLength = minLength == maxLength; } /** * Calculates whether the data has missing values. */ private void calculateIfMissing() { // if any of the series have a NaN value, across all dimensions then this is true. hasMissing = seriesDimensions.stream().anyMatch(e -> e.streamValues().anyMatch(Double::isNaN)); } /** * Returns how many dimensions there are in the series. * * @return number of dimensions */ public int getNumDimensions() { return seriesDimensions.size(); } /** * Returns the label index. * * @return label index */ public int getLabelIndex() { return labelIndex; } /** * Returns a list of values from each dimension in the series at the index. * * @param dimensionIndex to get values from * @return a list of values */ public List<Double> getVSliceList(int dimensionIndex) { List<Double> out = new ArrayList<>(getNumDimensions()); for (TimeSeries ts : seriesDimensions) { out.add(ts.getValue(dimensionIndex)); } return out; } /** * Returns an array of values from the dimension in the series at the index. * * @param dimensionIndex to get values from * @return an array of values */ public double[] getVSliceArray(int dimensionIndex) { double[] out = new double[getNumDimensions()]; int i = 0; for (TimeSeries ts : seriesDimensions) { out[i++] = ts.getValue(dimensionIndex); } return out; } /** * Returns a 2d list: a list for each dimension; of values at the indexes. 
* * @param indexesToKeep to get values from * @return a 2d list of values */ public List<List<Double>> getVSliceList(int[] indexesToKeep) { return getVSliceList(Arrays.stream(indexesToKeep).boxed().collect(Collectors.toList())); } /** * Returns a 2d list: a list for each dimension; of values at the indexes. * * @param indexesToKeep to get values from * @return a 2d list of values */ public List<List<Double>> getVSliceList(List<Integer> indexesToKeep) { List<List<Double>> out = new ArrayList<>(getNumDimensions()); for (TimeSeries ts : seriesDimensions) { out.add(ts.getVSliceList(indexesToKeep)); } return out; } /** * Returns a 2d array: an array for each dimension; of values at the indexes. * * @param indexesToKeep to get values from * @return a 2d array of values */ public double[][] getVSliceArray(int[] indexesToKeep) { return getVSliceArray(Arrays.stream(indexesToKeep).boxed().collect(Collectors.toList())); } /** * Returns a 2d array: an array for each dimension; of values at the indexes. * * @param indexesToKeep to get values from * @return a 2d array of values */ public double[][] getVSliceArray(List<Integer> indexesToKeep) { double[][] out = new double[getNumDimensions()][]; int i = 0; for (TimeSeries ts : seriesDimensions) { out[i++] = ts.getVSliceArray(indexesToKeep); } return out; } /** * Returns a TimeSeriesInstance containing values at the indexes from each * dimension. * * @param indexesToKeep to get values from * @return a new TimeSeriesInstance */ public TimeSeriesInstance getVSlice(List<Integer> indexesToKeep) { return new TimeSeriesInstance(getVSliceArray(indexesToKeep), this); } /** * Returns a TimeSeriesInstance containing values at the indexes from each * dimension. 
* * @param indexesToKeep to get values from * @return a new TimeSeriesInstance */ public TimeSeriesInstance getVSlice(int[] indexesToKeep) { return getVSlice(Arrays.stream(indexesToKeep).boxed().collect(Collectors.toList())); } /** * Returns a TimeSeriesInstance containing values at the index from each * dimension. * * @param index to get values from * @return a new TimeSeriesInstance */ public TimeSeriesInstance getVSlice(int index) { return getVSlice(new int[]{index}); } public TimeSeries getHSliceTS(int index) { return getHSliceTS(Collections.singletonList(index)).get(0); } public List<TimeSeries> getHSliceTS(List<Integer> indices) { final List<TimeSeries> result = new ArrayList<>(); for(Integer i : indices) { result.add(seriesDimensions.get(i)); } return result; } public List<TimeSeries> getHSliceTS(int fromInclusive, int toExclusive) { return seriesDimensions.subList(fromInclusive, toExclusive); } public TimeSeries[] getHSliceTS(int[] indices) { return getHSliceTS(Arrays.stream(indices).boxed().collect(Collectors.toList()).stream().mapToInt(i -> i).toArray()); } /** * Returns the series at the dimension passed. * * @param dim to get * @return list of series */ public List<Double> getHSliceList(int dim) { return seriesDimensions.get(dim).getSeries(); } /** * Returns the series at the dimension passed. * * @param dim to get * @return array of series */ public double[] getHSliceArray(int dim) { return seriesDimensions.get(dim).toValueArray(); } /** * Returns a 2d list: a list for each dimension index passed; containing the * series. * * TODO: not a clone. may need to be careful... * * @param dimensionsToKeep indexes * @return 2d list of series */ public List<List<Double>> getHSliceList(int[] dimensionsToKeep) { return getHSliceList(Arrays.stream(dimensionsToKeep).boxed().collect(Collectors.toList())); } /** * Returns a 2d list: a list for each dimension index passed; containing the * series. * * TODO: not a clone. may need to be careful... 
* * @param dimensionsToKeep indexes * @return 2d list of series */ public List<List<Double>> getHSliceList(List<Integer> dimensionsToKeep) { List<List<Double>> out = new ArrayList<>(dimensionsToKeep.size()); for (Integer dim : dimensionsToKeep) out.add(seriesDimensions.get(dim).getSeries()); return out; } /** * Returns a 2d array: an array for each dimension index passed; containing the * series. * * @param dimensionsToKeep indexes * @return 2d array of series */ public double[][] getHSliceArray(int[] dimensionsToKeep) { return getHSliceArray(Arrays.stream(dimensionsToKeep).boxed().collect(Collectors.toList())); } /** * Returns a 2d array: an array for each dimension index passed; containing the * series. * * @param dimensionsToKeep indexes * @return 2d array of series */ public double[][] getHSliceArray(List<Integer> dimensionsToKeep) { double[][] out = new double[dimensionsToKeep.size()][]; int i = 0; for (Integer dim : dimensionsToKeep) { out[i++] = seriesDimensions.get(dim).toValueArray(); } return out; } /** * Returns a TimeSeriesInstance containing each dimension of series from * indexes passed. * * @param dimensionsToKeep indexes * @return a new TimeSeriesInstance */ public TimeSeriesInstance getHSlice(List<Integer> dimensionsToKeep) { if(seriesDimensions.size() == 1 && dimensionsToKeep.size() == 1 && dimensionsToKeep.get(0) == 0) { return this; } final TimeSeriesInstance tsi = new TimeSeriesInstance(); tsi.seriesDimensions = new ArrayList<>(dimensionsToKeep.size()); for(Integer i : dimensionsToKeep) { tsi.seriesDimensions.add(seriesDimensions.get(i)); } tsi.targetValue = targetValue; tsi.labelIndex = labelIndex; tsi.dataChecks(); return tsi; } /** * Returns a TimeSeriesInstance containing each dimension of series from * indexes passed. 
* * @param dimensionsToKeep indexes * @return a new TimeSeriesInstance */ public TimeSeriesInstance getHSlice(int[] dimensionsToKeep) { return getHSlice(Arrays.stream(dimensionsToKeep).boxed().collect(Collectors.toList())); } /** * Returns a TimeSeriesInstance containing the dimension of series from * index passed. * * @param dimensionToKeep indexes * @return a new TimeSeriesInstance */ public TimeSeriesInstance getHSlice(int dimensionToKeep) { return getHSlice(new int[]{dimensionToKeep}); } /** * Returns a string containing: * num dimensions and class label index * then for each dimension: the series * * @return instance info, then for each dimension: series */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("Num Dimensions: ").append(getNumDimensions()).append(" Class Label Index: ").append(labelIndex); for (TimeSeries ts : seriesDimensions) { sb.append(System.lineSeparator()); sb.append(ts.toString()); } return sb.toString(); } /** * Returns a 2d array, containing all dimensions and series values. * * @return each dimension of series */ public double[][] toValueArray() { double[][] output = new double[this.seriesDimensions.size()][]; for (int i = 0; i < output.length; ++i) { //clone the data so the underlying representation can't be modified output[i] = seriesDimensions.get(i).toValueArray(); } return output; } /** * Returns a transposed 2d array. * * @return transposed array */ public double[][] toTransposedArray() { double[][] original = this.toValueArray(); double[][] transposed = new double[maxLength][seriesDimensions.size()]; // for each dimension for (int i = 0; i < seriesDimensions.size(); i++) { // for each value in series for (int j = 0; j < maxLength; j++) { transposed[j][i] = original[i][j]; } } return transposed; } /** * Returns the TimeSeries object at the index. 
* * @param index to get * @return TimeSeries object */ public TimeSeries get(int index) { return this.seriesDimensions.get(index); } /** * Returns the target value. * * @return target value */ public double getTargetValue() { return targetValue; } /** * Returns an iterator, iterating over the each dimension of series. * * @return dimension iterator */ @Override public Iterator<TimeSeries> iterator() { return seriesDimensions.iterator(); } /** * Returns a Stream of TimeSeries objects. * * @return Stream of TimeSeries. */ public Stream<TimeSeries> stream() { return seriesDimensions.stream(); } /** * Returns a new TimeSeriesInstance object containing the dimensions from * the specified start, inclusive, and end, exclusive. * * @param startInclusive index of dimension to start from (inclusive) * @param endExclusive index of dimension to end at (exclusive) * @return new TimeSeriesInstance object */ public TimeSeriesInstance getHSlice(int startInclusive, int endExclusive) { // copy construct a new inst final TimeSeriesInstance tsi = new TimeSeriesInstance(); tsi.labelIndex = labelIndex; tsi.targetValue = targetValue; // trim current data to a subset tsi.seriesDimensions = seriesDimensions.subList(startInclusive, endExclusive); tsi.dataChecks(); return tsi; } /** * Returns a 2d list, containing the dimensions from the specified start, * inclusive and end, exclusive. * * @param startInclusive index of dimension to start from (inclusive) * @param endExclusive index of dimension to end at (exclusive) * @return 2d list */ public List<List<Double>> getHSliceList(int startInclusive, int endExclusive) { return seriesDimensions.subList(startInclusive, endExclusive).stream().map(TimeSeries::getSeries).collect(Collectors.toList()); } /** * Returns a 2d array, containing the dimensions from the specified start, * inclusive and end, exclusive. 
* * @param startInclusive index of dimension to start from (inclusive) * @param endExclusive index of dimension to end at (exclusive) * @return 2d array */ public double[][] getHSliceArray(int startInclusive, int endExclusive) { return getHSliceList(startInclusive, endExclusive).stream().map(dim -> dim.stream().mapToDouble(d -> d).toArray()).toArray(double[][]::new); } /** * Returns a 2d list, containing all dimensions but series cut between the * specified start, inclusive, and end, exclusive. * * @param startInclusive index to start from (inclusive) * @param endExclusive index to end from (exclusive) * @return 2d list */ public List<List<Double>> getVSliceList(int startInclusive, int endExclusive) { return seriesDimensions.stream().map(dim -> dim.getVSliceList(startInclusive, endExclusive)).collect(Collectors.toList()); } /** * Returns a 2d array, containing all dimensions but series cut between the * specified start, inclusive, and end, exclusive. * * @param startInclusive index to start from (inclusive) * @param endExclusive index to end from (exclusive) * @return 2d array */ public double[][] getVSliceArray(int startInclusive, int endExclusive) { return getVSliceList(startInclusive, endExclusive).stream().map(dim -> dim.stream().mapToDouble(d -> d).toArray()).toArray(double[][]::new); } /** * Returns a new TimeSeriesInstance object, containing all dimensions but * series cut between the specified start, inclusive, and end, exclusive. 
* * @param startInclusive index to start from (inclusive) * @param endExclusive index to end from (exclusive) * @return new TimeSeriesInstance object */ public TimeSeriesInstance getVSlice(int startInclusive, int endExclusive) { // copy construct a new inst final TimeSeriesInstance tsi = new TimeSeriesInstance(); // trim current data to a subset tsi.seriesDimensions = seriesDimensions.stream().map(dim -> dim.getVSlice(startInclusive, endExclusive)).collect(Collectors.toList()); tsi.labelIndex = labelIndex; tsi.targetValue = labelIndex; tsi.dataChecks(); return tsi; } /** * Returns whether a TimeSeriesInstance object is equal to another based if * label index is equal, target value is equal and series are equal. * * @param other object * @return true if equal, false if not */ @Override public boolean equals(final Object other) { if (!(other instanceof TimeSeriesInstance)) { return false; } final TimeSeriesInstance that = (TimeSeriesInstance) other; return labelIndex == that.labelIndex && Double.compare(that.targetValue, targetValue) == 0 && seriesDimensions.equals(that.seriesDimensions); } /** * Returns an int of the hash code based on the series and label index. * * @return hash code */ @Override public int hashCode() { return Objects.hash(seriesDimensions, labelIndex); } /** * Returns whether data has label index. * * @return true if label index is set, fasle if not */ public boolean isLabelled() { // is labelled if label index points to a class label return labelIndex >= 0; } /** * Returns whether data is regressed. * * @return true if regressed, false if not */ public boolean isRegressed() { // is regressed if the target value is set return !Double.isNaN(targetValue); } /** * Returns whether data is for a classification problem. 
* * @return true if classification problem, false if not */ public boolean isClassificationProblem() { // if a set of class labels are set then it's a classification problem return labelIndex >= 0; } /** * Returns whether data is for a regression problem. * * @return true if regression problem, false if not */ public boolean isRegressionProblem() { return !isClassificationProblem(); } }
26,784
29.334088
148
java
tsml-java
tsml-java-master/src/main/java/tsml/data_containers/TimeSeriesInstanceTest.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.data_containers; import org.junit.Before; import org.junit.Test; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; public class TimeSeriesInstanceTest { private double[][] arrayA; private List<List<Double>> listA; private double[][] arrayB; private List<List<Double>> listB; private String[] classLabels; private String classLabelA; private String classLabelB; private int classLabelAIndex; private int classLabelBIndex; private TimeSeriesInstance instA; private TimeSeriesInstance instB; @Before public void before() { arrayA = new double[][] { {1,2,3,4}, {5,6,7,8} }; listA = Arrays.stream(arrayA).map(a -> Arrays.stream(a).boxed().collect(Collectors.toList())).collect(Collectors.toList()); arrayB = new double[][] { {9,10,11,12}, {13,14,15,16} }; listB = Arrays.stream(arrayB).map(a -> Arrays.stream(a).boxed().collect(Collectors.toList())).collect(Collectors.toList()); classLabelA = "A"; classLabelB = "B"; classLabels = new String[] {classLabelA, classLabelB}; classLabelAIndex = 0; classLabelBIndex = 1; instA = new TimeSeriesInstance(arrayA, classLabelAIndex, classLabels); instB = new 
TimeSeriesInstance(arrayB, classLabelBIndex, classLabels); } @Test public void testCtorArray() { instB = new TimeSeriesInstance(arrayB); assertEquals(-1, instB.getLabelIndex()); assertEquals(Double.NaN, instB.getTargetValue(), 0d); assert2DArrayEquals(arrayB, instB.toValueArray()); } @Test public void testCtorArrayLabelled() { instB = new TimeSeriesInstance(arrayB, classLabelBIndex, classLabels); assertEquals(classLabelBIndex, instB.getLabelIndex()); assertEquals(classLabelBIndex, instB.getTargetValue(), 0d); assert2DArrayEquals(arrayB, instB.toValueArray()); } @Test public void testToValueArrayA() { final double[][] array = instA.toValueArray(); for(int i = 0; i < array.length; i++) { assertArrayEquals(arrayA[i], array[i], 0d); } } @Test public void testToValueArrayB() { final double[][] array = instB.toValueArray(); for(int i = 0; i < array.length; i++) { assertArrayEquals(arrayB[i], array[i], 0d); } } @Test public void testGet() { for(int i = 0; i < arrayA.length; i++) { assertArrayEquals(arrayA[i], instA.get(i).toValueArray(), 0d); assertArrayEquals(arrayB[i], instB.get(i).toValueArray(), 0d); } } @Test public void testNumDimensions() { assertEquals(arrayA.length, instA.getNumDimensions()); assertEquals(arrayB.length, instB.getNumDimensions()); } @Test public void testCtorArrayRegressed() { // assertEquals(instA, new TimeSeriesInstance(array, 3d)); // todo } @Test public void testCtorList() { instB = new TimeSeriesInstance(listB); assertEquals(-1, instB.getLabelIndex()); assertEquals(Double.NaN, instB.getTargetValue(), 0d); assert2DArrayEquals(arrayB, instB.toValueArray()); } @Test public void testCtorListRegressed() { // instB = new TimeSeriesInstance(list, classLabelBIndex, classLabels); // assertEquals(instA, instB); // todo } @Test public void testClassLabel() { assertEquals(classLabelAIndex, instA.getLabelIndex()); assertEquals(classLabelBIndex, instB.getLabelIndex()); } // todo test target value (do in ctors?) 
// todo test hslice // todo test vslice // todo test metadata / stats public static void assert2DArrayEquals(double[][] expected, double[][] actual) { for(int i = 0; i < Math.max(expected.length, actual.length); i++) { assertArrayEquals(expected[i], actual[i], 0d); } } }
4,965
32.328859
131
java
tsml-java
tsml-java-master/src/main/java/tsml/data_containers/TimeSeriesInstances.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.data_containers; import java.io.Serializable; import java.util.*; import java.util.stream.Collectors; import java.util.stream.Stream; /** * Data structure able to handle unequal length, unequally spaced, univariate or * multivariate time series. * * @author Aaron Bostrom, 2020 */ public class TimeSeriesInstances implements Iterable<TimeSeriesInstance>, Serializable { /* Meta Information */ private String description = ""; private String problemName = "default"; private boolean isEquallySpaced = true; private boolean hasMissing; private boolean isEqualLength; private boolean isMultivariate; private boolean hasTimeStamps; // this could be by dimension, so could be a list. private int minLength; private int maxLength; private int maxNumDimensions; /** * Returns the highest number of dimensions from the instances in the data. * * @return highest number of dimensions */ public int getMaxNumDimensions() { return maxNumDimensions; } /** * Returns the problem name. * * @return String problem name */ public String getProblemName() { return problemName; } /** * Returns whether the data has time stamps or not. 
* * @return boolean true if has time stamps, false if not */ public boolean hasTimeStamps() { return hasTimeStamps; } /** * Returns whether data has missing values in. * * @return true if missing values, false if not */ public boolean hasMissing() { return hasMissing; } /** * Returns whether data is equally spaced. * * @return true if equally spaced, false if not */ public boolean isEquallySpaced() { return isEquallySpaced; } /** * Returns whether data is multivariate. * * @return true if multivariate, false if not */ public boolean isMultivariate() { return isMultivariate; } /** * Returns whether data is equal length. * * @return true if equal length, false if not */ public boolean isEqualLength() { return isEqualLength; } /** * Returns the minimum length of the data. * * @return minimum length */ public int getMinLength() { return minLength; } /** * Returns the maximum length of the data. * * @return maximum length */ public int getMaxLength() { return maxLength; } /** * Returns the number of class labels in the data. * * @return number of class labels */ public int numClasses() { return classLabels.length; } /** * Sets the problem name to the one passed. * * @param problemName to set */ public void setProblemName(String problemName) { this.problemName = problemName; } /** * Returns the description of the data if set, or null if not. * * @return description of data */ public String getDescription() { return description; } /** * Sets the description of the data. * * @param description of data */ public void setDescription(String description) { this.description = description; } /* End Meta Information */ private List<TimeSeriesInstance> seriesCollection = new ArrayList<>(); // mapping for class labels. so ["apple","orange"] => [0,1] // this could be optional for example regression problems. 
public static String[] EMPTY_CLASS_LABELS = new String[0]; private String[] classLabels = EMPTY_CLASS_LABELS; private int[] classCounts; public TimeSeriesInstances(final String[] classLabels) { this.classLabels = classLabels; minLength = Integer.MAX_VALUE; maxLength = 0; maxNumDimensions = 0; } public TimeSeriesInstances(final List<? extends List<? extends List<Double>>> rawData, List<Double> targetValues) { int index = 0; for (final List<? extends List<Double>> series : rawData) { //using the add function means all stats should be correctly counted. seriesCollection.add(new TimeSeriesInstance(series, targetValues.get(index++))); } dataChecks(); } public TimeSeriesInstances(final List<? extends List<? extends List<Double>>> rawData, String[] classLabels, final List<Double> labelIndices) { this(rawData, labelIndices.stream().map(TimeSeriesInstance::discretiseLabelIndex).collect(Collectors.toList()), classLabels); } public TimeSeriesInstances(final List<? extends List<? extends List<Double>>> rawData, final List<Integer> labelIndexes, String[] classLabels) { this.classLabels = classLabels; int index = 0; for (final List<? extends List<Double>> series : rawData) { //using the add function means all stats should be correctly counted. seriesCollection.add(new TimeSeriesInstance(series, labelIndexes.get(index++).intValue())); } dataChecks(); } public TimeSeriesInstances(double[][][] rawData, double[] labelIndices, String[] labels) { this(rawData, Arrays.stream(labelIndices).mapToInt(TimeSeriesInstance::discretiseLabelIndex).toArray(), labels); } public TimeSeriesInstances(double[][][] rawData, double[] targetValues) { int index = 0; for (double[][] series : rawData) { //using the add function means all stats should be correctly counted. 
seriesCollection.add(new TimeSeriesInstance(series, targetValues[index++])); } } public TimeSeriesInstances(final double[][][] rawData, int[] labelIndexes, String[] labels) { classLabels = labels; int index = 0; for (double[][] series : rawData) { //using the add function means all stats should be correctly counted. seriesCollection.add(new TimeSeriesInstance(series, labelIndexes[index++], classLabels)); } dataChecks(); } public TimeSeriesInstances(List<? extends TimeSeriesInstance> data) { this(data, EMPTY_CLASS_LABELS); } public TimeSeriesInstances(List<? extends TimeSeriesInstance> data, String[] classLabels) { this.classLabels = classLabels; seriesCollection.addAll(data); dataChecks(); } public TimeSeriesInstances(TimeSeriesInstance[] data, String[] classLabels) { this(Arrays.asList(data), classLabels); } public TimeSeriesInstances(TimeSeriesInstance[] data) { this(Arrays.asList(data)); } private void dataChecks() { if (seriesCollection == null) { throw new NullPointerException("no series collection"); } if (classLabels == null) { throw new NullPointerException("no class labels"); } calculateLengthBounds(); calculateIfMissing(); calculateIfMultivariate(); calculateNumDimensions(); } private void calculateClassCounts() { classCounts = new int[classLabels.length]; for (TimeSeriesInstance inst : seriesCollection) { classCounts[inst.getLabelIndex()]++; } } private void calculateLengthBounds() { minLength = seriesCollection.stream().mapToInt(TimeSeriesInstance::getMinLength).min().orElse(-1); maxLength = seriesCollection.stream().mapToInt(TimeSeriesInstance::getMaxLength).max().orElse(-1); isEqualLength = minLength == maxLength; } private void calculateNumDimensions() { maxNumDimensions = seriesCollection.stream().mapToInt(TimeSeriesInstance::getNumDimensions).max().getAsInt(); } private void calculateIfMultivariate() { isMultivariate = seriesCollection.stream().map(TimeSeriesInstance::isMultivariate).anyMatch(Boolean::booleanValue); } private void 
calculateIfMissing() { // if any of the instance have a missing value then this is true. hasMissing = seriesCollection.stream().map(TimeSeriesInstance::hasMissing).anyMatch(Boolean::booleanValue); } /** * Returns a String array containing the class labels. * * @return array containing class labels */ public String[] getClassLabels() { return classLabels; } /** * Returns a string containing all of the class labels separated by a space. * * @return class labels formatted */ public String getClassLabelsFormatted() { StringBuilder output = new StringBuilder(" "); for (String s : classLabels) output.append(s).append(" "); return output.toString(); } /** * Returns an array containing the counter of each class. * * @return an array of class counts */ public int[] getClassCounts() { calculateClassCounts(); return classCounts; } /** * Adds a new TimeSeriesInstance to the data. * * @param newSeries to add */ public void add(final TimeSeriesInstance newSeries) { seriesCollection.add(newSeries); //guard for if we're going to force update classCounts after. 
if (classCounts != null && newSeries.getLabelIndex() < classCounts.length) classCounts[newSeries.getLabelIndex()]++; if(seriesCollection.size() == 1) { // was empty / this is the first inst being added to the list // therefore metadata is set to -1's // need to change minLength to a very large number else the -1 invalid value is always the minimum length minLength = Integer.MAX_VALUE; } minLength = Math.min(newSeries.getMinLength(), minLength); maxLength = Math.max(newSeries.getMaxLength(), maxLength); maxNumDimensions = Math.max(newSeries.getNumDimensions(), maxNumDimensions); hasMissing |= newSeries.hasMissing(); isEqualLength = minLength == maxLength; isMultivariate |= newSeries.isMultivariate(); } /** * Returns a string containing: * class labels * then for each dimension: * - num dimensions, class label index * - the series * * @return class labels, then for instance: num dimensions, class label index * and the series */ @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("Labels: [").append(classLabels[0]); for (int i = 1; i < classLabels.length; i++) { sb.append(','); sb.append(classLabels[i]); } sb.append(']').append(System.lineSeparator()); for (final TimeSeriesInstance series : seriesCollection) { sb.append(series.toString()); sb.append(System.lineSeparator()); } return sb.toString(); } /** * Returns a 3d array, first index is which instance, 2nd index is which * dimension in the instance, 3rd index is the array of values in that dimension. * * @return values in 3d array format */ public double[][][] toValueArray() { final double[][][] output = new double[seriesCollection.size()][][]; for (int i = 0; i < output.length; ++i) { // clone the data so the underlying representation can't be modified output[i] = seriesCollection.get(i).toValueArray(); } return output; } /** * Returns an array containing each class index. 
* * @return array of class indexes */ public int[] getClassIndexes() { int[] out = new int[numInstances()]; int index = 0; for (TimeSeriesInstance inst : seriesCollection) { out[index++] = inst.getLabelIndex(); } return out; } /** * Returns an array containing the value(s) from each instance at the index * passed. * * Assumes equal number of dimensions. * * @param index to get data from * @return array of values at index */ public double[] getVSliceArray(int index) { double[] out = new double[numInstances() * seriesCollection.get(0).getNumDimensions()]; int i = 0; for (TimeSeriesInstance inst : seriesCollection) { for (TimeSeries ts : inst) // if the index isn't always valid, populate with NaN values. out[i++] = ts.hasValidValueAt(index) ? ts.getValue(index) : Double.NaN; } return out; } /** * Returns a 3d List containing the values at the indexes passes from each * instance, including all dimensions within each instance. * * @param indexesToKeep to get * @return 3d list of values at indexes passed */ public List<List<List<Double>>> getVSliceList(int[] indexesToKeep) { return getVSliceList(Arrays.stream(indexesToKeep).boxed().collect(Collectors.toList())); } /** * Returns a 3d List containing the values at the indexes passes from each * instance, including all dimensions within each instance. * * @param indexesToKeep to get * @return 3d list of values at indexes passed */ public List<List<List<Double>>> getVSliceList(List<Integer> indexesToKeep) { List<List<List<Double>>> out = new ArrayList<>(numInstances()); for (TimeSeriesInstance inst : seriesCollection) { out.add(inst.getVSliceList(indexesToKeep)); } return out; } /** * Returns a 3d array containing the values at the indexes passes from each * instance, including all dimensions within each instance. 
* * @param indexesToKeep to get * @return 3d array of values at indexes passed */ public double[][][] getVSliceArray(int[] indexesToKeep) { return getVSliceArray(Arrays.stream(indexesToKeep).boxed().collect(Collectors.toList())); } /** * Returns a 3d array containing the values at the indexes passes from each * instance, including all dimensions within each instance. * * @param indexesToKeep to get * @return 3d array of values at indexes passed */ public double[][][] getVSliceArray(List<Integer> indexesToKeep) { double[][][] out = new double[numInstances()][][]; int i = 0; for (TimeSeriesInstance inst : seriesCollection) { out[i++] = inst.getVSliceArray(indexesToKeep); } return out; } /** * Returns a 2d array containing the values at each instance at the dimension * passed. e.g. passing '2' will give the values from each instance at the 3rd * dimension. * * Assumes equal number of dimensions. * * @param dimensionToKeep to get * @return 2d array of values */ public double[][] getHSliceArray(int dimensionToKeep) { double[][] out = new double[numInstances()][]; int i = 0; for (TimeSeriesInstance inst : seriesCollection) { // if the index isn't always valid, populate with NaN values. out[i++] = inst.getHSliceArray(dimensionToKeep); } return out; } /** * Returns a 3d list containing the values for each instance, at the * dimensions passed. e.g. '[0, 1]' would return the values for every instance * at the first and second dimensions. * * @param dimensionsToKeep to get * @return 3d list of values */ public List<List<List<Double>>> getHSliceList(int[] dimensionsToKeep) { return getHSliceList(Arrays.stream(dimensionsToKeep).boxed().collect(Collectors.toList())); } /** * Returns a 3d list containing the values for each instance, at the * dimensions passed. e.g. '[0, 1]' would return the values for every instance * at the first and second dimensions. 
* * @param dimensionsToKeep to get * @return 3d list of values */ public List<List<List<Double>>> getHSliceList(List<Integer> dimensionsToKeep) { List<List<List<Double>>> out = new ArrayList<>(numInstances()); for (TimeSeriesInstance inst : seriesCollection) { out.add(inst.getHSliceList(dimensionsToKeep)); } return out; } /** * Returns a 3d array containing the values for each instance, at the * dimensions passed. e.g. '[0, 1]' would return the values for every instance * at the first and second dimensions. * * @param dimensionsToKeep to get * @return 3d array of values */ public double[][][] getHSliceArray(int[] dimensionsToKeep) { return getHSliceArray(Arrays.stream(dimensionsToKeep).boxed().collect(Collectors.toList())); } /** * Returns a 3d array containing the values for each instance, at the * dimensions passed. e.g. '[0, 1]' would return the values for every instance * at the first and second dimensions. * * @param dimensionsToKeep to get * @return 3d array of values */ public double[][][] getHSliceArray(List<Integer> dimensionsToKeep) { double[][][] out = new double[numInstances()][][]; int i = 0; for (TimeSeriesInstance inst : seriesCollection) { out[i++] = inst.getHSliceArray(dimensionsToKeep); } return out; } /** * Returns the TimeSeriesInstance at the index given. * * @param index to get * @return TimeSeriesInstance at index */ public TimeSeriesInstance get(final int index) { return seriesCollection.get(index); } /** * Returns all of the TimeSeriesInstance objects inside. * * @return list of TimeSeriesInstance */ public List<TimeSeriesInstance> getAll() { return seriesCollection; } /** * Returns the number of instances inside. * * @return number of instances. 
*/ public int numInstances() { return seriesCollection.size(); } /** * Creates a Histogram of lengths of the time series * * @return Histogram of lengths */ public Map<Integer, Integer> getHistogramOfLengths() { Map<Integer, Integer> out = new TreeMap<>(); for (TimeSeriesInstance inst : seriesCollection) { for (TimeSeries ts : inst) { out.merge(ts.getSeriesLength(), 1, Integer::sum); } } return out; } /** * Returns an iterator, iterating over the each instance. * * @return instance iterator */ @Override public Iterator<TimeSeriesInstance> iterator() { return seriesCollection.iterator(); } /** * Returns a Stream of TimeSeriesInstance objects. * * @return Stream of TimeSeries. */ public Stream<TimeSeriesInstance> stream() { return seriesCollection.stream(); } /** * Returns a 3d List containing the values, for each instance, for each dimension, * between the start (inclusive) and end (exclusive). * * @param startInclusive to start at (inclusive) * @param endExclusive to end at (exclusive) * @return 3d list of values */ public List<List<List<Double>>> getVSliceList(int startInclusive, int endExclusive) { return seriesCollection.stream().map(inst -> inst.getVSliceList(startInclusive, endExclusive)).collect(Collectors.toList()); } /** * Returns a TimeSeriesInstances object containing instances with values, * between the start (inclusive) and end (exclusive). * * @param startInclusive to start at (inclusive) * @param endExclusive to end at (exclusive) * @return TimeSeriesInstances object */ public TimeSeriesInstances getVSlice(int startInclusive, int endExclusive) { final TimeSeriesInstances tsi = new TimeSeriesInstances(classLabels); tsi.seriesCollection = seriesCollection.stream().map(inst -> inst.getVSlice(startInclusive, endExclusive)).collect(Collectors.toList()); tsi.dataChecks(); return tsi; } /** * Returns a 3d array containing the values, for each instance, for each dimension, * between the start (inclusive) and end (exclusive). 
* * @param startInclusive to start at (inclusive) * @param endExclusive to end at (exclusive) * @return 3d array of values */ public double[][][] getVSliceArray(int startInclusive, int endExclusive) { return seriesCollection.stream().map(inst -> inst.getVSliceArray(startInclusive, endExclusive)).toArray(double[][][]::new); } /** * Returns a 3d list containing the values, for each instance, from the start * dimension (inclusive) and end dimension (exclusive) given. * * @param startInclusive to start at (inclusive) * @param endExclusive to end at (exclusive) * @return 3d list of values */ public List<List<List<Double>>> getHSliceList(int startInclusive, int endExclusive) { return seriesCollection.stream().map(inst -> inst.getHSliceList(startInclusive, endExclusive)).collect(Collectors.toList()); } /** * Returns a 3d array containing the values, for each instance, from the start * dimension (inclusive) and end dimension (exclusive) given. * * @param startInclusive to start at (inclusive) * @param endExclusive to end at (exclusive) * @return 3d array of values */ public double[][][] getHSliceArray(int startInclusive, int endExclusive) { return seriesCollection.stream().map(inst -> inst.getHSliceArray(startInclusive, endExclusive)).toArray(double[][][]::new); } /** * Returns a TimeSeriesInstances object containing instances with values, * from the start dimension (inclusive) and end dimension (exclusive) * * @param startInclusive to start at (inclusive) * @param endExclusive to end at (exclusive) * @return TimeSeriesInstances object */ public TimeSeriesInstances getHSlice(int startInclusive, int endExclusive) { final TimeSeriesInstances tsi = new TimeSeriesInstances(classLabels); tsi.seriesCollection = seriesCollection.stream().map(inst -> inst.getHSlice(startInclusive, endExclusive)).collect(Collectors.toList()); tsi.dataChecks(); return tsi; } /** * Returns whether a TimeSeriesInstances object is equal to another based if * series collection is the exact same and the 
class labels are the exact same. * * @param other object * @return true if equal, false if not */ @Override public boolean equals(final Object other) { if (!(other instanceof TimeSeriesInstances)) { return false; } final TimeSeriesInstances that = (TimeSeriesInstances) other; return Objects.equals(seriesCollection, that.seriesCollection) && Arrays.equals(classLabels, that.classLabels); } /** * Returns an int of the hash code based on the series collection and class * labels. * * @return hash code */ @Override public int hashCode() { return Objects.hash(seriesCollection, classLabels); } /** * Returns whether data is for a classification problem. * * @return true if classification problem, false if not */ public boolean isClassificationProblem() { // if a set of class labels are set then it's a classification problem return classLabels.length >= 0; } /** * Returns whether data is for a regression problem. * * @return true if regression problem, false if not */ public boolean isRegressionProblem() { return !isClassificationProblem(); } /** * Get inst index binned by class. * I.e. class 1 contains {3,5,6} and class 2 contains {1,2,4} * * @return */ public List<List<Integer>> getInstIndicesByClass() { final List<List<Integer>> bins = new ArrayList<>(numClasses()); for(int i = 0; i < numClasses(); i++) { bins.add(new ArrayList<>()); } for(int i = 0; i < numInstances(); i++) { final TimeSeriesInstance inst = get(i); final int labelIndex = inst.getLabelIndex(); final List<Integer> bin = bins.get(labelIndex); bin.add(i); } return bins; } public List<TimeSeriesInstances> getInstsByClass() { return getInstIndicesByClass().stream().map( indices -> new TimeSeriesInstances( indices.stream().map(this::get).collect(Collectors.toList()), getClassLabels())).collect(Collectors.toList()); } public void addAll(Iterable<TimeSeriesInstance> insts) { for(TimeSeriesInstance inst : insts) { add(inst); } } }
25,729
31.446406
148
java
tsml-java
tsml-java-master/src/main/java/tsml/data_containers/TimeSeriesInstancesTest.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.data_containers; public class TimeSeriesInstancesTest { }
835
37
76
java
tsml-java
tsml-java-master/src/main/java/tsml/data_containers/TimeSeriesTest.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.data_containers; import org.junit.Before; import org.junit.Test; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; import static org.junit.Assert.*; public class TimeSeriesTest { private double first; private double second; private double third; private double fourth; private double[] array; private List<Double> list; private TimeSeries ts; @Before public void before() { first = 8.9; second = -2.4; third = Double.NaN; fourth = 6.3; array = new double[] {first, second, third, fourth}; list = Arrays.stream(array).boxed().collect(Collectors.toList()); ts = new TimeSeries(array); } @Test public void testCtorArray() { ts = new TimeSeries(array); assertArrayEquals(array, ts.toValueArray(), 0d); } @Test public void testCtorList() { ts = new TimeSeries(list); assertEquals(list, ts.getSeries()); } @Test public void testCopyCtor() { assertEquals(ts, new TimeSeries(ts)); } @Test public void testSize() { assertEquals(array.length, ts.getSeriesLength()); } @Test public void testValidValueAt() { assertTrue(ts.hasValidValueAt(0)); assertTrue(ts.hasValidValueAt(1)); assertFalse(ts.hasValidValueAt(2)); assertTrue(ts.hasValidValueAt(3)); } @Test public void testGet() { for(int i = 
0; i < ts.getSeriesLength(); i++) { assertEquals(new Double(array[i]), ts.get(i)); assertEquals(array[i], ts.getValue(i), 0d); if(Double.isNaN(array[i])) { assertEquals(TimeSeries.DEFAULT_VALUE, ts.getOrDefault(i), 0d); } } } // todo test hslice // todo test vslice // todo test metadata / stats }
2,696
26.804124
79
java
tsml-java
tsml-java-master/src/main/java/tsml/data_containers/ts_fileIO/TSReader.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package tsml.data_containers.ts_fileIO;

import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Scanner;

import tsml.data_containers.TimeSeriesInstance;
import tsml.data_containers.TimeSeriesInstances;

/**
 * File for reading sktime format data into TimeSeriesInstances object
 *
 * The header is parsed into a keyword -> value map (except @classLabel, which
 * is handled specially); the data section expects one instance per line with
 * dimensions separated by ':' and, when class labels are present, the label
 * as the final ':'-separated token.
 *
 * @author Aaron Bostrom, pushed 22/4/2020
 */
public class TSReader {

    // need to change this to a map function.
    // Header keywords recognised in .ts files.
    public static final String PROBLEM_NAME = "@problemName";
    public static final String TIME_STAMPS = "@timeStamps";
    public static final String CLASS_LABEL = "@classLabel";
    public static final String UNIVARIATE = "@univariate";
    public static final String MISSING = "@missing";
    public static final String DATA = "@data";

    // Raw header keyword -> value pairs collected by readHeader().
    private HashMap<String, String> variables;

    private final Scanner m_scanner;
    // Last token read; mutated by getNextToken() — parsing logic depends on
    // this shared state, so the read order matters.
    private String currentToken;

    private int m_Lines;

    private String description;
    private String problemName;
    private boolean univariate;
    private boolean missing;
    private boolean timeStamps;
    // True when the file declares "@classLabel true <labels...>".
    private boolean classLabel;
    private List<String> classLabels;

    TimeSeriesInstances m_data;

    private List<TimeSeriesInstance> raw_data;

    /**
     * Reads the whole .ts stream: header first, then every data line.
     *
     * @param reader source of .ts formatted text
     * @throws IOException if a required header keyword is missing
     */
    public TSReader(Reader reader) throws IOException {
        variables = new HashMap<>();

        //m_Tokenizer = new StreamTokenizer(reader);
        m_scanner = new Scanner(reader);
        // tokens are separated by whitespace or commas
        m_scanner.useDelimiter("[\\s,]+");

        readHeader();

        // NOTE(review): debug prints left in — presumably temporary; consider
        // removing or routing through a logger.
        System.out.println(variables);
        System.out.println(classLabels);

        CreateTimeSeriesInstances();
    }

    // Parses the data section: one TimeSeriesInstance per remaining line.
    private void CreateTimeSeriesInstances() throws IOException {
        // read each line and extract a data Instance
        raw_data = new ArrayList<>();

        // extract the multivariate series, and the possible label.
        while (m_scanner.hasNextLine()) {
            String line = m_scanner.nextLine();
            Scanner lineScanner = new Scanner(line);
            // zero-width delimiter: keeps ':' and ',' as their own tokens
            lineScanner.useDelimiter("((?=[:,])|(?<=[:,]))");
            raw_data.add(readMultivariateInstance(lineScanner));
            lineScanner.close();
        }

        // create timeseries instances object.
        m_data = new TimeSeriesInstances(raw_data, classLabels.toArray(new String[classLabels.size()]));
        m_data.setProblemName(problemName);
        // m_data.setHasTimeStamps(timeStamps); // todo this has been temp removed, should be computed from the data
        m_data.setDescription(description);
    }

    /** Returns the parsed data set. */
    public TimeSeriesInstances GetInstances() {
        return m_data;
    }

    /**
     * Parses one data line into an instance. Dimensions are separated by ':';
     * each value token is parsed as a double (NaN on failure, which also covers
     * explicit missing markers).
     */
    private TimeSeriesInstance readMultivariateInstance(Scanner lineScanner) throws IOException {

        List<List<Double>> multi_timeSeries = new ArrayList<>();
        String classValue = "";

        ArrayList<Double> timeSeries = new ArrayList<>();
        while (lineScanner.hasNext()) {
            getNextToken(lineScanner);
            // this means we're about to get the class value
            if (currentToken.equalsIgnoreCase(":") && classLabel) {
                // add the current time series to the list.
                multi_timeSeries.add(timeSeries);
                timeSeries = new ArrayList<>();
            } else {
                double val;
                try {
                    val = Double.parseDouble(currentToken);
                } catch (NumberFormatException ex) {
                    // non-numeric token (missing marker or the class label)
                    val = Double.NaN;
                }
                timeSeries.add(val);
                // classValue tracks the last non-':' token; after the loop it
                // holds the trailing class-label token (when classLabel is set).
                classValue = currentToken;
            }
        }

        // don't add the last series to the list, instead extract the first element and
        // figure out what the class value is.
        int classVal = classLabel ? classLabels.indexOf(classValue) : -1;
        return new TimeSeriesInstance(multi_timeSeries, classVal);
    }

    // this function reads upto the @data bit in the file.
    protected void readHeader() throws IOException {
        // first token should be @problem name. as we skip whitespace and comments.
        skipComments();

        getNextToken();
        do {
            if (currentToken.equalsIgnoreCase(CLASS_LABEL)) {
                ExtractClassLabels();
            } else {
                // generic "@keyword value" pair
                variables.put(currentToken, getNextToken());
                getNextToken();
            }
        } while (!currentToken.equalsIgnoreCase(DATA));

        // these are required.
        problemName = variables.get(PROBLEM_NAME);
        if (problemName == null) {
            errorMessage("keyword " + PROBLEM_NAME + " expected");
        }

        if (variables.get(UNIVARIATE) == null) {
            errorMessage("keyword " + UNIVARIATE + " expected");
        } else {
            univariate = Boolean.parseBoolean(variables.get(UNIVARIATE));
        }

        // set optionals.
        if (variables.get(MISSING) != null)
            missing = Boolean.parseBoolean(variables.get(MISSING));
        if (variables.get(TIME_STAMPS) != null)
            timeStamps = Boolean.parseBoolean(variables.get(TIME_STAMPS));

        m_scanner.nextLine(); //clear our this bit.
    }

    // Reads "@classLabel true|false [label...]"; the loop stops on the next
    // '@' keyword (typically @data), which remains in currentToken for the
    // caller's do-while test.
    private void ExtractClassLabels() throws IOException {
        classLabels = new ArrayList<>();

        if (m_scanner.hasNextBoolean())
            classLabel = m_scanner.nextBoolean();

        if (!classLabel)
            return;

        while (!getNextToken().contains("@")) {
            classLabels.add(currentToken);
        }
    }

    // Consumes "# comment" lines before the header proper.
    protected void skipComments() {
        while (m_scanner.findInLine("\\s*\\#.*") != null) { m_scanner.nextLine(); }
    }

    /**
     * Gets next token, checking for a premature and of line.
     *
     * @throws IOException if it finds a premature end of line
     */
    protected String getNextToken() throws IOException {
        return getNextToken(m_scanner);
    }

    // Skips ',' tokens by recursing; the recursion updates the currentToken
    // field, and the field (not the recursive return) is what gets returned.
    protected String getNextToken(Scanner scanner) throws IOException {
        currentToken = scanner.next();
        //recurse until we find a valid token.
        if (currentToken.equalsIgnoreCase(","))
            getNextToken(scanner);

        //System.out.println("t: "+currentToken);
        return currentToken;
    }

    /**
     * Throws error message with line number and last token read.
     *
     * @param msg the error message to be thrown
     * @throws IOException containing the error message
     */
    protected void errorMessage(String msg) throws IOException {
        String str = msg + ", read " + currentToken;
        if (m_Lines > 0) {
            int line = Integer.parseInt(str.replaceAll(".* line ", ""));
            str = str.replaceAll(" line .*", " line " + (m_Lines + line - 1));
        }
        throw new IOException(str);
    }

    // Ad-hoc smoke test: reads every _TRAIN.ts under hard-coded local paths.
    // NOTE(review): paths are machine-specific and the FileReader is never
    // closed — demo code only.
    public static void main(String[] args) throws IOException {

        // String local_path = "D:\\Work\\Data\\Univariate_ts\\";
        // String m_local_path = "D:\\Work\\Data\\Multivariate_ts\\";

        String local_path = "Z:\\ArchiveData\\Univariate_ts\\";
        String m_local_path = "Z:\\ArchiveData\\Multivariate_ts\\";

        String[] paths = {/*local_path,*/ m_local_path};

        for (String path : paths) {
            File dir = new File(path);
            for (File file : dir.listFiles()) {
                String filepath = path + file.getName() + "\\" + file.getName();
                File f = new File(filepath + "_TRAIN" + ".ts");
                long time = System.nanoTime();
                TSReader ts_reader = new TSReader(new FileReader(f));
                System.out.println("after: " + (System.nanoTime() - time));
            }
        }
    }
}
8,536
32.217899
115
java
tsml-java
tsml-java-master/src/main/java/tsml/data_containers/ts_fileIO/TSWriter.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.data_containers.ts_fileIO; import java.io.BufferedWriter; import java.io.File; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.PrintWriter; import java.io.Writer; import java.io.*; import java.text.DecimalFormat; import java.util.Arrays; import tsml.data_containers.TimeSeries; import tsml.data_containers.TimeSeriesInstance; import tsml.data_containers.TimeSeriesInstances; public class TSWriter { TimeSeriesInstances data; BufferedWriter writer; public void setData(TimeSeriesInstances dat){ data = dat; } DecimalFormat df = new DecimalFormat(".########"); public void setDestination(OutputStream output) { writer = new BufferedWriter(new OutputStreamWriter(output)); } public void setDesination(File output) throws FileNotFoundException { setDestination(new FileOutputStream(output)); } BufferedWriter getWriter(){ return writer; } public TSWriter(){ df.setMaximumFractionDigits(6); } public TSWriter(File output) throws IOException{ this(); setDesination(output); } public void writeBatch(){ PrintWriter outW = new PrintWriter(getWriter()); //writer header info first. 
outW.println("@problemName " + data.getProblemName()); outW.println("@timeStamps " + data.hasTimeStamps()); outW.println("@missing " + data.hasMissing()); outW.println("@univariate " + !data.isMultivariate()); outW.println("@dimensions " + data.getMaxNumDimensions()); outW.println("@equalLength " + data.isEqualLength()); outW.println("@seriesLength " + data.getMaxLength()); //outW.println("@classLabel " + ); outW.print("@classLabel "); outW.print(data.getClassLabels() != null && data.getClassLabels().length > 0); outW.println(data.getClassLabelsFormatted()); outW.println("@data"); //then writer data. StringBuilder sb = new StringBuilder(); for(TimeSeriesInstance inst : data){ for(TimeSeries ts : inst){ for(Double d : ts.getSeries()) sb.append(df.format(d.doubleValue())).append(","); sb.replace(sb.length()-1,sb.length(),":"); //we use colon to separate dimensions, overwriter the last comma. } sb.append(data.getClassLabels()[inst.getLabelIndex()]); //append the class label. sb.append("\n"); } outW.print(sb.toString()); outW.close(); } }
3,451
32.192308
124
java
tsml-java
tsml-java-master/src/main/java/tsml/data_containers/utilities/Converter.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ /** * @author Aaron Bostrom and George Oastler */ package tsml.data_containers.utilities; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; import experiments.data.DatasetLoading; import org.apache.commons.lang3.ArrayUtils; import tsml.data_containers.TimeSeriesInstance; import tsml.data_containers.TimeSeriesInstances; import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; public class Converter { public static boolean isMultivariate(Instances data) { return data.get(0).attribute(0).isRelationValued(); } public static boolean isUnivariate(Instances data) { return !isMultivariate(data); } public static TimeSeriesInstances fromArff(Instances data){ List<List<List<Double>>> raw_data = new ArrayList<>(data.numInstances()); List<Double> label_indexes = new ArrayList<>(data.numInstances()); //if no class attribute set, set it to the last one. 
if(data.classIndex() == -1) data.setClassIndex(data.numAttributes()-1); //we multivariate if(isMultivariate(data)){ for(int i=0; i<data.numInstances(); i++){ Instances timeseries = data.get(i).relationalValue(data.get(i).attribute(0)); //number of channels is numInstances raw_data.add(new ArrayList<>(timeseries.numInstances())); for(int j=0; j<timeseries.numInstances(); j++){ raw_data.get(i).add(new ArrayList<>(timeseries.numAttributes())); for(int k=0; k< timeseries.get(j).numAttributes(); k++){ raw_data.get(i).get(j).add(timeseries.get(j).value(k)); } } label_indexes.add(data.get(i).value(1)); } } else{ for(int i=0; i<data.numInstances(); i++){ //add dimension 0 raw_data.add(new ArrayList<>(1)); raw_data.get(i).add(new ArrayList<>(data.get(i).numAttributes()-1)); //remove class attribute. for(int j=0; j< data.get(i).numAttributes(); j++){ //skip class index. if(data.classIndex() == j) label_indexes.add(data.get(i).value(j)); else raw_data.get(i).get(0).add(data.get(i).value(j)); } } } // construct the output TimeSeriesInstances obj from raw data and labels final TimeSeriesInstances output; if(data.classAttribute().isNumeric()) { // regression problem. Assume label indices are regression target values output = new TimeSeriesInstances(raw_data, label_indexes); } else if(data.classAttribute().isNominal()) { // classification problem. 
Assume label indices point to a corresponding class String[] labels = new String[data.classAttribute().numValues()]; for(int i=0; i< labels.length; i++) labels[i] = data.classAttribute().value(i); output = new TimeSeriesInstances(raw_data, labels, label_indexes); } else { throw new IllegalArgumentException("cannot handle non-numeric and non-nominal labels"); } output.setProblemName(data.relationName()); return output; } public static TimeSeriesInstance fromArff(Instance instance) { final Instances data = new Instances(instance.dataset(), 1); data.add(instance); final TimeSeriesInstances tsInsts = fromArff(data); return tsInsts.get(0); } public static Instances toArff(TimeSeriesInstances data){ double[][][] values = data.toValueArray(); int[] classIndexes = data.getClassIndexes(); String[] classLabels = data.getClassLabels(); int numAttributes = data.getMaxLength(); int numChannels = data.getMaxNumDimensions(); if(data.isMultivariate()){ //create relational attributes. ArrayList<Attribute> relational_atts = createAttributes(numAttributes); Instances relationalHeader = new Instances("", relational_atts, numChannels); relationalHeader.setRelationName("relationalAtt"); //create the relational and class value attributes. ArrayList<Attribute> attributes = new ArrayList<>(); Attribute relational_att = new Attribute("relationalAtt", relationalHeader, 0); attributes.add(relational_att); attributes.add(new Attribute("ClassLabel", Arrays.stream(classLabels).collect(Collectors.toList()))); //create output data set. Instances output = new Instances(data.getProblemName(), attributes, data.numInstances()); for(int i=0; i < data.numInstances(); i++){ //create each row. //only two attribtues, relational and class. output.add(new DenseInstance(2)); //set relation for the dataset/ Instances relational = new Instances(relationalHeader, data.get(i).getNumDimensions()); //each dense instance is row/ which is actually a channel. 
for(int j=0; j< data.get(i).getNumDimensions(); j++){ double[] vals = new double[numAttributes]; System.arraycopy(values[i][j], 0, vals, 0, values[i][j].length); for(int k=values[i][j].length; k<numAttributes; k++) vals[k] = Double.NaN; //all missing values are NaN. relational.add(new DenseInstance(1.0, vals)); } int index = output.instance(i).attribute(0).addRelation(relational); //set the relational attribute. output.instance(i).setValue(0, index); //set class value. output.instance(i).setValue(1, (double)classIndexes[i]); } output.setClassIndex(output.numAttributes()-1); //System.out.println(relational); return output; } //if its not multivariate its univariate. ArrayList<Attribute> attributes = createAttributes(numAttributes); //add the class label at the end. attributes.add(new Attribute("ClassLabel", Arrays.stream(classLabels).collect(Collectors.toList()))); Instances output = new Instances(data.getProblemName(), attributes, data.numInstances()); output.setClassIndex(output.numAttributes() - 1); //create the Instance. for (int i = 0; i < data.numInstances(); i++) { //we know it's univariate so it has only one dimension. double[] vals = new double[numAttributes+1]; System.arraycopy(values[i][0], 0, vals, 0, values[i][0].length); for(int j=values[i][0].length; j<numAttributes; j++) vals[j] = Double.NaN; //all missing values are NaN. vals[vals.length-1] = classIndexes[i]; //put class val at the end. 
output.add(new DenseInstance(1.0, vals)); } return output; } public static Instance toArff(TimeSeriesInstance tsinst, String[] Labels) { final TimeSeriesInstances tsinsts = new TimeSeriesInstances(new TimeSeriesInstance[]{tsinst}, Labels); final Instances insts = toArff(tsinsts); return insts.get(0); } private static ArrayList<Attribute> createAttributes(int numAttributes) { ArrayList<Attribute> relational_atts = new ArrayList<>(); for (int i = 0; i < numAttributes; i++) { relational_atts.add(new Attribute(("TimeSeriesData_" + i).intern())); } return relational_atts; } public static void main(String[] args) throws Exception { final Instances[] instances = DatasetLoading.sampleBasicMotions(0); Instances insts = instances[0]; final TimeSeriesInstance tsinst = fromArff(insts.get(0)); System.out.println(tsinst); } }
9,116
41.802817
113
java
tsml-java
tsml-java-master/src/main/java/tsml/data_containers/utilities/Splitter.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.data_containers.utilities; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import tsml.data_containers.TimeSeries; import tsml.data_containers.TimeSeriesInstance; import tsml.data_containers.TimeSeriesInstances; //This class if for weird hacky dimension wise operations we need to do when interfacing with Weka classifiers //that can only take univariate data. public class Splitter{ /** * @param inst * @return List<TimeSeriesInstance> */ //splitty splitty. public static List<TimeSeriesInstance> splitTimeSeriesInstance(TimeSeriesInstance inst){ int[][] indexes = new int[inst.getNumDimensions()][1]; for(int i=0; i< indexes.length; i++) indexes[i] = new int[]{i}; return splitTimeSeriesInstance(inst, indexes); } /** * @param inst * @return List<TimeSeriesInstances> */ //horizontally slice into univariate TimeSeriesInstances. 
//can slice {{0},{1,2}} public static List<TimeSeriesInstance> splitTimeSeriesInstance(TimeSeriesInstance inst, int[][] slicingIndexes){ List<TimeSeriesInstance> output = new ArrayList<>(slicingIndexes.length); for(int[] i : slicingIndexes){ TimeSeriesInstance temp = inst.getHSlice(i); output.add(temp); } return output; } /** * @param inst * @return List<TimeSeriesInstances> */ //horizontally slice into univariate TimeSeriesInstances. //can slice {{0},{1,2}} public static List<TimeSeriesInstances> splitTimeSeriesInstances(TimeSeriesInstances inst, int[][] slicingIndexes){ List<TimeSeriesInstances> output = new ArrayList<>(inst.getMaxNumDimensions()); for(int[] i : slicingIndexes){ TimeSeriesInstances temp = new TimeSeriesInstances(inst.getHSliceArray(i), inst.getClassIndexes(), inst.getClassLabels()); output.add(temp); } return output; } public static List<TimeSeriesInstances> splitTimeSeriesInstances(TimeSeriesInstances inst){ int[][] indexes = new int[inst.getMaxNumDimensions()][]; for(int i=0; i< indexes.length; i++) indexes[i] = new int[]{i}; return splitTimeSeriesInstances(inst, indexes); } /** * @param inst_dims * @return TimeSeriesInstance */ //mergey mergey //could merge dimension slices like. {0,1}, {2}, {3,4} public static TimeSeriesInstance mergeTimeSeriesInstance(List<TimeSeriesInstance> inst_dims){ List<TimeSeries> ts_data = new ArrayList<>(); for(TimeSeriesInstance inst : inst_dims){ double[][] out = inst.toValueArray(); //concat the hslice. for(double[] o : out) ts_data.add(new TimeSeries(o)); } return new TimeSeriesInstance(inst_dims.get(0).getLabelIndex(), ts_data); } //could merge dimension slices like. 
{0,1}, {2}, {3,4} public static TimeSeriesInstances mergeTimeSeriesInstances(List<TimeSeriesInstances> inst_dims){ TimeSeriesInstances out = new TimeSeriesInstances(inst_dims.get(0).getClassLabels()); for ( int i=0; i<inst_dims.get(0).numInstances(); i++ ){ List<TimeSeriesInstance> single_instance = new ArrayList<>(); //each TSInstances is a HSlice of the data. for(TimeSeriesInstances dim : inst_dims){ single_instance.add(dim.get(i)); } out.add(mergeTimeSeriesInstance(single_instance)); } return out; } }
4,370
34.536585
134
java
tsml-java
tsml-java-master/src/main/java/tsml/data_containers/utilities/TimeSeriesCollector.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.data_containers.utilities; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.function.BiConsumer; import java.util.function.BinaryOperator; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collector; import java.util.stream.Collectors; import java.util.stream.Stream; public class TimeSeriesCollector implements Collector<Double, List<Double>, TimeSeriesSummaryStatistics> { @Override public BiConsumer<List<Double>, Double> accumulator() { return (list, val) -> list.add(val); } @Override public Set<Characteristics> characteristics() { HashSet<Characteristics> set = new HashSet<Characteristics>(); set.add(Characteristics.UNORDERED); return set; } //merge two lists in parallel. @Override public BinaryOperator<List<Double>> combiner() { return (list1, list2) -> Stream.concat(list1.stream(), list2.stream()).collect(Collectors.toList()); } @Override public Supplier<List<Double>> supplier() { return ArrayList<Double>::new; } @Override public Function<List<Double>, TimeSeriesSummaryStatistics> finisher() { return TimeSeriesSummaryStatistics::new; } }
2,077
32.516129
107
java
tsml-java
tsml-java-master/src/main/java/tsml/data_containers/utilities/TimeSeriesResampler.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.data_containers.utilities; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Random; import tsml.data_containers.TimeSeriesInstance; import tsml.data_containers.TimeSeriesInstances; import weka.core.Instances; public class TimeSeriesResampler { public static TrainTest resampleTrainTest(TimeSeriesInstances train, TimeSeriesInstances test, long seed){ if(seed == 0) return new TrainTest(train, test); //create combined list. List<TimeSeriesInstance> all = new ArrayList<>(train.numInstances() + test.numInstances()); all.addAll(train.getAll()); all.addAll(test.getAll()); int[] classCounts = train.getClassCounts(); //build the map. 
Map<Integer, List<TimeSeriesInstance>> classBins = new HashMap<>(); for(TimeSeriesInstance inst : all){ List<TimeSeriesInstance> values = classBins.computeIfAbsent(inst.getLabelIndex(), k -> new ArrayList<>()); values.add(inst); } Random r = new Random(seed); List<TimeSeriesInstance> new_train = new ArrayList<>(); List<TimeSeriesInstance> new_test = new ArrayList<>(); for(Integer classVal : classBins.keySet()){ int occurences = classCounts[classVal.intValue()]; List<TimeSeriesInstance> bin = classBins.get(classVal); randomize(bin,r); //randomise the bin. new_train.addAll(bin.subList(0,occurences));//copy the first portion of the bin into the train set new_test.addAll(bin.subList(occurences, bin.size()));//copy the remaining portion of the bin into the test set. } TimeSeriesInstances newTrain = new TimeSeriesInstances(new_train, train.getClassLabels()); TimeSeriesInstances newTest = new TimeSeriesInstances(new_test, test.getClassLabels()); // set problem name newTrain.setProblemName(train.getProblemName()); newTest.setProblemName(test.getProblemName()); // set description newTrain.setDescription(train.getDescription()); newTest.setDescription(test.getDescription()); // set class counts train.getClassCounts(); test.getClassCounts(); return new TrainTest(newTrain, newTest); } //this function is the one from Instances, want to mirror there shuffling algorithm. private static void randomize(List<TimeSeriesInstance> data, Random random) { for (int j = data.size() - 1; j > 0; j--) swap(data, j, random.nextInt(j+1)); } //this function is the same as private static void swap(List<TimeSeriesInstance> data, int i, int j){ TimeSeriesInstance in = data.get(i); data.set(i, data.get(j)); data.set(j, in); } public static class TrainTest{ public TrainTest(TimeSeriesInstances train, TimeSeriesInstances test) { this.train = train; this.test = test; } public TimeSeriesInstances train; public TimeSeriesInstances test; } }
3,921
35.654206
123
java
tsml-java
tsml-java-master/src/main/java/tsml/data_containers/utilities/TimeSeriesStatsTools.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.data_containers.utilities; import tsml.data_containers.TimeSeries; public class TimeSeriesStatsTools { /** * @param ts * @return double */ public static double mean(TimeSeries ts){ return TimeSeriesSummaryStatistics.mean(ts); } /** * @param ts * @return double */ public static double std(TimeSeries ts){ double mean = TimeSeriesSummaryStatistics.mean(ts); return Math.sqrt(TimeSeriesSummaryStatistics.variance(ts, mean)); } /** * @param ts * @return TimeSeriesSummaryStatistics */ public static TimeSeriesSummaryStatistics getTimeSeriesSummaryStats(TimeSeries ts){ TimeSeriesSummaryStatistics stats = ts.getSeries().stream().collect(new TimeSeriesCollector()); return stats; } public static void main(String[] args) { double [] arr = {1.0, 2.0, Double.NaN, 3.0}; TimeSeries ts = new TimeSeries(arr); double actual = TimeSeriesStatsTools.mean(ts); double expected = 2.0; System.out.println("Actual " + actual + " expected " + expected); TimeSeriesSummaryStatistics stats1 = new TimeSeriesSummaryStatistics(ts.getSeries()); TimeSeriesSummaryStatistics stats2 = new TimeSeriesSummaryStatistics(ts); TimeSeriesSummaryStatistics stats3 = ts.getSeries().stream().collect(new TimeSeriesCollector()); TimeSeriesSummaryStatistics stats = 
TimeSeriesStatsTools.getTimeSeriesSummaryStats(ts); System.out.println(stats.getMean()); } }
2,343
30.675676
104
java
tsml-java
tsml-java-master/src/main/java/tsml/data_containers/utilities/TimeSeriesSummaryStatistics.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.data_containers.utilities; import java.util.List; import java.util.stream.Collectors; import java.util.stream.DoubleStream; import tsml.data_containers.TimeSeries; public class TimeSeriesSummaryStatistics { private double mean; private double sum; private double slope; private double variance; private double kurtosis; private double min; private double max; private double sumSq; private double skew; private double std; public TimeSeriesSummaryStatistics(double[] data) { // this method assume that there is no NaNs present. // use with care. calculateStats(data); } public TimeSeriesSummaryStatistics(TimeSeries data) { this(data.getSeries()); } public TimeSeriesSummaryStatistics(List<Double> data) { // calculate stats // strip out the NaNs, convert to an array of doubles. 
this(convert(data)); } /** * @param inst */ public void calculateStats(double[] inst) { max = max(inst); min = min(inst); sum = sum(inst); sumSq = sumSq(inst); mean = mean(inst); variance = variance(inst, mean); std = Math.sqrt(variance); skew = skew(inst, mean, std); kurtosis = kurtosis(inst, mean, std); slope = slope(inst, sum, sumSq, std); } /** * @param inst * @return double */ /* Surprised these don't exist */ public static double sum(double[] inst) { double sumSq = 0; for (double x : inst) { sumSq += x; } return sumSq; } /** * @param data * @return double */ public static double sum(List<Double> data){ return sum(convert(data)); } /** * @param ts * @return double */ public static double sum(TimeSeries ts){ return sum(ts.getSeries()); } /** * @param inst * @return double */ public static double sumSq(double[] inst) { double sumSq = 0; for (double x : inst) { sumSq += x * x; } return sumSq; } /** * @param data * @return double */ public static double sumSq(List<Double> data){ return sumSq(convert(data)); } /** * @param ts * @return double */ public static double sumSq(TimeSeries ts){ return sumSq(ts.getSeries()); } /** * @param inst * @return int */ public static int argmax(double[] inst) { double max = -999999999; int arg = -1; int j = 0; for (double x : inst) { if (x > max) { max = x; arg = j; } j++; } return arg; } /** * @param data * @return int */ public static int argmax(List<Double> data){ return argmax(convert(data)); } /** * @param ts * @return int */ public static int argmax(TimeSeries ts){ return argmax(ts.getSeries()); } /** * @param inst * @return double */ public static double max(double[] inst) { return inst[argmax(inst)]; } /** * @param data * @return double */ public static double max(List<Double> data){ return max(convert(data)); } /** * @param ts * @return double */ public static double max(TimeSeries ts){ return max(ts.getSeries()); } /** * @param inst * @return int */ public static int argmin(double[] inst) { double min = Double.MAX_VALUE; 
int arg = -1; int j = 0; for (double x : inst) { if (x < min) { min = x; arg = j; } j++; } return arg; } /** * @param data * @return int */ public static int argmin(List<Double> data){ return argmin(convert(data)); } /** * @param ts * @return int */ public static int argmin(TimeSeries ts){ return argmin(ts.getSeries()); } /** * @param inst * @return double */ public static double min(double[] inst) { return inst[argmin(inst)]; } /** * @param data * @return double */ public static double min(List<Double> data){ return min(convert(data)); } /** * @param ts * @return double */ public static double min(TimeSeries ts){ return min(ts.getSeries()); } /** * @param inst * @return double */ public static double mean(double[] inst) { double mean = 0; for (double x : inst) mean += x; return mean / (double) (inst.length); } /** * @param data * @return double */ public static double mean(List<Double> data){ return mean(convert(data)); } /** * @param ts * @return double */ public static double mean(TimeSeries ts){ return mean(ts.getSeries()); } /** * @param inst * @param mean * @return double */ public static double variance(double[] inst, double mean) { double var = 0; for (double x : inst) var += Math.pow(x - mean, 2); return var / (double) (inst.length); } /** * @param data * @param mean * @return double */ public static double variance(List<Double> data, double mean){ return variance(convert(data), mean); } /** * @param ts * @param mean * @return double */ public static double variance(TimeSeries ts, double mean){ return variance(ts.getSeries(), mean); } /** * @param inst * @param mean * @param std * @return double */ public static double kurtosis(double[] inst, double mean, double std) { double kurt = 0; for (double x : inst) kurt += Math.pow(x - mean, 4); kurt /= Math.pow(std, 4); return kurt / (double) (inst.length); } /** * @param data * @param mean * @param std * @return double */ public static double kurtosis(List<Double> data, double mean, double std){ return 
kurtosis(convert(data), mean, std); } /** * @param ts * @param mean * @param std * @return double */ public static double kurtosis(TimeSeries ts, double mean, double std){ return kurtosis(ts.getSeries(), mean, std); } /** * @param inst * @param mean * @param std * @return double */ public static double skew(double[] inst, double mean, double std) { double skew = 0; for (double x : inst) skew += Math.pow(x - mean, 3); skew /= Math.pow(std, 3); return skew / (double) (inst.length); } /** * @param data * @param mean * @param std * @return double */ public static double skew(List<Double> data, double mean, double std){ return skew(convert(data), mean, std); } /** * @param ts * @param mean * @param std * @return double */ public static double skew(TimeSeries ts, double mean, double std){ return skew(ts.getSeries(), mean, std); } /** * @param inst * @param sum * @param sumSq * @param std * @return double */ public static double slope(double[] inst, double sum, double sumSq, double std) { double sumXY = 0; for (int j = 0; j < inst.length; j++) { sumXY += inst[j] * j; } double length = inst.length; double sqsum = sum * sum; // slope double slope = sumXY - sqsum / length; double denom = sumSq - sqsum / length; if (denom != 0) slope /= denom; else slope = 0; return std != 0 ? 
slope : 0; } /** * @param data * @param sum * @param sumSq * @param std * @return double */ public static double slope(List<Double> data, double sum, double sumSq, double std){ return slope(convert(data), sum, sumSq, std); } /** * @param ts * @param sum * @param sumSq * @param std * @return double */ public static double slope(TimeSeries ts, double sum, double sumSq, double std){ return slope(ts.getSeries(), sum, sumSq, std); } /** * @param data * @param min * @param max * @return List<Double> */ public static List<Double> intervalNorm(List<Double> data, double min, double max){ return convert(intervalNorm(convert(data), min, max)); } /** * @param ts * @param min * @param max * @return TimeSeries */ public static TimeSeries intervalNorm(TimeSeries ts, double min, double max){ return new TimeSeries(intervalNorm(ts.toValueArray(), min, max)); } /** * @param data * @param min * @param max * @return double[] */ public static double[] intervalNorm(double[] data, double min, double max){ double[] out = new double[data.length]; for(int i=0; i<out.length; i++) out[i] = (data[i] - min) / (max - min); return out; } /** * @param data * @param mean * @param std * @return List<Double> */ public static List<Double> standardNorm(List<Double> data, double mean, double std){ return convert(standardNorm(convert(data), mean, std)); } /** * @param ts * @param mean * @param std * @return TimeSeries */ public static TimeSeries standardNorm(TimeSeries ts, double mean, double std){ return new TimeSeries(standardNorm(ts.toValueArray(), mean, std)); } /** * @param ts * @return TimeSeries */ public static TimeSeries standardNorm(TimeSeries ts){ double mean = mean(ts); double std = Math.sqrt(variance(ts, mean)); return new TimeSeries(standardNorm(ts.toValueArray(), mean, std)); } /** * @param data * @param mean * @param std * @return double[] */ public static double[] standardNorm(double[] data, double mean, double std){ double[] out = new double[data.length]; for(int i=0; i<out.length; i++) 
out[i] = (data[i] - mean) / (std); return out; } /** * @return double */ public double getMean() { return mean; } /** * @return double */ public double getSum() { return sum; } /** * @return double */ public double getSlope() { return slope; } /** * @return double */ public double getVariance() { return variance; } /** * @return double */ public double getKurtosis() { return kurtosis; } /** * @return double */ public double getMin() { return min; } /** * @return double */ public double getMax() { return max; } /** * @return double */ public double getSumSq() { return sumSq; } /** * @return double */ public double getSkew() { return skew; } /** * @param in * @return double[] */ private static double[] convert(List<Double> in){ return in.stream().filter(Double::isFinite).mapToDouble(Double::doubleValue).toArray(); } /** * @param in * @return List<Double> */ private static List<Double> convert(double[] in){ return DoubleStream.of(in).boxed().collect(Collectors.toList()); } }
12,979
19.505529
95
java
tsml-java
tsml-java-master/src/main/java/tsml/examples/ClassificationExamples.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package tsml.examples;

import tsml.classifiers.legacy.elastic_ensemble.WDTW1NN;
import tsml.classifiers.legacy.elastic_ensemble.DTW1NN;
import tsml.classifiers.legacy.elastic_ensemble.ED1NN;
import tsml.classifiers.legacy.elastic_ensemble.MSM1NN;
import tsml.classifiers.shapelet_based.FastShapelets;
import tsml.classifiers.shapelet_based.LearnShapelets;
import tsml.classifiers.distance_based.NN_CID;
import tsml.classifiers.interval_based.TSBF;
import tsml.classifiers.interval_based.TSF;
import tsml.classifiers.distance_based.DTD_C;
import tsml.classifiers.dictionary_based.BOSS;
import tsml.classifiers.legacy.RISE;
import tsml.classifiers.interval_based.LPS;
import tsml.classifiers.dictionary_based.SAXVSM;
import tsml.classifiers.shapelet_based.ShapeletTransformClassifier;
import tsml.classifiers.distance_based.DD_DTW;
import tsml.classifiers.dictionary_based.BagOfPatternsClassifier;
import experiments.data.DatasetLoading;
import fileIO.OutFile;
import java.io.File;
import java.text.DecimalFormat;
import tsml.classifiers.EnhancedAbstractClassifier;
import utilities.InstanceTools;
import weka.classifiers.Classifier;
import weka.classifiers.bayes.BayesNet;
import weka.classifiers.bayes.NaiveBayes;
import weka.classifiers.functions.Logistic;
import weka.classifiers.functions.MultilayerPerceptron;
import weka.classifiers.functions.SMO;
import weka.classifiers.functions.supportVector.PolyKernel;
import weka.classifiers.meta.RotationForest;
import machine_learning.classifiers.ensembles.CAWPE;
import machine_learning.classifiers.ensembles.SaveableEnsemble;
import weka.classifiers.trees.J48;
import weka.classifiers.trees.RandomForest;
import weka.core.Instances;

/**
 * Code to reproduce all the results in the paper
 * @article{bagnall16bakeoff,
 *   title={The Great Time Series Classification Bake Off: a Review and Experimental Evaluation of Recent Algorithmic Advance},
 *   author={A. Bagnall and J. Lines and A. Bostrom and J. Large and E. Keogh},
 *   journal={Data Mining and Knowledge Discovery},
 *   volume={Online First},
 *   year={2016}
 * }
 * @author ajb
 */
public class ClassificationExamples {
    //All classifier names, grouped by the bake-off taxonomy. Used to name
    //results directories; not every listed name is creatable by setClassifier
    //(e.g. "ACF", "PS", "EE", "COTE" have no case there — kept for directory naming).
    //<editor-fold defaultstate="collapsed" desc="Directory names for all classifiers">
    static String[] standard={"NB","C45","SVML","SVMQ","Logistic","BN","RandF","RotF","MLP"};
    static String[] elastic = {"Euclidean_1NN","DTW_R1_1NN","DTW_Rn_1NN","DDTW_R1_1NN","DDTW_Rn_1NN","ERP_1NN","LCSS_1NN","MSM_1NN","TWE_1NN","WDDTW_1NN","WDTW_1NN","DD_DTW","DTD_C","DTW_F"};
    static String[] shapelet={"ST","LS","FS"};
    static String[] dictionary={"BoP","SAXVSM","BOSS"};
    static String[] interval={"TSF","TSBF","LPS"};
    static String[] ensemble={"ACF","PS","EE","COTE"};
    static String[] complexity={"CID_ED","CID_DTW"};
    static String[][] classifiers={standard,elastic,shapelet,dictionary,interval,ensemble,complexity};
    static final String[] directoryNames={"standard","elastic","shapelet","dictionary","interval","ensemble","complexity"};
    //</editor-fold>

    /**
     * Factory method mapping a classifier name to a configured instance.
     *
     * @param classifier name of the classifier (see the arrays above for the
     *                   recognised names; several aliases are accepted)
     * @return a freshly constructed, configured classifier
     * @throws RuntimeException if the name is not recognised (previously this
     *         called System.exit(0), which killed the host JVM with a success
     *         code; throwing matches the originally commented-out intent)
     */
    public static Classifier setClassifier(String classifier){
        Classifier c=null;
        switch(classifier){
            //TIME DOMAIN CLASSIFIERS
            case "ED":
                c=new ED1NN();
                break;
            case "C45":
                c=new J48();
                break;
            case "NB":
                c=new NaiveBayes();
                break;
            case "SVML":
                //SMO with a linear polynomial kernel
                c=new SMO();
                PolyKernel p=new PolyKernel();
                p.setExponent(1);
                ((SMO)c).setKernel(p);
                break;
            case "SVMQ":
                //SMO with a quadratic polynomial kernel
                c=new SMO();
                PolyKernel p2=new PolyKernel();
                p2.setExponent(2);
                ((SMO)c).setKernel(p2);
                break;
            case "BN":
                c=new BayesNet();
                break;
            case "MLP":
                c=new MultilayerPerceptron();
                break;
            case "RandF":
                c= new RandomForest();
                ((RandomForest)c).setNumTrees(500);
                break;
            case "RotF":
                c= new RotationForest();
                ((RotationForest)c).setNumIterations(50);
                break;
            case "Logistic":
                c= new Logistic();
                break;
            case "HESCA":
                c=new CAWPE();
                break;
            //ELASTIC CLASSIFIERS
            case "DTW":
                //Full-window DTW (window proportion 1)
                c=new DTW1NN();
                ((DTW1NN )c).setWindow(1);
                break;
            case "DTWCV":
                //DTW with the window set through cross validation (the default)
                c=new DTW1NN();
                break;
            case "DD_DTW":
                c=new DD_DTW();
                break;
            case "DTD_C":
                c=new DTD_C();
                break;
            case "CID_ED":
                //Listed in the complexity group above but previously missing here.
                //NN_CID without useDTW() presumably uses Euclidean distance — TODO confirm.
                c=new NN_CID();
                break;
            case "CID_DTW":
                c=new NN_CID();
                ((NN_CID)c).useDTW();
                break;
            case "MSM":
                c=new MSM1NN();
                break;
            case "TWE":
                //NOTE(review): this instantiates MSM1NN, which looks like a
                //copy-paste bug — it should presumably be a TWE 1NN classifier,
                //but no TWE class is imported in this file. Behaviour kept
                //as-is; confirm against the elastic-ensemble package.
                c=new MSM1NN();
                break;
            case "WDTW":
                c=new WDTW1NN();
                break;
            case "LearnShapelets": case "LS":
                c=new LearnShapelets();
                break;
            case "FastShapelets": case "FS":
                c=new FastShapelets();
                break;
            case "ShapeletTransform": case "ST": case "ST_Ensemble":
                c=new ShapeletTransformClassifier();
                break;
            case "TSF":
                c=new TSF();
                break;
            case "RISE":
                c=new RISE();
                break;
            case "TSBF":
                c=new TSBF();
                break;
            case "BOP": case "BoP": case "BagOfPatterns":
                c=new BagOfPatternsClassifier();
                break;
            case "BOSS": case "BOSSEnsemble":
                c=new BOSS();
                break;
            case "SAXVSM": case "SAX":
                c=new SAXVSM();
                break;
            case "LPS":
                c=new LPS();
                break;
            default:
                throw new RuntimeException("Unknown classifier "+classifier);
        }
        return c;
    }

    /**
     * Run a given classifier/problem/fold combination with associated file set up.
     *
     * @param args args[0]: classifier name, created with setClassifier;
     *             args[1]: problem name;
     *             args[2]: fold number, assumed to range from 1, so 1 is
     *                      subtracted (this is because of the scripting used to
     *                      run the code on the cluster) — the standard archive
     *                      folds are always fold 0;
     *             args[3]: problem path — the data is assumed to live in its own
     *                      directory with two files, args[1]_TRAIN.arff and
     *                      args[1]_TEST.arff;
     *             args[4]: results path, where the output is written.
     * It will NOT overwrite any existing results (i.e. if a non-empty results
     * file for this fold already exists). This method only does the file set up,
     * then calls the Instances-based overload below to run the experiment.
     */
    public static void singleClassifierAndFold(String[] args){
        String classifier=args[0];
        String problem=args[1];
        int fold=Integer.parseInt(args[2])-1;   //cluster scripts count folds from 1
        String problemPath=args[3];
        String resultsPath=args[4];
        Classifier c=setClassifier(classifier);
        Instances train=DatasetLoading.loadDataNullable(problemPath+problem+"/"+problem+"_TRAIN");
        Instances test=DatasetLoading.loadDataNullable(problemPath+problem+"/"+problem+"_TEST");
        //Build the results directory structure: <results>/<classifier>/Predictions/<problem>/
        File f=new File(resultsPath+classifier);
        if(!f.exists())
            f.mkdir();
        String predictions=resultsPath+classifier+"/Predictions";
        f=new File(predictions);
        if(!f.exists())
            f.mkdir();
        predictions=predictions+"/"+problem;
        f=new File(predictions);
        if(!f.exists())
            f.mkdir();
        //Check whether this fold has already been run; if so, do nothing.
        //BUG FIX: the guard previously checked "fold<fold>.csv" but the
        //evaluator writes "testFold<fold>.csv", so existing results were
        //always redone and overwritten.
        f=new File(predictions+"/testFold"+fold+".csv");
        if(!f.exists() || f.length()==0){
            singleClassifierAndFold(train,test,c,fold,predictions);
        }
    }

    /**
     * Evaluate a classifier on one resampled fold and write the results file.
     *
     * @param train the standard train fold Instances from the archive
     * @param test the standard test fold Instances from the archive
     * @param c classifier to evaluate
     * @param fold integer to indicate which fold; set to 0 to just use train/test as given
     * @param resultsPath a string indicating where to store the results
     * @return the accuracy of c on this fold of the problem given in train/test
     *
     * NOTES:
     * 1. If the classifier is a SaveableEnsemble, its internal cross-validation
     *    accuracy and internal test predictions are also saved.
     * 2. The output file testFold<fold>.csv has the format:
     *    Line 1: ProblemName,ClassifierName,test
     *    Line 2: parameter information for the final classifier, if available
     *    Line 3: test accuracy
     *    then one line per test case: Actual Class, Predicted Class, , Class probabilities
     *    (the empty third field is deliberate — it is part of the TSML results format).
     */
    public static double singleClassifierAndFold(Instances train, Instances test, Classifier c, int fold, String resultsPath){
        Instances[] data=InstanceTools.resampleTrainAndTestInstances(train, test, fold);
        double acc=0;
        int act;
        int pred;
        //Save internal info for ensembles before building
        if(c instanceof SaveableEnsemble)
            ((SaveableEnsemble)c).saveResults(resultsPath+"/internalCV_"+fold+".csv",resultsPath+"/internalTestPreds_"+fold+".csv");
        try{
            c.buildClassifier(data[0]);
            StringBuilder str = new StringBuilder();
            DecimalFormat df=new DecimalFormat("##.######");
            for(int j=0;j<data[1].numInstances();j++)
            {
                act=(int)data[1].instance(j).classValue();
                double[] probs=c.distributionForInstance(data[1].instance(j));
                //Predicted class = arg max of the probability estimates
                pred=0;
                for(int i=1;i<probs.length;i++){
                    if(probs[i]>probs[pred])
                        pred=i;
                }
                if(act==pred)
                    acc++;
                str.append(act);
                str.append(",");
                str.append(pred);
                str.append(",,");   //empty field required by the results format
                for(double d:probs){
                    str.append(df.format(d));
                    str.append(",");
                }
                str.append("\n");
            }
            acc/=data[1].numInstances();
            OutFile p=new OutFile(resultsPath+"/testFold"+fold+".csv");
            p.writeLine(train.relationName()+","+c.getClass().getName()+",test");
            if(c instanceof EnhancedAbstractClassifier){
                p.writeLine(((EnhancedAbstractClassifier)c).getParameters());
            }else
                p.writeLine("No parameter info");
            p.writeLine(acc+"");
            p.writeLine(str.toString());
        }catch(Exception e)
        {
            System.out.println(" Error ="+e+" in method simpleExperiment"+e);
            e.printStackTrace();
            System.out.println(" TRAIN "+train.relationName()+" has "+train.numAttributes()+" attributes and "+train.numInstances()+" instances");
            //BUG FIX: the message previously omitted " and " between the counts
            System.out.println(" TEST "+test.relationName()+" has "+test.numAttributes()+" attributes and "+test.numInstances()+" instances");
            System.exit(0);
        }
        return acc;
    }

    /**
     * Example usage: set up the five arguments (classifier, problem, fold,
     * problem location, results location) and run a full experiment, saving
     * the results.
     */
    public static void main(String[] args){
        String[] paras={"BOSS","ItalyPowerDemand","1","Z:/ArchiveData/Univariate_arff/","C:/Temp/"};
        singleClassifierAndFold(paras);
    }
}
13,046
38.656535
191
java
tsml-java
tsml-java-master/src/main/java/tsml/examples/DataSimulatorExamples.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package tsml.examples;

import java.util.ArrayList;
import java.util.logging.Level;
import java.util.logging.Logger;
import statistics.simulators.ArmaModel;
import statistics.simulators.DataSimulator;
import statistics.simulators.Model;
import statistics.simulators.SimulateSpectralData;
import weka.classifiers.Classifier;
import weka.classifiers.trees.J48;
import weka.core.Instances;

/**
 * Class demonstrating how to use the data simulators to generate weka instances.
 * @author ajb
 */
public class DataSimulatorExamples extends DataSimulator{

    public static void main(String[] args) {
        //BUG FIX: the previous version called c.buildClassifier(null) twice
        //before the demo; the second catch block called System.exit(0), so the
        //simulator example below was unreachable dead code. The bogus calls
        //have been removed so the example actually runs.

        /** DataSimulator: All the simulators inherit from DataSimulator.
         * A DataSimulator contains an ArrayList of Models, one for each class.
         * To create a data simulator, you can either pass it a 2D array of
         * parameters (one array for each class) or pass it an ArrayList of
         * models (again, one for each class).
         */
        double[][] paras={{0.1,0.5,-0.6},{0.2,0.4,-0.5}};
        // Creates a two class simulator for AR(3) models
        DataSimulator arma=new SimulateSpectralData(paras);

        /* Model: All models inherit from the base Model class. Model has three
         * abstract methods: generate() returns the next observation in the
         * series, generate(t) generates the observation at time t (if possible)
         * and generateSeries(int n), which calls generate n times and returns
         * an array.
         */
        ArrayList<Model> m=new ArrayList<>();
        m.add(new ArmaModel(paras[0]));
        m.add(new ArmaModel(paras[1]));

        /** Once you have created the simulator and/or the models, you can
         * create sets of instances thus: */
        int seriesLength=100;
        int[] casesPerClass={100,100};
        arma.setSeriesLength(seriesLength);
        arma.setCasesPerClass(casesPerClass);
        Instances data = arma.generateDataSet();

        //Demonstrate that the simulated data is directly usable by a weka
        //classifier (generateDataSet presumably returns labelled Instances
        //with the class index set — TODO confirm against DataSimulator).
        Classifier c=new J48();
        try {
            c.buildClassifier(data);
            System.out.println("Built a J48 tree on "+data.numInstances()
                    +" simulated series of length "+seriesLength);
        } catch (Exception ex) {
            Logger.getLogger(DataSimulatorExamples.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
}
3,074
35.176471
96
java
tsml-java
tsml-java-master/src/main/java/tsml/examples/ShapeletExamples.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package tsml.examples;

/* Package weka.core.shapelet.* contains the classes:
 *      Shapelet: stores the actual shapelet, its location in the data set, the
 *      quality assessment and a reference to the quality measure used.
 *      BinaryShapelet: extends Shapelet to store the threshold used to measure
 *      quality.
 *      OrderLineObj: a simple class to store <distance,classValue> pairs for
 *      calculating the quality of a shapelet.
 *      QualityMeasures: a class to store shapelet quality measure
 *      implementations. This includes an abstract quality measure class, and
 *      implementations of each of the four shapelet quality measures.
 *      QualityBound: a class to store shapelet quality measure bounding
 *      implementations. This is used to determine whether an early abandonment
 *      is permissible for the four quality measures.
 */
import tsml.filters.shapelet_filters.old_code.ClusteredShapeletTransform;
import tsml.filters.shapelet_filters.ShapeletFilter;
import tsml.transformers.shapelet_tools.distance_functions.*;
import tsml.transformers.shapelet_tools.distance_functions.ImprovedOnlineShapeletDistance;
import tsml.transformers.shapelet_tools.distance_functions.CachedShapeletDistance;
import tsml.filters.shapelet_filters.old_code.ApproximateShapeletFilter;
import java.io.FileReader;
import java.io.IOException;
import java.text.DecimalFormat;
import java.util.logging.Level;
import java.util.logging.Logger;
/* package weka.filters.timeseries.shapelet_transforms.* contains:
 *      FullShapeletTransform: enumerative search to find the best k shapelets.
 *      ShapeletTransformDistCaching: subclass of FullShapeletTransform that
 *      uses the distance caching algorithm described in Mueen11. This is the
 *      fastest exact approach, but is memory intensive.
 *      ShapeletTransform: subclass of FullShapeletTransform that uses distance
 *      online normalisation and early abandon. Not as fast, but does not
 *      require the extra memory.
 *      ClusteredShapeletTransform: contains a FullShapeletTransform, and does
 *      post transformation clustering.
 */
/* package weka.classifiers.trees.shapelet_trees.* contains:
 *      ShapeletTreeClassifier: implementation of a shapelet tree to match the
 *      description in the original paper.
 *      4x tree classifiers based on the alternative distance measures in class
 *      QualityMeasures.
 */
import weka.core.*;
import tsml.transformers.shapelet_tools.quality_measures.ShapeletQuality.ShapeletQualityChoice;

/**
 * This class is a helper class to describe the structure of our shapelet code
 * and demonstrate how to use it.
 * copyright Anthony Bagnall
 * @author Anthony Bagnall, Jason Lines, Jon Hills and Edgaras Baranauskas
 */
public class ShapeletExamples {

    // Shared filter instance; set by basicTransformExample/approxDataTransformExample
    // and read afterwards by clusteredShapeletTransformExample.
    public static ShapeletFilter st;

    /**
     * Demonstrates the usage of the ShapeletTransform.
     * Configures the shared static filter {@code st} and returns the
     * transformed set of instances. Exits the JVM on failure.
     */
    public static Instances basicTransformExample(Instances train){
        /* Class to demonstrate the usage of the ShapeletTransform. Returns the
         * transformed set of instances */
        st =new ShapeletFilter();
        st.setSubSeqDistance(new OnlineShapeletDistance());
        /* The number of shapelets defaults to 100. We recommend setting it to a
         * large value, since there will be many duplicates and there is little
         * overhead in keeping a lot (although the shapelet early abandon
         * becomes less efficient).
         */
        //Let m=train.numAttributes()-1 (series length)
        //Let n= train.numInstances() (number of series)
        int nosShapelets=(train.numAttributes()-1)*train.numInstances()/5;
        if(nosShapelets< ShapeletFilter.DEFAULT_NUMSHAPELETS)
            nosShapelets= ShapeletFilter.DEFAULT_NUMSHAPELETS;
        st.setNumberOfShapelets(nosShapelets);
        /* Two other key parameters are minShapeletLength and maxShapeletLength.
         * For each value between these two, a full search is performed, which
         * is order (m^2n^2), so clearly there is a time/accuracy trade off.
         * Defaults to min of 3, max of 30.
         */
        int minLength=5;
        int maxLength=(train.numAttributes()-1)/10;
        if(maxLength< ShapeletFilter.DEFAULT_MINSHAPELETLENGTH)
            maxLength= ShapeletFilter.DEFAULT_MINSHAPELETLENGTH;
        st.setShapeletMinAndMax(minLength, maxLength);
        /* Next you need to set the quality measure. This defaults to IG, but
         * we recommend using the F stat. It is faster and (debatably) more
         * accurate.
         */
        st.setQualityMeasure(ShapeletQualityChoice.F_STAT);
        // You can set the filter to output details of the shapelets or not
        st.setLogOutputFile("ShapeletExampleLog.csv");
        // Alternatively, you can turn the logging off
        // st.turnOffLog();
        /* That's the basic options. Now you need to perform the transform.
         * FullShapeletTransform extends the weka SimpleBatchFilter, but we
         * have made the method process public to make usage easier.
         */
        Instances shapeletT=null;
        try {
            shapeletT=st.process(train);
        } catch (Exception ex) {
            System.out.println("Error performing the shapelet transform"+ex);
            ex.printStackTrace();
            System.exit(0);
        }
        return shapeletT;
    }

    /**
     * Demonstrates ClusteredShapeletTransform, which wraps the previously
     * configured static filter {@code st} (call basicTransformExample first)
     * and clusters its shapelets down to 2% of the series*length product.
     * Exits the JVM on failure.
     */
    public static Instances clusteredShapeletTransformExample(Instances train){
        /* The class ClusteredShapeletTransform contains a FullShapeletTransform
         * and post transform clusters it. You can either perform the transform
         * outside of the ClusteredShapeletTransform or leave it to do it
         * internally.
         */
        Instances shapeletT=null;
        //Cluster down to 10% of the number.
        int nosShapelets=(train.numAttributes()-1)*train.numInstances()/50;
        ClusteredShapeletTransform cst = new ClusteredShapeletTransform(st,nosShapelets);
        System.out.println(" Clustering down to "+nosShapelets+" Shapelets");
        System.out.println(" From "+st.getNumberOfShapelets()+" Shapelets");
        try {
            shapeletT=cst.process(train);
        } catch (Exception ex) {
            System.out.println("Error performing the shapelet clustering"+ex);
            ex.printStackTrace();
            System.exit(0);
        }
        return shapeletT;
    }

    /**
     * Configures a filter with a small fixed search (a single shapelet,
     * lengths 15-36, F-stat quality, output suppressed) so the timing
     * comparisons below are fast enough to run repeatedly.
     * Note: the {@code train} parameter is unused (kept for the commented-out
     * data-driven settings).
     */
    public static void initializeShapelet(ShapeletFilter s, Instances train){
        // int nosShapelets=(train.numAttributes()-1)*train.numInstances()/5;
        s.setNumberOfShapelets(1);
        int minLength=15;
        int maxLength=36;
        // int maxLength=(train.numAttributes()-1)/10;
        s.setShapeletMinAndMax(minLength, maxLength);
        s.setQualityMeasure(ShapeletQualityChoice.F_STAT);
        s.supressOutput();
        s.turnOffLog();
    }

    /**
     * Times the four shapelet distance implementations (basic, online,
     * cached, improved-online) on the same training data and prints a small
     * timing table in seconds. Exits the JVM on failure.
     */
    public static void distanceOptimizations(Instances train){
        Instances shapeletT=null;
        ShapeletDistance[] ssq = {new ShapeletDistance(), new OnlineShapeletDistance(), new CachedShapeletDistance(), new ImprovedOnlineShapeletDistance()};
        ShapeletFilter[] st = new ShapeletFilter[ssq.length];   //shadows the static field deliberately
        for(int i=0; i< st.length; i++){
            st[i] = new ShapeletFilter();
            st[i].setSubSeqDistance(ssq[i]);
            initializeShapelet(st[i], train);
        }
        DecimalFormat df =new DecimalFormat("###.####");
        long t1=0;
        long t2=0;
        double[] time = new double[ssq.length];
        try {
            //Wall-clock each distance variant over the same search
            for(int i=0; i< time.length; i++){
                t1=System.currentTimeMillis();
                shapeletT=st[i].process(train);
                t2=System.currentTimeMillis();
                time[i] =((t2-t1)/1000.0);
            }
            System.out.println("TIME (seconds)");
            for(int i=0; i< time.length; i++){
                System.out.print(ssq[i].getClass().getSimpleName()+ "\t");
            }
            System.out.println();
            for(int i=0; i< time.length; i++){
                System.out.print(df.format(time[i])+ "\t");
            }
            System.out.println();
        } catch (Exception ex) {
            System.out.println("Error performing the shapelet transform"+ex);
            ex.printStackTrace();
            System.exit(0);
        }
    }

    /**
     * Times the speed up from candidate-pruning (early abandon) for each of
     * the shapelet quality measures: for each measure it runs the same search
     * with pruning off and on and prints the times and speed-up ratio.
     * Assumes ShapeletQualityChoice declares at least 4 values — TODO confirm.
     * Exits the JVM on failure.
     */
    public static void shapeletEarlyAbandons(Instances train){
        //Time the speed up from early abandon of the four distance measures.
        //IG:
        ShapeletFilter[] s=new ShapeletFilter[4];
        ShapeletFilter[] pruned=new ShapeletFilter[4];
        for(int i=0;i<s.length;i++){
            s[i]=new ShapeletFilter();
            pruned[i]=new ShapeletFilter();
        }
        for(ShapeletFilter s1:s){
            initializeShapelet(s1,train);
            s1.setCandidatePruning(false);
        }
        for(ShapeletFilter s1:pruned){
            initializeShapelet(s1,train);
            s1.setCandidatePruning(true);
        }
        ShapeletQualityChoice[] choices=ShapeletQualityChoice.values();
        for(int i=0;i<s.length;i++){
            s[i].setQualityMeasure(choices[i]);
            pruned[i].setQualityMeasure(choices[i]);
        }
        long t1,t2;
        double time1,time2;
        DecimalFormat df =new DecimalFormat("###.####");
        try {
            for(int i=0;i<s.length;i++){
                //Unpruned timing
                t1=System.currentTimeMillis();
                s[i].process(train);
                t2=System.currentTimeMillis();
                time1=((t2-t1)/1000.0);
                //Pruned (early abandon) timing
                t1=System.currentTimeMillis();
                pruned[i].process(train);
                t2=System.currentTimeMillis();
                time2=((t2-t1)/1000.0);
                System.out.println(" ********* QUALITY MEASURE ="+s[i].getQualityMeasure()+" **********");
                System.out.println(" NO ABANDON \t\t ABANDON\t\t ABANDON/(NO ABANDON)%\t\t SPEED UP ");
                System.out.println(df.format(time1)+"\t\t\t"+df.format(time2)+"\t\t\t"+(int)(100.0*(time2/time1))+"%"+"\t\t\t"+df.format(time1/time2));
            }
        } catch (Exception ex) {
            System.out.println("Error performing the shapelet transform"+ex);
            ex.printStackTrace();
            System.exit(0);
        }
    }

    /**
     * Demonstrates the usage of the ApproximateShapeletFilter (stored in the
     * shared static field {@code st}) and returns the transformed set of
     * instances. Exits the JVM on failure.
     */
    public static Instances approxDataTransformExample(Instances train){
        /* Class to demonstrate the usage of the ApproximateShapeletTransform.
         * Returns the transformed set of instances. */
        st = new ApproximateShapeletFilter();
        //Parameters that are relevant to all types of transforms that extend FullShapeletTransform:
        //1. Number of shapelets to be stored
        int nosShapelets=(train.numAttributes()-1)*train.numInstances()/5;
        if(nosShapelets< ShapeletFilter.DEFAULT_NUMSHAPELETS)
            nosShapelets= ShapeletFilter.DEFAULT_NUMSHAPELETS;
        st.setNumberOfShapelets(nosShapelets);
        //2. Shapelet length range to be explored
        int minLength=5;
        int maxLength=(train.numAttributes()-1)/10;
        if(maxLength< ShapeletFilter.DEFAULT_MINSHAPELETLENGTH)
            maxLength= ShapeletFilter.DEFAULT_MINSHAPELETLENGTH;
        st.setShapeletMinAndMax(minLength, maxLength);
        //3. Quality measure
        st.setQualityMeasure(ShapeletQualityChoice.F_STAT);
        //4. Set the filter to output details of the shapelets or not
        st.setLogOutputFile("ApproximateTransformExampleLog.csv");
        /* Parameters that are specific to ApproximateShapeletTransform are:
         * 1. Dataset sampling level - specifies the percentage of instances to
         *    be used from the provided training data for the shapelet
         *    discovery, i.e. setting this parameter to 50 forces the transform
         *    to sample the training data to reduce it to 50% of the original
         *    size.
         * 2. Series reduction level - specifies the percentage of how much
         *    each series should be reduced, i.e. setting this parameter to 50
         *    forces the transform to approximate each series using PAA such
         *    that each series length is 50% of the original length.
         *
         * On average, the higher the percentage the lower the accuracy is to
         * be expected. For example setting the levels to 50 - 50 on average
         * should reduce the processing time by ~30 times and reduce the
         * accuracy by ~15%.
         */
        try {
            // Parameter 1 - dataset sampling level, Parameter 2 - PAA approximation level
            ((ApproximateShapeletFilter)st).setSampleLevels(50, 50);
        } catch (IOException ex) {
            Logger.getLogger(ShapeletExamples.class.getName()).log(Level.SEVERE, null, ex);
        }
        // Now perform the transform exactly like using the ShapeletTransform.
        Instances shapeletT=null;
        try {
            shapeletT=st.process(train);
        } catch (Exception ex) {
            System.out.println("Error performing the shapelet transform"+ex);
            ex.printStackTrace();
            System.exit(0);
        }
        return shapeletT;
    }

    /**
     * Loads a hard-coded local copy of SonyAIBORobotSurface1 (edit {@code path}
     * to run elsewhere) and walks through each of the examples above.
     * Exits the JVM if the data cannot be loaded.
     */
    public static void main(String[] args){
        Instances train=null,test=null;
        FileReader r;
        String path = "C:\\LocalData\\Dropbox\\TSC Problems\\";
        String dataset = "SonyAIBORobotSurface1";
        try{
            r= new FileReader(path+dataset+"\\"+dataset+"_TRAIN.arff");
            train = new Instances(r);
            train.setClassIndex(train.numAttributes()-1);
            r= new FileReader(path+dataset+"\\"+dataset+ "_TEST.arff");
            test = new Instances(r);
            test.setClassIndex(test.numAttributes()-1);
        } catch(Exception e)
        {
            System.out.println("Unable to load data. Exception thrown ="+e);
            System.exit(0);
        }
        System.out.println("****************** PERFORMING BASIC TRANSFORM *******");
        Instances shapeletT=basicTransformExample(train);
        System.out.println(" Transformed data set ="+shapeletT);
        System.out.println("\n **************** CLUSTERING *******");
        shapeletT=clusteredShapeletTransformExample(train);
        System.out.println(" Clustered Transformed data set ="+shapeletT);
        System.out.println("\n ******Distance calculation optimizations *******");
        distanceOptimizations(train);
        System.out.println("\n ******Shapelet Early Abandons *******");
        shapeletEarlyAbandons(train);
    }
}
15,229
42.89049
156
java
tsml-java
tsml-java-master/src/main/java/tsml/examples/SimulationExperiments.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.examples; import tsml.classifiers.distance_based.DTWCV; import tsml.classifiers.legacy.COTE.FlatCote; import tsml.classifiers.shapelet_based.LearnShapelets; import tsml.classifiers.shapelet_based.FastShapelets; import tsml.classifiers.interval_based.TSBF; import tsml.classifiers.interval_based.TSF; import tsml.classifiers.distance_based.DTD_C; import tsml.classifiers.dictionary_based.BOSS; import tsml.classifiers.legacy.RISE; import tsml.classifiers.shapelet_based.ShapeletTransformClassifier; import tsml.classifiers.interval_based.LPS; import tsml.classifiers.distance_based.ElasticEnsemble; import tsml.classifiers.distance_based.DD_DTW; import tsml.classifiers.dictionary_based.BagOfPatternsClassifier; import tsml.classifiers.legacy.COTE.HiveCote; import fileIO.OutFile; import statistics.simulators.Model; import statistics.simulators.SimulateSpectralData; import statistics.simulators.SimulateDictionaryData; import statistics.simulators.SimulateIntervalData; import statistics.simulators.SimulateShapeletData; import tsml.classifiers.EnhancedAbstractClassifier; import utilities.InstanceTools; import weka.classifiers.Classifier; import weka.classifiers.meta.RotationForest; import machine_learning.classifiers.ensembles.CAWPE; 
import machine_learning.classifiers.ensembles.SaveableEnsemble; import machine_learning.classifiers.tuned.TunedRandomForest; import weka.core.Instances; import utilities.ClassifierTools; /** * * @author ajb * FINAL VERSION of simulator experiments for stand alone execution only * Just the main experiments, copied here for clarity. For sensitivity analysis * and cluster based versions, see the class * Please read the technical report LINK HERE */ public class SimulationExperiments { //Global variables that relate to the data set. These are different for different //simulators, and are set to default values in setStandardGlobalParameters static int []casesPerClass={50,50}; static int seriesLength=300; static double trainProp=0.5; //<editor-fold defaultstate="collapsed" desc="All Classifiers: edit if you want to try some others"> static String[] allClassifiers={ //Benchmarks "RotF","DTW","HESCA", //Whole series "DD_DTW","DTD_C","EE","HESCA", //Interval "TSF","TSBF","LPS", //Shapelet "FastShapelets","ST","LearnShapelets", //Dictionary "BOP","BOSS", //Spectral "RISE", //Combos "COTE","FLATCOTE","HIVECOTE"}; //</editor-fold> //<editor-fold defaultstate="collapsed" desc="All Simulators: "> static String[] allSimulators={"WholeSeries","Interval","Shapelet","Dictionary","ARMA"}; //</editor-fold> public static Classifier createClassifier(String str) throws RuntimeException{ Classifier c; switch(str){ case "HESCA": c=new CAWPE(); break; case "RandF": c=new TunedRandomForest(); break; case "RotF": c=new RotationForest(); break; case "DTW": c=new DTWCV(); break; case "DD_DTW": c=new DD_DTW(); break; case "DTD_C": c=new DTD_C(); break; case "EE": c=new ElasticEnsemble(); break; case "TSF": c=new TSF(); break; case "TSBF": c=new TSBF(); break; case "LPS": c=new LPS(); break; case "FastShapelets": c=new FastShapelets(); break; case "ST": c=new ShapeletTransformClassifier(); //Just to make sure it is feasible ((ShapeletTransformClassifier)c).setOneHourLimit(); break; case 
"LearnShapelets": c=new LearnShapelets(); ((LearnShapelets)c).setParamSearch(true); break; case "BOP": c=new BagOfPatternsClassifier(); break; case "BOSS": c=new BOSS(); break; case "FLATCOTE": c=new FlatCote(); break; case "HIVECOTE": c=new HiveCote(); break; case "RISE": c=new RISE(); // ((RISE)c).setTransformType(RISE.TransformType.ACF_PS); ((RISE)c).setNumClassifiers(500); break; default: throw new RuntimeException(" UNKNOWN CLASSIFIER "+str); } return c; } public static void setStandardGlobalParameters(String str){ switch(str){ case "ARMA": case "AR": case "Spectral": casesPerClass=new int[]{200,200}; seriesLength=200; trainProp=0.1; Model.setDefaultSigma(1); break; case "Shapelet": casesPerClass=new int[]{250,250}; seriesLength=300; trainProp=0.1; Model.setDefaultSigma(1); break; case "Dictionary": casesPerClass=new int[]{200,200}; seriesLength=1500; trainProp=0.1; SimulateDictionaryData.setShapeletsPerClass(new int[]{5,10}); SimulateDictionaryData.setShapeletLength(29); // SimulateDictionaryData.checkGlobalSeedForIntervals(); Model.setDefaultSigma(1); break; case "Interval": seriesLength=1000; trainProp=0.1; casesPerClass=new int[]{200,200}; Model.setDefaultSigma(1); // SimulateIntervalData.setAmp(1); SimulateIntervalData.setNosIntervals(3); SimulateIntervalData.setNoiseToSignal(10); break; case "WholeSeriesElastic": seriesLength=200; trainProp=0.1; casesPerClass=new int[]{200,200}; Model.setDefaultSigma(1); // SimulateWholeSeriesElastic. 
break; case "WholeSeries": // break; default: throw new RuntimeException(" UNKNOWN SIMULATOR "); } } /** * Creates a simulated data set with the data characteristics defined in this * class and the default model characteristics set in the appropriate Simulator class * If you want to control the model parameters, see the class DataSimulator for two alternative * use cases * @param str:name of the simulator * @param seed: random seed * @return * @throws RuntimeException */ public static Instances simulateData(String str,int seed) throws RuntimeException{ Instances data; // for(int:) Model.setGlobalRandomSeed(seed); switch(str){ case "ARMA": case "AR": data=SimulateSpectralData.generateARDataSet(seriesLength, casesPerClass, true); break; case "Shapelet": data=SimulateShapeletData.generateShapeletData(seriesLength,casesPerClass); break; case "Dictionary": data=SimulateDictionaryData.generateDictionaryData(seriesLength,casesPerClass); break; case "WholeSeries": // data=SimulateWholeSeriesData.generateWholeSeriesData(seriesLength,casesPerClass); // break; case "WholeSeriesElastic": // data=SimulateWholeSeriesData.generateWholeSeriesData(seriesLength,casesPerClass); // break; default: throw new RuntimeException(" UNKNOWN SIMULATOR "); } return data; } /** Runs a single fold experiment, saving all output. 
* * @param train * @param test * @param c * @param sample * @param preds * @return */ public static double singleSampleExperiment(Instances train, Instances test, Classifier c, int sample,String preds){ double acc=0; OutFile p=new OutFile(preds+"/testFold"+sample+".csv"); // hack here to save internal CV for further ensembling if(EnhancedAbstractClassifier.classifierAbleToEstimateOwnPerformance(c)) ((EnhancedAbstractClassifier)c).setEstimateOwnPerformance(true); if(c instanceof SaveableEnsemble) ((SaveableEnsemble)c).saveResults(preds+"/internalCV_"+sample+".csv",preds+"/internalTestPreds_"+sample+".csv"); try{ c.buildClassifier(train); if(EnhancedAbstractClassifier.classifierIsEstimatingOwnPerformance(c)) ((EnhancedAbstractClassifier)c).getTrainResults().writeFullResultsToFile(preds+"/trainFold"+sample+".csv"); int[][] predictions=new int[test.numInstances()][2]; for(int j=0;j<test.numInstances();j++){ predictions[j][0]=(int)test.instance(j).classValue(); test.instance(j).setMissing(test.classIndex());//Just in case .... 
} for(int j=0;j<test.numInstances();j++) { predictions[j][1]=(int)c.classifyInstance(test.instance(j)); if(predictions[j][0]==predictions[j][1]) acc++; } acc/=test.numInstances(); String[] names=preds.split("/"); p.writeLine(names[names.length-1]+","+c.getClass().getName()+",test"); if(c instanceof EnhancedAbstractClassifier) p.writeLine(((EnhancedAbstractClassifier)c).getParameters()); else if(c instanceof SaveableEnsemble) p.writeLine(((SaveableEnsemble)c).getParameters()); else p.writeLine("NoParameterInfo"); p.writeLine(acc+""); for(int j=0;j<test.numInstances();j++){ p.writeString(predictions[j][0]+","+predictions[j][1]+","); double[] dist =c.distributionForInstance(test.instance(j)); for(double d:dist) p.writeString(","+d); p.writeString("\n"); } }catch(Exception e) { System.out.println(" Error ="+e+" in method simpleExperiment"+e); e.printStackTrace(); System.out.println(" TRAIN "+train.relationName()+" has "+train.numAttributes()+" attributes and "+train.numInstances()+" instances"); System.out.println(" TEST "+test.relationName()+" has "+test.numAttributes()+" attributes and "+test.numInstances()+" instances"); System.exit(0); } return acc; } /** * * Stand alone method to exactly reproduce shapelet experiment for all * classifiers defined within this method (makes NO use of global variables defined above. 
*/ public static void runShapeletSimulatorExperiment(){ Model.setDefaultSigma(1); seriesLength=300; casesPerClass=new int[]{50,50}; String[] classifiers={"RotF","DTW","FastShapelets","ST","BOSS"}; // "EE","CAWPE","TSF","TSBF","FastShapelets","ST","LearnShapelets","BOP","BOSS","RISE","COTE"}; OutFile of=new OutFile("C:\\Temp\\ShapeletSimExperiment.csv"); setStandardGlobalParameters("Shapelet"); of.writeLine("Shapelet Sim, series length= "+seriesLength+" cases class 0 ="+casesPerClass[0]+" class 1"+casesPerClass[0]+" train proportion = "+trainProp); of.writeString("Rep"); for(String s:classifiers) of.writeString(","+s); of.writeString("\n"); for(int i=0;i<100;i++){ of.writeString(i+","); //Generate data Model.setGlobalRandomSeed(i); Instances data=SimulateShapeletData.generateShapeletData(seriesLength,casesPerClass); //Split data Instances[] split=InstanceTools.resampleInstances(data, i,trainProp); for(String str:classifiers){ Classifier c; //Build classifiers switch(str){ case "RotF": c=new RotationForest(); break; case "DTW": c=new DTWCV(); break; case "EE": c=new ElasticEnsemble(); break; case "TSF": c=new TSF(); break; case "TSBF": c=new TSBF(); break; case "FastShapelets": c=new FastShapelets(); break; case "ST": c=new ShapeletTransformClassifier(); ((ShapeletTransformClassifier)c).setOneHourLimit(); break; case "LearnShapelets": c=new LearnShapelets(); break; case "BOP": c=new BagOfPatternsClassifier(); break; case "BOSS": c=new BOSS(); break; case "COTE": c=new FlatCote(); break; case "RISE": c=new RISE(); // ((RISE)c).setTransformType(RISE.TransformType.ACF_PS); ((RISE)c).setNumClassifiers(500); break; default: throw new RuntimeException(" UNKNOWN CLASSIFIER "+str); } double acc=ClassifierTools.singleTrainTestSplitAccuracy(c, split[0], split[1]); of.writeString(acc+","); System.out.println(i+" "+str+" acc ="+acc); } of.writeString("\n"); } } public static void main(String[] args){ runShapeletSimulatorExperiment(); } }
15,344
38.045802
164
java
tsml-java
tsml-java-master/src/main/java/tsml/examples/TransformExamples.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.examples; import weka.core.Instances; import weka.filters.Filter; import tsml.transformers.ACF; import tsml.transformers.PowerSpectrum; /** * * @author ajb */ public class TransformExamples { public static Instances acfTransform(Instances data){ ACF acf=new ACF(); acf.setMaxLag(data.numAttributes()/4); Instances acfTrans=null; acfTrans=acf.transform(data); return acfTrans; } public static Instances psTransform(Instances data){ PowerSpectrum ps=new PowerSpectrum(); Instances psTrans=null; psTrans= ps.transform(data); ps.truncate(psTrans, data.numAttributes()/4); return psTrans; } }
1,475
30.404255
76
java
tsml-java
tsml-java-master/src/main/java/tsml/examples/ts_examples/CapabilitiesExample.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.examples.ts_examples; import tsml.data_containers.TSCapabilities; import tsml.data_containers.TimeSeriesInstances; public class CapabilitiesExample { public static void example1() { double[][][] in = { // instance zero. { // time-series zero. { 0.0, 1.0, 2.0, 4.0 }, }, // instance one { // time-series zero. { 4.0, 3.0, 2.0, 1.0 }, } }; TimeSeriesInstances data1 = new TimeSeriesInstances(in, new int[] { 0, 1 }, new String[] { "A", "B" }); TSCapabilities capabilities1 = new TSCapabilities(); capabilities1.enable(TSCapabilities.EQUAL_LENGTH) .enable(TSCapabilities.UNIVARIATE) .enable(TSCapabilities.NO_MISSING_VALUES); boolean canHandle = capabilities1.test(data1); System.out.println(canHandle); } public static void example2() { double[][][] in = { // instance zero. { // time-series zero. { 0.0, 1.0, 2.0, 4.0 }, }, // instance one { // time-series zero. 
{ 4.0, 3.0, 2.0, 1.0 }, } }; TimeSeriesInstances data1 = new TimeSeriesInstances(in, new int[] { 0, 1 }, new String[] { "A", "B" }); TSCapabilities capabilities1 = new TSCapabilities(); capabilities1.enable(TSCapabilities.EQUAL_LENGTH) .enable(TSCapabilities.UNIVARIATE) .enable(TSCapabilities.NO_MISSING_VALUES) .enable(TSCapabilities.MIN_LENGTH(3)); boolean canHandle = capabilities1.test(data1); System.out.println(canHandle); } public static void example3() { double[][][] in = { // instance zero. { // time-series zero. { 0.0, 1.0, 2.0, 4.0 }, { 0.0, 1.0, 2.0 } }, // instance one { // time-series zero. { 4.0, 2.0, 1.0 }, { 0.0, 1.0, 2.0, 4.0 } } }; TimeSeriesInstances data1 = new TimeSeriesInstances(in, new int[] { 0, 1 }, new String[] { "A", "B" }); TSCapabilities capabilities1 = new TSCapabilities(); capabilities1.enable(TSCapabilities.EQUAL_OR_UNEQUAL_LENGTH) .enable(TSCapabilities.MULTI_OR_UNIVARIATE) .enable(TSCapabilities.NO_MISSING_VALUES); boolean canHandle = capabilities1.test(data1); System.out.println(canHandle); double[][][] in2 = { // instance zero. { // time-series zero. { 0.0, 1.0, 2.0, 4.0 }, }, // instance one { // time-series zero. { 4.0, 3.0, 2.0, 1.0 }, } }; TimeSeriesInstances data2 = new TimeSeriesInstances(in2, new int[] { 0, 1 }, new String[] { "A", "B" }); canHandle = capabilities1.test(data2); System.out.println(canHandle); } public static void example4() { double[][][] in = { // instance zero. { // time-series zero. { 0.0, 1.0, 2.0, 4.0 }, { 0.0, 1.0, 2.0 } }, // instance one { // time-series zero. 
{ 4.0, 2.0, 1.0 }, { 0.0, Double.NaN, 2.0, 4.0 } } }; TimeSeriesInstances data1 = new TimeSeriesInstances(in, new int[] { 0, 1 }, new String[] { "A", "B" }); TSCapabilities capabilities1 = new TSCapabilities(); capabilities1.enable(TSCapabilities.EQUAL_OR_UNEQUAL_LENGTH) .enable(TSCapabilities.MULTIVARIATE) .enable(TSCapabilities.MISSING_VALUES); boolean canHandle = capabilities1.test(data1); System.out.println(canHandle); } public static void main(String[] args) { example1(); example2(); example3(); example4(); } }
5,238
33.24183
112
java
tsml-java
tsml-java-master/src/main/java/tsml/examples/ts_examples/ClassificationExamples.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package tsml.examples.ts_examples;

import machine_learning.classifiers.kNN;
import tsml.classifiers.TSClassifier;
import tsml.classifiers.shapelet_based.ShapeletTransformClassifier;
import tsml.data_containers.TimeSeriesInstances;
import tsml.data_containers.utilities.Converter;
import utilities.ClassifierTools;
import weka.classifiers.AbstractClassifier;

/**
 * Worked examples of classifying TimeSeriesInstances data: manually converting
 * to weka Instances (example1), adapting a weka classifier through the
 * TSClassifier interface (example2), and using a classifier that implements
 * TSClassifier natively (example3). Only example2 is run from main().
 */
public class ClassificationExamples {

    // Using a weka classifier the annoying way: convert the TimeSeriesInstances
    // to weka Instances by hand at every call site via Converter.toArff.
    public static void example1() throws Exception {
        double[][][] in = {
                // instance zero.
                {
                        // time-series zero.
                        { 0.0, 1.0, 2.0, 4.0, 5.0 },
                },
                // instance one
                {
                        // time-series zero.
                        { 4.0, 3.0, 2.0, 1.0 },
                } };

        TimeSeriesInstances data1 = new TimeSeriesInstances(in, new int[] { 0, 1 }, new String[] { "A", "B" });

        double[][][] in1 = { {
                // time-series zero.
                { 0.0, 1.0, 2.0, 4.0, 5.0 },
        } };

        TimeSeriesInstances data2 = new TimeSeriesInstances(in1, new int[] { 0 }, new String[] { "A", "B" });

        kNN nn = new kNN(1);
        // Convert to weka format both for training and for accuracy evaluation.
        nn.buildClassifier(Converter.toArff(data1));
        double acc = ClassifierTools.accuracy(Converter.toArff(data2), nn);
        System.out.println(acc);
    }

    // Wrapping a weka classifier in an anonymous TSClassifier adapter, so the
    // conversion is handled by the interface rather than at each call site.
    public static void example2() throws Exception {
        double[][][] in = {
                //instance zero.
                {
                        //time-series zero.
                        {0.0,1.0,2.0,4.0,5.0},
                },
                //instance one
                {
                        //time-series zero.
                        {4.0,3.0,2.0,1.0},
                } };

        TimeSeriesInstances data1 = new TimeSeriesInstances(in, new int[]{0, 1}, new String[] { "A", "B" });

        double[][][] in1 = { {
                //time-series zero.
                {0.0,1.0,2.0,4.0,5.0},
        } };

        TimeSeriesInstances data2 = new TimeSeriesInstances(in1, new int[]{0}, new String[] { "A", "B" });

        // Minimal TSClassifier adapter: delegates to a wrapped weka kNN and
        // stores the training data as TimeSeriesInstances.
        TSClassifier nn = new TSClassifier(){
            kNN nn = new kNN(1);

            @Override
            public AbstractClassifier getClassifier() {
                return nn;
            }

            public TimeSeriesInstances trainData;

            @Override
            public TimeSeriesInstances getTSTrainData(){
                return trainData;
            }

            @Override
            public void setTSTrainData(TimeSeriesInstances train){
                trainData = train;
            }
        };

        nn.buildClassifier(data1);
        double acc = ClassifierTools.accuracy(data2, nn);
        System.out.println(acc);
    }

    // Using a classifier that implements TSClassifier natively, so no
    // adapter or manual conversion is needed at all.
    public static void example3() throws Exception {
        double[][][] in = {
                //instance zero.
                {
                        //time-series zero.
                        {0.0,1.0,2.0,4.0,5.0},
                },
                //instance one
                {
                        //time-series zero.
                        {4.0,3.0,2.0,1.0, 1.0},
                } };

        TimeSeriesInstances data1 = new TimeSeriesInstances(in, new int[]{0, 1}, new String[] { "A", "B" });

        double[][][] in1 = { {
                //time-series zero.
                {0.0,1.0,2.0,4.0,5.0},
        } };

        TimeSeriesInstances data2 = new TimeSeriesInstances(in1, new int[]{0}, new String[] { "A", "B" });

        TSClassifier stc = new ShapeletTransformClassifier();
        stc.buildClassifier(data1);
        double acc = ClassifierTools.accuracy(data2, stc);
        System.out.println(acc);
    }

    public static void main(String[] args) throws Exception {
        example2();
    }
}
4,597
30.278912
111
java
tsml-java
tsml-java-master/src/main/java/tsml/examples/ts_examples/DataManipulationExample.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package tsml.examples.ts_examples;

import java.util.Arrays;
import java.util.stream.IntStream;

import tsml.data_containers.TimeSeriesInstances;
import tsml.data_containers.utilities.Converter;
import tsml.data_containers.utilities.TimeSeriesSummaryStatistics;
import weka.core.Instances;

/**
 * Worked examples of slicing, truncating, and converting TimeSeriesInstances:
 * vertical (per-timepoint) slicing, horizontal (per-dimension) slicing, and
 * round-trip conversion to/from weka Instances. Only example8 is run from main().
 */
public class DataManipulationExample {

    // Example showing simple vertical slicing: per-timepoint summary statistics.
    // The computed arrays are not printed or returned — this example only
    // demonstrates the getVSliceArray(int) call pattern.
    public static void example1(){
        double[][][] in = {
                //instance zero.
                {
                        //time-series zero.
                        {0.0,1.0,2.0,4.0},
                        //time-series one.
                        {0.0,1.0,2.0,4.0}
                },
                //instance one
                {
                        //time-series zero.
                        {4.0,3.0,2.0,1.0},
                        //time-series one.
                        {4.0,3.0,2.0,1.0}
                } };

        TimeSeriesInstances data = new TimeSeriesInstances(in, new int[]{0, 1}, new String[] { "A", "B" });

        double[] max = new double[data.getMaxLength()];
        double[] min = new double[data.getMaxLength()];
        double[] mean = new double[data.getMaxLength()];
        double[] stdev = new double[data.getMaxLength()];

        //calculate summary stats for each vertical slice of the dataset
        for (int j = 0; j < data.getMaxLength(); j++) {
            double[] slice = data.getVSliceArray(j);
            max[j] = TimeSeriesSummaryStatistics.max(slice);
            min[j] = TimeSeriesSummaryStatistics.min(slice);
            mean[j] = TimeSeriesSummaryStatistics.mean(slice);
            stdev[j] = Math.sqrt(TimeSeriesSummaryStatistics.variance(slice, mean[j]));
        }
    }

    // Example showing vertical slicing of an interval: the first three
    // timepoints of every dimension, rebuilt into a new TimeSeriesInstances.
    public static void example2(){
        double[][][] in = {
                //instance zero.
                {
                        //time-series zero.
                        {0.0,1.0,2.0,4.0},
                        //time-series one.
                        {0.0,1.0,2.0,4.0}
                },
                //instance one
                {
                        //time-series zero.
                        {4.0,3.0,2.0,1.0},
                        //time-series one.
                        {4.0,3.0,2.0,1.0}
                } };

        TimeSeriesInstances data = new TimeSeriesInstances(in, new int[]{0, 1}, new String[] { "A", "B" });

        //this should produce a size 3 interval slice across all dimensions that include atts: 0,1,2
        double[][][] interval = data.getVSliceArray(IntStream.range(0, 3).toArray());
        //equiv: double[][][] interval = data.getSliceArray(new int{0,1,2});

        TimeSeriesInstances data_slice = new TimeSeriesInstances(interval, data.getClassIndexes(), data.getClassLabels());
        System.out.println("Original");
        System.out.println(data);
        // NOTE(review): "Intveral" is a typo in the printed banner, preserved here.
        System.out.println("Intveral of 3");
        System.out.println(data_slice);
    }

    //truncation example: slice every series down to the shortest series length,
    //turning an unequal-length data set into an equal-length one.
    public static void example3(){
        double[][][] in = {
                //instance zero.
                {
                        //time-series zero.
                        {0.0,1.0,2.0,4.0,5.0},
                        //time-series one.
                        {0.0,1.0,2.0,4.0}
                },
                //instance one
                {
                        //time-series zero.
                        {4.0,3.0,2.0,1.0, 7.0, 8.0},
                        //time-series one.
                        {4.0,3.0}
                } };

        TimeSeriesInstances data = new TimeSeriesInstances(in, new int[]{0, 1}, new String[] {"A", "B"});

        //this should produce a size 3 interval slice across all dimensions that include atts: 0,1,2
        double[][][] truncated = data.getVSliceArray(IntStream.range(0, data.getMinLength()).toArray());
        //equiv: double[][][] interval = data.getSliceArray(new int{0,1,2});

        TimeSeriesInstances truncated_data = new TimeSeriesInstances(truncated, data.getClassIndexes(), data.getClassLabels());
        System.out.println("Original");
        System.out.println(data);
        System.out.println("Should be 2 value");
        System.out.println(truncated_data);
    }

    //multivariate unequal example: conversion to weka Instances.
    public static void example4(){
        double[][][] in = {
                //instance zero.
                {
                        //time-series zero.
                        {0.0,1.0,2.0,4.0,5.0},
                        //time-series one.
                        {0.0,1.0,2.0,4.0}
                },
                //instance one
                {
                        //time-series zero.
                        {4.0,3.0,2.0,1.0, 7.0, 8.0},
                        //time-series one.
                        {4.0,3.0}
                } };

        TimeSeriesInstances data = new TimeSeriesInstances(in, new int[]{0, 1}, new String[] { "A", "B" });

        Instances converted = Converter.toArff(data);
        System.out.println(converted.toString());
    }

    //univariate example: conversion to weka Instances.
    public static void example5(){
        double[][][] in = {
                //instance zero.
                {
                        //time-series zero.
                        {0.0,1.0,2.0,4.0,5.0},
                },
                //instance one
                {
                        //time-series zero.
                        {4.0,3.0,2.0,1.0},
                } };

        TimeSeriesInstances data = new TimeSeriesInstances(in, new int[]{0, 1}, new String[] { "A", "B" });

        Instances converted = Converter.toArff(data);
        System.out.println(converted.toString());
    }

    //conversion from and back again. A weka Instances round trip:
    //TimeSeriesInstances -> Instances -> TimeSeriesInstances.
    public static void example6(){
        double[][][] in = {
                //instance zero.
                {
                        //time-series zero.
                        {0.0,1.0,2.0,4.0,5.0},
                },
                //instance one
                {
                        //time-series zero.
                        {4.0,3.0,2.0,1.0},
                } };

        TimeSeriesInstances data = new TimeSeriesInstances(in, new int[]{0, 1}, new String[] { "A", "B" });
        System.out.println(data);

        Instances converted = Converter.toArff(data);
        System.out.println(converted.toString());

        TimeSeriesInstances converted_again = Converter.fromArff(converted);
        System.out.println(converted_again);
    }

    //multivariate unequal example: the same round trip as example6, on
    //unequal-length multivariate data.
    public static void example7(){
        double[][][] in = {
                //instance zero.
                {
                        //time-series zero.
                        {0.0,1.0,2.0,4.0,5.0},
                        //time-series one.
                        {0.0,1.0,2.0,4.0}
                },
                //instance one
                {
                        //time-series zero.
                        {4.0,3.0,2.0,1.0, 7.0, 8.0},
                        //time-series one.
                        {4.0,3.0}
                } };

        TimeSeriesInstances data = new TimeSeriesInstances(in, new int[]{0, 1}, new String[] { "A", "B" });
        System.out.println(data);

        Instances converted = Converter.toArff(data);
        System.out.println(converted.toString());

        TimeSeriesInstances converted_again = Converter.fromArff(converted);
        System.out.println(converted_again);
    }

    //HSlicing example: extract a subset of dimensions (here, dimension zero only).
    public static void example8(){
        double[][][] in = {
                //instance zero.
                {
                        //time-series zero.
                        {0.0,1.0,2.0,4.0,5.0},
                        //time-series one.
                        {0.0,1.0,2.0,4.0}
                },
                //instance one
                {
                        //time-series zero.
                        {4.0,3.0,2.0,1.0, 7.0, 8.0},
                        //time-series one.
                        {4.0,3.0}
                } };

        TimeSeriesInstances data = new TimeSeriesInstances(in, new int[]{0, 1}, new String[] { "A", "B" });

        //this should produce only dimension
        double[][][] single_dimension = data.getHSliceArray(IntStream.range(0, 1).toArray());
        //equiv: double[][][] single_dimension = data.getHSliceArray(new int{0});
        System.out.println(Arrays.deepToString(single_dimension));

        TimeSeriesInstances truncated_data = new TimeSeriesInstances(single_dimension, data.getClassIndexes(), data.getClassLabels());
        System.out.println("Original");
        System.out.println(data);
        System.out.println("Should be 2 value");
        System.out.println(truncated_data);
    }

    public static void main(String[] args) {
        //example1();
        //example2();
        //example3();
        //example4();
        // example5();
        // example6();
        //example7();
        example8();
    }
}
9,211
30.121622
134
java
tsml-java
tsml-java-master/src/main/java/tsml/examples/ts_examples/FileWritingExample.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package tsml.examples.ts_examples;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;

import tsml.data_containers.TimeSeriesInstances;
import tsml.data_containers.ts_fileIO.TSReader;
import tsml.data_containers.ts_fileIO.TSWriter;
import tsml.data_containers.utilities.TimeSeriesResampler;
import tsml.data_containers.utilities.TimeSeriesResampler.TrainTest;

/**
 * Example of reading a train/test .ts file pair, resampling them into fold 1,
 * and writing the resampled fold back out with {@link TSWriter}.
 */
public class FileWritingExample {

    /**
     * Reads the BasicMotions train/test pair from a hard-coded local path,
     * resamples with seed 1, and writes the fold to the working directory.
     *
     * @throws FileNotFoundException if the input files do not exist
     * @throws IOException on any other read/write failure
     */
    public static void example1() throws FileNotFoundException, IOException {
        String m_local_path = "D:\\Work\\Data\\Multivariate_ts\\";
        String m_local_path_orig = "D:\\Work\\Data\\Multivariate_arff\\";
        String dataset = "BasicMotions";
        String filepath = m_local_path + dataset + "\\" + dataset;
        // Kept from the original for reference; not used in this example.
        String filepath_Arff = m_local_path_orig + dataset + "\\" + dataset;

        // BUG FIX: the FileReaders were previously never closed (resource leak).
        // try-with-resources closes each one as soon as its instances are read.
        TimeSeriesInstances ts_train_data;
        try (FileReader trainReader = new FileReader(new File(filepath + "_TRAIN" + ".ts"))) {
            ts_train_data = new TSReader(trainReader).GetInstances();
        }

        TimeSeriesInstances ts_test_data;
        try (FileReader testReader = new FileReader(new File(filepath + "_TEST" + ".ts"))) {
            ts_test_data = new TSReader(testReader).GetInstances();
        }

        TrainTest out2 = TimeSeriesResampler.resampleTrainTest(ts_train_data, ts_test_data, 1);

        // NOTE(review): the output names contain a space before the fold number
        // ("..._TRAIN 1.ts"); preserved as-is, but confirm this is intended.
        TSWriter writer = new TSWriter(new File(dataset + "_TRAIN " + 1 + ".ts"));
        writer.setData(out2.train);
        writer.writeBatch();

        writer = new TSWriter(new File(dataset + "_TEST " + 1 + ".ts"));
        writer.setData(out2.test);
        writer.writeBatch();
    }

    public static void main(String[] args) throws FileNotFoundException, IOException {
        example1();
    }
}
2,638
38.984848
97
java
tsml-java
tsml-java-master/src/main/java/tsml/examples/ts_examples/GraphsExample.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.examples.ts_examples; import java.util.Arrays; import machine_learning.classifiers.kNN; import tsml.classifiers.multivariate.ConcatenateClassifier; import tsml.data_containers.TimeSeriesInstance; import tsml.data_containers.TimeSeriesInstances; import tsml.data_containers.utilities.TimeSeriesSummaryStatistics; import tsml.graphs.Pipeline; import tsml.graphs.Pipeline.*; import tsml.transformers.Cosine; import tsml.transformers.Sine; import tsml.transformers.Truncator; public class GraphsExample { public static void example1() throws Exception { double[][][] in = { // instance zero. { // time-series zero. { 0.0, 1.0, 2.0, 4.0, 5.0 }, }, // instance one { // time-series zero. { 4.0, 3.0, 2.0, 1.0, 1.0 }, } }; TimeSeriesInstances data1 = new TimeSeriesInstances(in, new int[] { 0, 1 }, new String[]{"A", "B"}); double[][][] in1 = { { // time-series zero. { 0.0, 1.0, 2.0, 4.0, 5.0 }, } }; TimeSeriesInstances data2 = new TimeSeriesInstances(in1, new int[] { 0 }, new String[]{"A", "B"}); // this is a sequential pipeline. // this is a sequential pipeline. 
Pipeline model = new Pipeline(); model.add("sine", new Sine()); model.add("kNN", new kNN()); model.buildClassifier(data1); double[][] preds = model.distributionForInstances(data2); int count =0; int i=0; for(TimeSeriesInstance inst : data2){ System.out.println(Arrays.toString(preds[i])); if(inst.getLabelIndex() == TimeSeriesSummaryStatistics.argmax(preds[i])) count++; i++; } System.out.println(count); double acc = (double) count / (double) data2.numInstances(); System.out.println(acc); } public static void example2() throws Exception { double[][][] in = { //instance zero. { //time-series zero. {0.0,1.0,2.0,4.0,5.0}, //time-series one. {0.0,1.0,2.0,4.0} }, //instance one { //time-series zero. {4.0,3.0,2.0,1.0, 7.0, 8.0}, //time-series one. {4.0,3.0} } }; TimeSeriesInstances data1 = new TimeSeriesInstances(in, new int[] { 0, 1 }, new String[]{"A", "B"}); double[][][] in1 = { { // time-series zero. { 0.0, 1.0, 2.0, 4.0, 5.0 }, {4.0,3.0,2.0,1.0, 7.0, 8.0} } }; TimeSeriesInstances data2 = new TimeSeriesInstances(in1, new int[] { 0 }, new String[]{"A", "B"}); // this is a sequential pipeline. Pipeline model = new Pipeline(); model.add("Truncator", new Truncator()); //this chops the uneven off. model.concat("stack", new Pipeline.TransformerLayer("Sine0", new Sine()), new Pipeline.TransformerLayer("Cosine1", new Cosine()) ); //TODO: fix for MultivariateSingleEnsemble //model.add("kNN", new MultivariateSingleEnsemble(new kNN())); model.buildClassifier(data1); double[][] preds = model.distributionForInstances(data2); int count =0; int i=0; for(TimeSeriesInstance inst : data2){ System.out.println(Arrays.toString(preds[i])); if(inst.getLabelIndex() == TimeSeriesSummaryStatistics.argmax(preds[i])) count++; i++; } System.out.println(count); double acc = (double) count / (double) data2.numInstances(); System.out.println(acc); } public static void example3() throws Exception { double[][][] in = { //instance zero. { //time-series zero. {0.0,1.0,2.0,4.0,5.0}, //time-series one. 
{0.0,1.0,2.0,4.0} }, //instance one { //time-series zero. {4.0,3.0,2.0,1.0, 7.0, 8.0}, //time-series one. {4.0,3.0} } }; TimeSeriesInstances data1 = new TimeSeriesInstances(in, new int[] { 0, 1 }, new String[]{"A", "B"}); double[][][] in1 = { { // time-series zero. { 0.0, 1.0, 2.0, 4.0, 5.0 }, {4.0,3.0}, } }; TimeSeriesInstances data2 = new TimeSeriesInstances(in1, new int[] { 0 }, new String[]{"A", "B"}); // this is a sequential pipeline. Pipeline model = new Pipeline(); model.add("Truncator", new Truncator()); //this chops the uneven off. Pipeline model0 = new Pipeline(); model0.add("Sine0", new Sine()); model0.add("kNN", new kNN()); Pipeline model1 = new Pipeline(); model1.add("Cosine1", new Cosine()); model1.add("kNN", new kNN()); //this will use mean ensembling across the split pipelines. model.splitAndEnsemble("split", model0, model1); model.buildClassifier(data1); double[][] preds = model.distributionForInstances(data2); int count =0; int i=0; for(TimeSeriesInstance inst : data2){ System.out.println(Arrays.toString(preds[i])); if(inst.getLabelIndex() == TimeSeriesSummaryStatistics.argmax(preds[i])) count++; i++; } System.out.println(count); double acc = (double) count / (double) data2.numInstances(); System.out.println(acc); } public static void example4() throws Exception { double[][][] in = { //instance zero. { //time-series zero. {0.0,1.0,2.0,4.0,5.0}, //time-series one. {0.0,1.0,2.0,4.0} }, //instance one { //time-series zero. {4.0,3.0,2.0,1.0, 7.0, 8.0}, //time-series one. {4.0,3.0} } }; TimeSeriesInstances data1 = new TimeSeriesInstances(in, new int[] { 0, 1 }, new String[]{"A", "B"}); double[][][] in1 = { { // time-series zero. { 0.0, 1.0, 2.0, 4.0, 5.0 }, {4.0,3.0}, } }; TimeSeriesInstances data2 = new TimeSeriesInstances(in1, new int[] { 0 }, new String[]{"A", "B"}); // this is a sequential pipeline. Pipeline model = new Pipeline(); model.add("Truncator", new Truncator()); //this chops the uneven off. 
Pipeline model0 = new Pipeline(); model0.add("Sine0", new Sine()); Pipeline model1 = new Pipeline(); model1.add("Cosine1", new Cosine()); //if you're going to split and merge. //unless you want to stack. don't put a classifier on the end of the pipelines. model.split("split", model0, model1); model.add("kNN", new ConcatenateClassifier(new kNN())); model.buildClassifier(data1); double[][] preds = model.distributionForInstances(data2); int count =0; int i=0; for(TimeSeriesInstance inst : data2){ System.out.println(Arrays.toString(preds[i])); if(inst.getLabelIndex() == TimeSeriesSummaryStatistics.argmax(preds[i])) count++; i++; } System.out.println(count); double acc = (double) count / (double) data2.numInstances(); System.out.println(acc); } public static void example5() throws Exception { double[][][] in = { //instance zero. { //time-series zero. {0.0,1.0,2.0,4.0,5.0}, //time-series one. {0.0,1.0,2.0,4.0} }, //instance one { //time-series zero. {4.0,3.0,2.0,1.0, 7.0, 8.0}, //time-series one. {4.0,3.0} } }; TimeSeriesInstances data1 = new TimeSeriesInstances(in, new int[] { 0, 1 }, new String[]{"A", "B"}); double[][][] in1 = { { // time-series zero. { 0.0, 1.0, 2.0, 4.0, 5.0 }, {4.0,3.0}, } }; TimeSeriesInstances data2 = new TimeSeriesInstances(in1, new int[] { 0 }, new String[]{"A", "B"}); // this is a sequential pipeline. Pipeline model = new Pipeline(); model.add("Truncator", new Truncator()); //this chops the uneven off. Pipeline model0 = new Pipeline(); model0.add("Sine0", new Sine()); Pipeline model1 = new Pipeline(); model1.add("Cosine1", new Cosine()); //if you're going to split and merge. //unless you want to stack. don't put a classifier on the end of the pipelines. 
model.split("split", model0, model1); //TODO: fix for MultivariateSingleEnsemble //model.add("kNN", new MultivariateSingleEnsemble(new kNN())); model.buildClassifier(data1); double[][] preds = model.distributionForInstances(data2); int count =0; int i=0; for(TimeSeriesInstance inst : data2){ System.out.println(Arrays.toString(preds[i])); if(inst.getLabelIndex() == TimeSeriesSummaryStatistics.argmax(preds[i])) count++; i++; } System.out.println(count); double acc = (double) count / (double) data2.numInstances(); System.out.println(acc); } public static void example6() throws Exception { double[][][] in = { //instance zero. { //time-series zero. {0.0,1.0,2.0,4.0,5.0}, //time-series one. {0.0,1.0,2.0,4.0}, {0.0,1.0,2.0,4.0} }, //instance one { //time-series zero. {4.0,3.0,2.0,1.0, 7.0, 8.0}, //time-series one. {4.0,3.0}, {0.0,1.0,2.0,4.0} } }; TimeSeriesInstances data1 = new TimeSeriesInstances(in, new int[] { 0, 1 }, new String[]{"A", "B"}); double[][][] in1 = { { // time-series zero. { 0.0, 1.0, 2.0, 4.0, 5.0 }, {4.0,3.0}, {0.0,1.0,2.0,4.0} } }; TimeSeriesInstances data2 = new TimeSeriesInstances(in1, new int[] { 0 }, new String[]{"A", "B"}); // this is a sequential pipeline. Pipeline model = new Pipeline(); model.add("Truncator", new Truncator()); //this chops the uneven off. Pipeline model0 = new Pipeline(); model0.add("Sine0", new Sine()); Pipeline model1 = new Pipeline(); model1.add("Cosine1", new Cosine()); //if you're going to split and merge. //unless you want to stack. don't put a classifier on the end of the pipelines. 
//this will perform Pipeline one on dims 0 and 1 //this will perform Pipeline two on dim 2 model.split("split", new int[][]{{0,1},{2}}, model0, model1); //TODO: fix for MultivariateSingleEnsemble //model.add("kNN", new MultivariateSingleEnsemble(new kNN())); model.buildClassifier(data1); double[][] preds = model.distributionForInstances(data2); int count =0; int i=0; for(TimeSeriesInstance inst : data2){ System.out.println(Arrays.toString(preds[i])); if(inst.getLabelIndex() == TimeSeriesSummaryStatistics.argmax(preds[i])) count++; i++; } System.out.println(count); double acc = (double) count / (double) data2.numInstances(); System.out.println(acc); } public static void main(String[] args) throws Exception { example1(); example2(); example3(); example4(); example5(); System.out.println("---------------------------- Example 6--------------------"); example6(); } }
13,234
30.891566
108
java
tsml-java
tsml-java-master/src/main/java/tsml/examples/ts_examples/HashExample.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.examples.ts_examples; import java.util.Arrays; import java.util.List; import tsml.data_containers.TimeSeries; import tsml.data_containers.TimeSeriesInstance; import tsml.data_containers.TimeSeriesInstances; public class HashExample { public static void example1() { double[] in = { 0.0, 1.0, 2.0, 4.0, 5.0 }; List<Double> in_list = Arrays.asList(0.0, 1.0, 2.0, 4.0, 5.0); TimeSeries data = new TimeSeries(in); System.out.println(data.hashCode() == Arrays.hashCode(in)); System.out.println(data.hashCode() == in_list.hashCode()); } public static void example2() { double[][] in = { {0.0, 1.0, 2.0, 4.0, 5.0} }; List<List<Double>> in_list = Arrays.asList(Arrays.asList(0.0, 1.0, 2.0, 4.0, 5.0)); TimeSeriesInstance data1 = new TimeSeriesInstance(in, 0, new String[]{"A", "B"}); System.out.println(data1.hashCode() == Arrays.deepHashCode(in)); System.out.println(data1.hashCode() == in_list.hashCode()); } public static void example3() { double[][][] in = { { { 0.0, 1.0, 2.0, 4.0, 5.0 }, }, { { 4.0, 3.0, 2.0, 1.0 }, } }; List<List<List<Double>>> in_list = Arrays.asList( Arrays.asList(Arrays.asList(0.0, 1.0, 2.0, 4.0, 5.0)), Arrays.asList(Arrays.asList(4.0, 3.0, 2.0, 1.0)) ); TimeSeriesInstances data = new TimeSeriesInstances(in, new int[] { 
0, 1 }, new String[]{"A", "B"}); System.out.println(data.hashCode() == Arrays.deepHashCode(in)); System.out.println(data.hashCode() == in_list.hashCode()); } public static void main(String[] args) { example1(); example2(); example3(); } }
2,761
33.525
107
java
tsml-java
tsml-java-master/src/main/java/tsml/examples/ts_examples/ResamplingExamples.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.examples.ts_examples; import java.io.File; import java.io.FileNotFoundException; import java.io.FileReader; import java.io.IOException; import experiments.data.DatasetLoading; import tsml.data_containers.TimeSeriesInstances; import tsml.data_containers.ts_fileIO.TSReader; import tsml.data_containers.utilities.TimeSeriesResampler; import tsml.data_containers.utilities.TimeSeriesResampler.TrainTest; import utilities.InstanceTools; import utilities.multivariate_tools.MultivariateInstanceTools; import weka.core.Instances; public class ResamplingExamples { public static void example1() throws FileNotFoundException, IOException { String local_path = "D:\\Work\\Data\\Univariate_ts\\"; String local_path_orig = "D:\\Work\\Data\\Univariate_arff\\"; String dataset = "ItalyPowerDemand"; String filepath = local_path + dataset + "\\" + dataset; String filepath_Arff = local_path_orig + dataset + "\\" + dataset; TSReader ts_reader = new TSReader(new FileReader(new File(filepath + "_TRAIN" + ".ts"))); TimeSeriesInstances ts_train_data = ts_reader.GetInstances(); ts_reader = new TSReader(new FileReader(new File(filepath + "_TEST" + ".ts"))); TimeSeriesInstances ts_test_data = ts_reader.GetInstances(); Instances train_data = 
DatasetLoading.loadData(filepath_Arff + "_TRAIN" + ".arff"); Instances test_data = DatasetLoading.loadData(filepath_Arff + "_TEST" + ".arff"); Instances[] out1 = InstanceTools.resampleTrainAndTestInstances(train_data, test_data, 1); System.out.println(out1[0].instance(0)); System.out.println(out1[1].instance(0)); TrainTest out2 = TimeSeriesResampler.resampleTrainTest(ts_train_data, ts_test_data, 1); System.out.println(out2.train.get(0)); System.out.println(out2.test.get(0)); } public static void example2() throws FileNotFoundException, IOException { String m_local_path = "D:\\Work\\Data\\Multivariate_ts\\"; String m_local_path_orig = "D:\\Work\\Data\\Multivariate_arff\\"; String dataset = "BasicMotions"; String filepath = m_local_path + dataset + "\\" + dataset; String filepath_Arff = m_local_path_orig + dataset + "\\" + dataset; TSReader ts_reader = new TSReader(new FileReader(new File(filepath + "_TRAIN" + ".ts"))); TimeSeriesInstances ts_train_data = ts_reader.GetInstances(); ts_reader = new TSReader(new FileReader(new File(filepath + "_TEST" + ".ts"))); TimeSeriesInstances ts_test_data = ts_reader.GetInstances(); Instances train_data = DatasetLoading.loadData(filepath_Arff + "_TRAIN" + ".arff"); Instances test_data = DatasetLoading.loadData(filepath_Arff + "_TEST" + ".arff"); Instances[] out1 = MultivariateInstanceTools.resampleMultivariateTrainAndTestInstances(train_data, test_data, 1); System.out.println(out1[0].instance(0)); System.out.println(out1[1].instance(0)); TrainTest out2 = TimeSeriesResampler.resampleTrainTest(ts_train_data, ts_test_data, 1); System.out.println(out2.train.get(0)); System.out.println(out2.test.get(0)); } public static void main(String[] args) throws Exception { example1(); System.out.println("----------------------------------------------------"); example2(); } }
4,189
42.195876
121
java
tsml-java
tsml-java-master/src/main/java/tsml/examples/ts_examples/SplittingExamples.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.examples.ts_examples; import java.util.ArrayList; import java.util.List; import tsml.classifiers.EnhancedAbstractClassifier; import tsml.classifiers.interval_based.RISE; import tsml.classifiers.interval_based.TSF; import tsml.data_containers.TimeSeriesInstances; import tsml.data_containers.utilities.Splitter; public class SplittingExamples { public void example1() throws Exception { final double[][][] in = { //instance zero. { //time-series zero. {0.0,1.0,2.0,4.0, 5.0}, {4.0,3.0,2.0,1.0, 1.0}, {4.0,3.0,2.0,1.0, 1.0}, }, //instance one { //time-series zero. {4.0,3.0,2.0,1.0, 1.0}, {4.0,3.0,2.0,1.0, 1.0}, {4.0,3.0,2.0,1.0, 1.0}, } }; final TimeSeriesInstances data1 = new TimeSeriesInstances(in, new int[]{0, 1}, new String[]{"A", "B"}); final List<TimeSeriesInstances> individual_dims = Splitter.splitTimeSeriesInstances(data1); //train separate models on univariate data. final List<EnhancedAbstractClassifier> clfs = new ArrayList<>(individual_dims.size()); for(final TimeSeriesInstances data : individual_dims){ final TSF tsf = new TSF(1); tsf.buildClassifier(data); clfs.add(tsf); } //do some ensembling. combine predictions. etc. } public void example2() throws Exception { final double[][][] in = { //instance zero. 
{ //time-series zero. {0.0,1.0,2.0,4.0, 5.0}, {4.0,3.0,2.0,1.0, 1.0}, {4.0,3.0,2.0,1.0, 1.0}, }, //instance one { //time-series zero. {4.0,3.0,2.0,1.0, 1.0}, {4.0,3.0,2.0,1.0, 1.0}, {4.0,3.0,2.0,1.0, 1.0}, } }; final TimeSeriesInstances data1 = new TimeSeriesInstances(in, new int[]{0, 1}, new String[] {"A", "B"}); final List<TimeSeriesInstances> individual_dims = Splitter.splitTimeSeriesInstances(data1, new int[][]{{0},{1,2}}); EnhancedAbstractClassifier[] clfs = new EnhancedAbstractClassifier[]{new TSF(), new RISE()}; for(int i=0; i<individual_dims.size(); i++){ clfs[i].buildClassifier(individual_dims.get(i)); } //ensemble in some clever way. } }
3,291
32.591837
123
java
tsml-java
tsml-java-master/src/main/java/tsml/examples/ts_examples/TransformationExample.java
package tsml.examples.ts_examples; import java.util.function.Function; import tsml.data_containers.TimeSeriesInstances; import tsml.data_containers.utilities.Converter; import tsml.transformers.ShapeletTransform; import tsml.transformers.shapelet_tools.DefaultShapeletOptions; import tsml.transformers.shapelet_tools.ShapeletTransformFactory; import weka.core.Instances; public class TransformationExample { public enum MyEnum { ONE,TWO; public String toString(){ return this.name(); } } public static void main1(){ MyEnum.ONE.toString(); } // Using a Weka Classifier the annoying way. public static void example_full() throws Exception { double[][][] in = { //instance zero. { //time-series zero. {0.0,1.0,2.0,4.0,5.0}, {0.0,1.0,2.0,4.0,5.0} }, //instance one { //time-series zero. {4.0,3.0,2.0,1.0, 1.0}, {4.0,3.0,2.0,1.0, 1.0} } }; TimeSeriesInstances data1 = new TimeSeriesInstances(in, new int[]{0, 1}, new String[]{"A", "B"}); double[][][] in1 = { { //time-series zero. {0.0,1.0,2.0,4.0,5.0}, {0.0,1.0,2.0,4.0,5.0} } }; TimeSeriesInstances data2 = new TimeSeriesInstances(in1, new int[]{0}, new String[]{"A", "B"}); //build dummy data just to show it works. Instances train = Converter.toArff(data1); ShapeletTransform trans = new ShapeletTransformFactory(DefaultShapeletOptions.createSHAPELET_D(train)) .getTransform(); Instances t_train = trans.fitTransform(train); System.out.println(t_train); } // Using a Weka Classifier the annoying way. public static void example_timed() throws Exception { double[][][] in = { //instance zero. { //time-series zero. {0.0,1.0,2.0,4.0,5.0}, {0.0,1.0,2.0,4.0,5.0} }, //instance one { //time-series zero. {4.0,3.0,2.0,1.0, 1.0}, {4.0,3.0,2.0,1.0, 1.0} } }; TimeSeriesInstances data1 = new TimeSeriesInstances(in, new int[]{0, 1}, new String[]{"A", "B"}); double[][][] in1 = { { //time-series zero. 
{0.0,1.0,2.0,4.0,5.0}, {0.0,1.0,2.0,4.0,5.0} } }; TimeSeriesInstances data2 = new TimeSeriesInstances(in1, new int[]{0}, new String[]{"A", "B"}); //build dummy data just to show it works. Instances train = Converter.toArff(data1); ShapeletTransform trans1 = new ShapeletTransformFactory(DefaultShapeletOptions.createSHAPELET_I_TIMED(train, 10000l, 0)) .getTransform(); TimeSeriesInstances t_train1 = trans1.fitTransformConverter(train); ShapeletTransform trans2 = new ShapeletTransformFactory(DefaultShapeletOptions.createSHAPELET_I_TIMED(train, 10000l, 0)) .getTransform(); TimeSeriesInstances t_train2 = trans2.fitTransform(data1); System.out.println(t_train1 == t_train2); } public static void main(String[] args) throws Exception { // example_full(); example_timed(); Function<Integer, Function<Integer, Integer>> add = a -> b -> a+b; int a = add.apply(10).apply(5); Function<Integer, Integer> partial_add = add.apply(10); int b = partial_add.apply(5); } }
3,742
28.472441
128
java
tsml-java
tsml-java-master/src/main/java/tsml/filters/FFT.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.filters; /* Performs a FFT of the data set. NOTE: * 1. If algorithm type is set to DFT, then this will only perform a FFT if the series is length power of 2. * otherwise it will perform the order m^2 DFT. * 2. If algorithm type is set to FFT, then, if the length is not a powerr of 2, it either truncates or pads * (determined by the variable pad) with the mean the each series (i.e. each Instance) * so that the new length is power of 2 by flag pad (default true) * 2. By default, stoAlgorithmTyperes the complex terms in order, so att 1 is real coeff of Fourier term 1, attribute 2 the imag etc * 3. 
Only stores the first half of the Fourier terms (which are duplicates of the second half) * * Note that the series does store the first fourier term (series mean) and the * imaginary part will always be zero */ import weka.core.*; import weka.filters.SimpleBatchFilter; public class FFT extends SimpleBatchFilter { /** * */ public enum AlgorithmType {DFT,FFT} //If set to DFT, this will only perform a FFT if the series is length power of 2, otherwise resorts to DFT AlgorithmType algo=AlgorithmType.DFT; //If set to FFT, this will pad (or truncate) series to the nearest power of 2 private static final long serialVersionUID = 1L; private boolean pad=true; private static final double TWOPI = (Math.PI * 2); public void padSeries(boolean b){pad=b;} public void useDFT(){ algo=AlgorithmType.DFT; } public void useFFT(){ algo=AlgorithmType.FFT; } @Override protected Instances determineOutputFormat(Instances inputFormat) throws Exception { //Check all attributes are real valued, otherwise throw exception for(int i=0;i<inputFormat.numAttributes();i++) if(inputFormat.classIndex()!=i) if(!inputFormat.attribute(i).isNumeric()) throw new Exception("Non numeric attribute not allowed in FFT"); /** This method determines whether padding is required. The If the DFT is being calculated, the length will be 2*m, where m= (numAttributes -1) If the FFT is being used * if pad ==true * find x=first ^2 greater than m * length=x * else * find x=first ^2 greater than m , y last ^2 less than m * length= min(x-m,m-y) **/ int length=findLength(inputFormat); //Set up instances size and format. 
FastVector atts=new FastVector(); String name; for(int i=0;i<length;i++){ if(i%2==0) name="FFT_"+(i/2)+"_Real"; else name="FFT_"+(i/2)+"_Imag"; atts.addElement(new Attribute(name)); } if(inputFormat.classIndex()>=0){ //Classification set, set class //Get the class values as a fast vector Attribute target =inputFormat.attribute(inputFormat.classIndex()); FastVector vals=new FastVector(target.numValues()); for(int i=0;i<target.numValues();i++) vals.addElement(target.value(i)); atts.addElement(new Attribute(inputFormat.attribute(inputFormat.classIndex()).name(),vals)); } Instances result = new Instances("FFT_"+inputFormat.relationName(),atts,inputFormat.numInstances()); if(inputFormat.classIndex()>=0) result.setClassIndex(result.numAttributes()-1); return result; } protected int findLength(Instances inputFormat){ if(algo==AlgorithmType.FFT) return findPowerOfTwoLength(inputFormat); else if(algo==AlgorithmType.DFT){ if(inputFormat.classIndex()>=0){ //Classification set, dont transform the target class! return (inputFormat.numAttributes()-1); } else return inputFormat.numAttributes(); } throw new RuntimeException("Algorithm Type+ "+algo+" has not been implemented for FFT Class"); } //Length of the series NOT COUNTING THE CLASS ATTRIBUTE protected int findPowerOfTwoLength(Instances inputFormat){ int oldLength=0; int length=0; if(inputFormat.classIndex()>=0) //Classification set, dont transform the target class! 
oldLength=inputFormat.numAttributes()-1; else oldLength=inputFormat.numAttributes(); //Check if a power of 2, if not either pad or truncate if(!MathsPower2.isPow2(oldLength)){ length=(int)MathsPower2.roundPow2((float)oldLength); if(pad){ if(length<oldLength) length*=2; }else{ if(length>oldLength) length/=2; } } else length=oldLength; return length; } @Override public String globalInfo() { return null; } /** * * @param instances * @return Fourier transforms, each consecutive two terms are the real/imaginary * @throws Exception * This process only stores half the Fourier terms, since the second half are just * a duplicate of the first half with a different sign for the imaginary term * If the DFT algorithm is used, it returns exactly m terms (where m is the original series length * If FFT is used it returns x/2, where x is either the smallest power of 2 greater than * m (padding), or the largest power of 2 less than m (truncating). * If the variable pad is true, it ALWAYS pads, if pad==false it will go to the closest power of 2 * above or below. */ @Override public Instances process(Instances instances) throws Exception { Instances output=determineOutputFormat(instances); int originalLength=instances.numAttributes(); if(instances.classIndex()>=0){ originalLength--; } //Get the length of the full complex series, which might be padded or truncated. int fullLength=findLength(instances); //For each data, first extract the relevant data //Note the transform will be at least twice as long as the original //Length is the number of COMPLEX terms, which is HALF the length of the original series. for(int i=0;i<instances.numInstances();i++){ //1. Get original series stored in a complex array. This may be padded or truncated //depending on the original length. If DFT is being used, it is neither. 
Complex[] c=new Complex[fullLength]; int count=0; double seriesTotal=0; for(int j=0;j<originalLength&&count<c.length;j++){ //May cut off the trailing values if(instances.classIndex()!=j){ c[count]=new Complex(instances.instance(i).value(j),0.0); seriesTotal+=instances.instance(i).value(j); count++; } } //Add any Padding required double mean=seriesTotal/count; while(count<c.length) c[count++]=new Complex(mean,0); //2. Find FFT/DFT of series. if(algo==AlgorithmType.FFT) fft(c,c.length); else c=dft(c); //Extract out the terms and set the attributes. Instance inst=new DenseInstance(c.length+1); for(int j=0;j<c.length/2;j++){ inst.setValue(2*j, c[j].real); inst.setValue(2*j+1, c[j].imag); } //Set class value. //Set class value. if(instances.classIndex()>=0) inst.setValue(output.classIndex(), instances.instance(i).classValue()); output.add(inst); } return output; } /** Perform a discrete fourier transform, O(n^2) * */ public Complex[] dft(double[] series) { int n=series.length; Complex[] dft=new Complex[n]; for (int k = 0; k < n; k++) { // For each output element float sumreal = 0; float sumimag = 0; for (int t = 0; t < series.length; t++) { // For each input element sumreal += series[t]*Math.cos(2*Math.PI * t * k / n); sumimag += -series[t]*Math.sin(2*Math.PI * t * k / n); } dft[k]=new Complex(sumreal,sumimag); } return dft; } public Complex[] dft(Complex[] complex) { int n=complex.length; Complex[] dft=new Complex[n]; for (int k = 0; k < n; k++) { // For each output element float sumreal = 0; float sumimag = 0; for (int t = 0; t < complex.length; t++) { // For each input element sumreal += complex[t].real*Math.cos(2*Math.PI * t * k / n) + complex[t].imag*Math.sin(2*Math.PI * t * k / n); sumimag += -complex[t].real*Math.sin(2*Math.PI * t * k / n) + complex[t].imag*Math.cos(2*Math.PI * t * k / n); } dft[k]=new Complex(sumreal,sumimag); } return dft; } /** Perform an in-place mixed-radix inverse Fast Fourier Transform on the first <code>n</code> elements of the given set 
of <code>Complex</code> numbers. If <code>n</code> is not a power of two then the inverse FFT is performed on the first N numbers where N is largest power of two less than <code>n</code> */ public void fft(Complex[] complex, int n) { fft(1, complex, n); } /** Sort a set of <code>Complex</code> numbers into a bit-reversed order - only sort the first <code>n</code> elements. This method performs the sort in-place */ public static void bitReverse(Complex[] complex, int n) { int halfN = n / 2; int i, j, m; Complex temp; for (i = j = 0; i < n; ++i) { if (j >= i) { temp = complex[j]; complex[j] = complex[i]; complex[i] = temp; } m = halfN; while (m >= 1 && j >= m) { j -= m; m /= 2; } j += m; } temp = null; } /** Perform an in-place mixed-radix inverse Fast Fourier Transform on the first <code>n</code> elements of the given set of <code>Complex</code> numbers. If <code>n</code> is not a power of two then the inverse FFT is performed on the first N numbers where N is largest power of two less than <code>n</code> */ public void inverseFFT(Complex[] complex, int n) { fft(-1, complex, n); } // Perform an in-place mixed-radix FFT (if sign is 1) or inverse // FFT (if sign is -1) on the first n elements of the given set of // Complex numbers. Round n to the nearest power of two. // // This method performs the FFT in-place on the given set. private void fft(int sign, Complex[] complex, int n) { // n is number of data elements upon which FFT will be // performed. Round number of data elements to nearest power // of 2 n = (int)MathsPower2.roundPow2(n); // Sort the first n elements into bit-reversed order bitReverse(complex, n); if (n == 2) { // Perform a radix-2 FFT radix2FFT(sign, complex, n, 0); } else if (((float)Math.log(n) % (float)Math.log(4)) == 0) { // Perform a radix-4 FFT radix4FFT(sign, complex, n, 0); } else { // n is a multiple or two or four [8, 32, 128, ...] 
// Perform a mixed-radix FFT int halfN = n / 2; // Do a radix-4 transform on elements 0..halfN - 1 which // contains even-indexed elements from the original // unsorted set of numbers by definition of the bit // reversal operation radix4FFT(sign, complex, halfN, 0); // Do a radix-4 transform on elements halfN - 1 .. n - 1 // which contains odd-indexed elements from the original // unsorted set of numbers by definition of the bit // reversal operation radix4FFT(sign, complex, halfN, halfN); // Pair off even and odd elements and do final radix-2 // transforms, multiplying by twiddle factors as required // Loop variables used to point to pairs of even and odd // elements int g, h; // Array of two complex numbers for performing radix-2 // FFTs on pairs of elements Complex[] radix2x2 = new Complex[2]; // Twiddle factor Complex twiddle = new Complex(); // Values defining twiddle factor double delta = -sign * TWOPI / n; double w = 0; for (g = 0, h = halfN; g < halfN; g++, h++) { // Twiddle factors... twiddle.setRealImag((float)Math.cos(w), (float)Math.sin(w)); complex[h].multiply(twiddle); radix2x2[0] = complex[g]; radix2x2[1] = complex[h]; // Perform the radix-2 FFT radix2FFT(sign, radix2x2, 2, 0); complex[g] = radix2x2[0]; complex[h] = radix2x2[1]; w += delta; } radix2x2 = null; twiddle = null; } if (sign == -1) { // Divide all values by n for (int g = 0; g < n; g++) { complex[g].divide(n); } } } // Perform an in-place radix-4 FFT (if sign is 1) or inverse // FFT (if sign is -1). FFT is performed in the n elements // starting at index lower // // Assumes that n is a power of 2 and that lower + n is less than // or equal to the number of complex numbers given // // This method performs the FFT in-place on the given set. 
private static void radix4FFT(int sign, Complex[] complex, int n, int lower) { // Index of last element in array which will take part in the // FFT int upper = n + lower; // Variables used to hold the indicies of the elements forming // the four inputs to a butterfly int i, j, k, l; // Variables holding the results of the four main operations // performed when processing a butterfly Complex ijAdd = new Complex(); Complex klAdd = new Complex(); Complex ijSub = new Complex(); Complex klSub = new Complex(); // Twiddle factor Complex twiddle = new Complex(); // Values defining twiddle factor double delta, w, w2, w3; double deltaLower = -sign * TWOPI; // intraGap is number of array elements between the // two inputs to a butterfly (equivalent to the number of // butterflies in a cluster) int intraGap; // interGap is the number of array elements between the first // input of the ith butterfly in two adjacent clusters int interGap; for (intraGap = 1, interGap = 4 * intraGap; intraGap < n; intraGap = interGap, interGap = 4 * intraGap) { delta = deltaLower / (float)interGap; // For each butterfly in a cluster w = w2 = w3 = 0; for (int but = 0; but < intraGap; ++but) { // Process the intraGap-th butterfly in each cluster // i is the top input to a butterfly and j the second, // k third and l fourth for (i = (but + lower), j = i + intraGap, k = j + intraGap, l = k + intraGap; i < upper; i += interGap, j += interGap, k += interGap, l += interGap) { // Calculate and apply twiddle factors // cos(0) = 1 and sin(0) = 0 twiddle.setRealImag(1, 0); complex[i].multiply(twiddle); twiddle.setRealImag((float)Math.cos(w2), (float)Math.sin(w2)); complex[j].multiply(twiddle); twiddle.setRealImag((float)Math.cos(w), (float)Math.sin(w)); complex[k].multiply(twiddle); twiddle.setRealImag((float)Math.cos(w3), (float)Math.sin(w3)); complex[l].multiply(twiddle); // Compute the butterfly Complex.add(complex[i], complex[j], ijAdd); Complex.subtract(complex[i], complex[j], ijSub); 
Complex.add(complex[k], complex[l], klAdd); Complex.subtract(complex[k], complex[l], klSub); // Assign values Complex.add(ijAdd, klAdd, complex[i]); klSub.multiply(sign); complex[j].setRealImag(ijSub.getReal() + klSub.getImag(), ijSub.getImag() - klSub.getReal()); Complex.subtract(ijAdd, klAdd, complex[k]); complex[l].setRealImag(ijSub.getReal() - klSub.getImag(), ijSub.getImag() + klSub.getReal()); } w += delta; w2 = w + w; w3 = w2 + w; } intraGap = interGap; } ijAdd = klAdd = ijSub = klSub = twiddle = null; } // Perform an in-place radix-2 FFT (if sign is 1) or inverse // FFT (if sign is -1). FFT is performed in the n elements // starting at index lower // // Assumes that n is a power of 2 and that lower + n is less than // or equal to the number of complex numbers given... // // This method performs the FFT in-place on the given set. private static void radix2FFT(int sign, Complex[] complex, int n, int lower) { // Index of last element in array which will take part in the // FFT int upper = n + lower; // Variables used to hold the indicies of the elements forming // the two inputs to a butterfly int i, j; // intraGap is number of array elements between the // two inputs to a butterfly (equivalent to the number of // butterflies in a cluster) int intraGap; // interGap is the number of array elements between the first // input of the ith butterfly in two adjacent clusters int interGap; // The twiddle factor Complex twiddle = new Complex(); // Values defining twiddle factor float deltaLower = -(float)(sign * Math.PI); float w, delta; // Variable used to hold result of multiplying butterfly input // by a twiddle factor Complex twiddledInput = new Complex(); for (intraGap = 1, interGap = intraGap + intraGap; intraGap < n; intraGap = interGap, interGap = intraGap + intraGap) { delta = deltaLower / (float)intraGap; // For each butterfly in a cluster w = 0; for (int butterfly = 0; butterfly < intraGap; ++butterfly) { // Calculate the twiddle factor 
twiddle.setRealImag((float)Math.cos(w), (float)Math.sin(w)); // i is the top input to a butterfly and j the // bottom for (i = (butterfly + lower), j = i + intraGap; i < upper; i += interGap, j += interGap) { // Calculate the butterfly-th butterfly in // each cluster // Apply the twiddle factor Complex.multiply(complex[j], twiddle, twiddledInput); // Subtraction part of butterfly Complex.subtract(complex[i], twiddledInput, complex[j]); // Addition part of butterfly complex[i].add(twiddledInput); } w += delta; } intraGap = interGap; } twiddle = twiddledInput = null; } public String getRevision() { return null; } public static class MathsPower2 { /** Return 2 to the power of <code>power</code> */ public static int pow2(int power) { return (1 << power); } /** Is <code>value</code> a power of 2? */ public static boolean isPow2(int value) { return (value == (int)roundPow2(value)); } /** Round <code>value</code> to nearest power of 2 */ public static float roundPow2(float value) { float power = (float)(Math.log(value) / Math.log(2)); int intPower = Math.round(power); return (float)(pow2(intPower)); } /** Return the log to base 2 of <code>value</code> rounded to the nearest integer */ public static int integerLog2(float value) { int intValue; if (value < 2) { intValue = 0; } else if (value < 4) { intValue = 1; } else if (value < 8) { intValue = 2; } else if (value < 16) { intValue = 3; } else if (value < 32) { intValue = 4; } else if (value < 64) { intValue = 5; } else if (value < 128) { intValue = 6; } else if (value < 256) { intValue = 7; } else if (value < 512) { intValue = 8; } else if (value < 1024) { intValue = 9; } else if (value < 2048) { intValue = 10; } else if (value < 4098) { intValue = 11; } else if (value < 8192) { intValue = 12; } else { intValue = Math.round(roundPow2(value)); } return intValue; } } /** Remove all attributes unless the target class * I'm not sure if the indexing changes * @param n */ public void truncate(Instances d, int n){ int att=n; 
    if(att<d.numAttributes()-1){//Remove the first two terms first
        d.deleteAttributeAt(0);
        d.deleteAttributeAt(0);
    }
    // Delete every remaining attribute at/after position att,
    // stepping over the class attribute.
    while(att<d.numAttributes()){
        if(att==d.classIndex())
            att++;
        else
            d.deleteAttributeAt(att);
    }
}

/**
 * Naive O(n^2) discrete Fourier transform of the complex series
 * (inreal, inimag) into (outreal, outimag). Assumes all four arrays share
 * the length n = inreal.length; the output arrays are overwritten.
 */
public static void computeDft(double[] inreal, double[] inimag, double[] outreal, double[] outimag) {
    int n = inreal.length;
    for (int k = 0; k < n; k++) { // For each output element
        double sumreal = 0;
        double sumimag = 0;
        for (int t = 0; t < n; t++) { // For each input element
            sumreal += inreal[t]*Math.cos(2*Math.PI * t * k / n) + inimag[t]*Math.sin(2*Math.PI * t * k / n);
            sumimag += -inreal[t]*Math.sin(2*Math.PI * t * k / n) + inimag[t]*Math.cos(2*Math.PI * t * k / n);
        }
        outreal[k] = sumreal;
        outimag[k] = sumimag;
    }
}

/** Author Mike Jackson - University of Edinburgh - 1999-2001 */

/** The <code>Complex</code> class generates objects that represent
complex numbers in terms of real and imaginary components and supports
addition, subtraction, multiplication, scalar multiplication and
division of these numbers. The calculation of complex conjugates,
magnitude, phase and power (in decibels) of the <code>Complex</code>
numbers are also supported.
*/
public static class Complex implements Cloneable {

    /** Constant required to calculate power values in dBs: log 10 */
    protected static final float LOG10 = (float)Math.log(10);

    /** Constant required to calculate power values in dBs: 20 / log 10 */
    protected static final float DBLOG = 20 / LOG10;

    /** Real component */
    protected float real;

    /** Imaginary component */
    protected float imag;

    /** Create a new <code>Complex</code> number 0 + j0 */
    public Complex() {
        real = imag = 0f;
    }

    /** Create a new <code>Complex</code> number <code>real</code> +
        j(<code>imag</code>) */
    public Complex(float real, float imag) {
        this.real = real;
        this.imag = imag;
    }

    public String toString(){
        return real+"+"+imag+"*i";
    }

    /** Create a new <code>Complex</code> number from double components
        (narrowed to float) */
    public Complex(double real, double imag) {
        this.real = (float)real;
        this.imag = (float)imag;
    }

    /** Set the <code>Complex</code> number to be <code>real</code> +
        j(<code>imag</code>) */
    public void setRealImag(float real, float imag) {
        this.real = real;
        this.imag = imag;
    }

    /** Get real component */
    public float getReal() {
        return real;
    }

    /** Set real component */
    public void setReal(float real) {
        this.real = real;
    }

    /** Get imaginary component */
    public float getImag() {
        return imag;
    }

    /** Set imaginary component */
    public void setImag(float imag) {
        this.imag = imag;
    }

    /** Add the given <code>Complex</code> number to this
        <code>Complex</code> number */
    public void add(Complex complex) {
        real += complex.real;
        imag += complex.imag;
    }

    /** Subtract the given <code>Complex</code> number from this
        <code>Complex</code> number */
    public void subtract(Complex complex) {
        real -= complex.real;
        imag -= complex.imag;
    }

    /** Multiply this <code>Complex</code> number by the given factor */
    public void multiply(float factor) {
        real *= factor;
        imag *= factor;
    }

    /** Divide this <code>Complex</code> number by the given factor */
    public void divide(float factor) {
        real /= factor;
        imag /= factor;
    }

    /** Multiply this <code>Complex</code> number by the given
        <code>Complex</code> number */
    public void multiply(Complex complex) {
        float nuReal = real * complex.real - imag * complex.imag;
        float nuImag = real * complex.imag + imag * complex.real;
        real = nuReal;
        imag = nuImag;
    }

    /** Set this <code>Complex</code> number to be its complex
        conjugate */
    public void conjugate() {
        imag = (-imag);
    }

    /** Return result of adding the complex conjugate of this
        <code>Complex</code> number to this <code>Complex</code> number
        (i.e. 2 * real) */
    public float addConjugate() {
        return real + real;
    }

    /** Return result of subtracting the complex conjugate of this
        <code>Complex</code> number from this <code>Complex</code> number
        (i.e. the imaginary part 2 * imag of the purely imaginary result) */
    public float subtractConjugate() {
        return imag + imag;
    }

    /** Return the magnitude of the <code>Complex</code> number */
    public float getMagnitude() {
        return magnitude(real, imag);
    }

    /** Return the phase of the <code>Complex</code> number */
    public float getPhase() {
        return phase(real, imag);
    }

    /** Return the power of this <code>Complex</code> number in dBs */
    public float getPower() {
        return power(real, imag);
    }

    /** Add two <code>Complex</code> numbers: c = a + b */
    public static void add(Complex a, Complex b, Complex c) {
        c.real = a.real + b.real;
        c.imag = a.imag + b.imag;
    }

    /** Subtract two <code>Complex</code> numbers: c = a - b*/
    public static void subtract(Complex a, Complex b, Complex c) {
        c.real = a.real - b.real;
        c.imag = a.imag - b.imag;
    }

    /** Multiply a <code>Complex</code> number by a factor:
        b = a * factor */
    public static void multiply(Complex a, float factor, Complex b) {
        b.real = a.real * factor;
        b.imag = a.imag * factor;
    }

    /** Divide a <code>Complex</code> number by a factor: b = a / factor */
    public static void divide(Complex a, float factor, Complex b) {
        b.real = a.real / factor;
        b.imag = a.imag / factor;
    }

    /** Multiply two <code>Complex</code> numbers: c = a * b */
    public static void multiply(Complex a, Complex b, Complex c) {
        c.real = a.real * b.real - a.imag * b.imag;
        c.imag = a.real * b.imag + a.imag * b.real;
    }

    /** Place the <code>Complex</code> conjugate of a into b */
    public static void conjugate(Complex a, Complex b) {
        b.real = a.real;
        b.imag = -a.imag;
    }

    /** Return the magnitude of a <code>Complex</code> number
        <code>real</code> + (<code>imag</code>)j */
    public static float magnitude(float real, float imag) {
        return (float)Math.sqrt(real * real + imag * imag);
    }

    /** Return the phase of a <code>Complex</code> number
        <code>real</code> + (<code>imag</code>)j */
    public static float phase(float real, float imag) {
        return (float)Math.atan2(imag, real);
    }

    /** Return the power of a <code>Complex</code> number
        <code>real</code> + (<code>imag</code>)j */
    public static float power(float real, float imag) {
        return DBLOG * (float)Math.log(magnitude(real, imag));
    }

    /** Place the real components of the first <code>n</code> elements of
        the array <code>complex</code> of <code>Complex</code> numbers
        into the given <code>reals</code> array */
    public static void reals(int n, Complex[] complex, float[] reals) {
        for (int i = 0; i < n; ++i) {
            reals[i] = complex[i].real;
        }
    }

    /** Place the imaginary components of the first <code>n</code>
        elements of the array <code>complex</code> of <code>Complex</code>
        numbers into the given <code>imags</code> array */
    public static void imaginaries(int n, Complex[] complex, float[] imags) {
        for (int i = 0; i < n; ++i) {
            imags[i] = complex[i].imag;
        }
    }

    /** Place the magnitudes of the first <code>n</code> elements of the
        array <code>complex</code> of <code>Complex</code> numbers into
        the given <code>mags</code> array */
    public static void magnitudes(int n, Complex[] complex, float[] mags) {
        for (int i = 0; i < n; ++i) {
            mags[i] = complex[i].getMagnitude();
        }
    }

    /** Place the powers (in dBs) of the first <code>n</code> elements of
        the array <code>complex</code> of <code>Complex</code> numbers
        into the given <code>powers</code> array */
    public static void powers(int n, Complex[] complex, float[] powers) {
        for (int i = 0; i < n; ++i) {
            powers[i] = complex[i].getPower();
        }
    }

    /** Place the phases (in radians) of the first <code>n</code>
        elements of the array <code>complex</code> of <code>Complex</code>
        numbers into the given <code>phases</code> array */
    public static void phase(int n, Complex[] complex, float[] phases) {
        for (int i = 0; i < n; ++i) {
            phases[i] = complex[i].getPhase();
        }
    }

    /** Return a clone of the <code>Complex</code> object */
    public Object clone() {
        return new Complex(real, imag);
    }
}

//Primitives version, assumes zero mean global, passes max run length
// NOTE(review): unimplemented stub — always returns null.
public int[] processSingleSeries(double[] d, int mrl){
    return null;
}

/** Smoke test: transforms a fixed 8-point series and prints the result
    alongside the expected values recorded in the comment below. */
public static void basicTest(){
    //Test FFT
    //Series 30,-1,2,3,3,2,-1,-4
    /*FFT Desired
     * 34
    19.9289321881345-5.82842712474618i
    32-2i
    34.0710678118655+0.171572875253798i
    34
    34.0710678118655-0.171572875253815i
    32+2i
    19.9289321881345+5.8284271247462i
    FFT Achieved
    34 0
    19.928932 -5.8284273
    32 -2
    34.071068 0.17157269
    34 0
    34.071068 -0.17157269
    32 2
    19.928932 5.8284273
     * */
    //Test FFT with truncation
    System.out.println("Basic test of FFT");
    System.out.println("Series: 30,-1,2,3,3,2,-1,-4");
    System.out.println(" /*FFT Desired"+
    " 34\n"+
    "19.9289321881345-5.82842712474618i\n"+
    "32-2i\n"+
    "34.0710678118655+0.171572875253798i\n"+
    "34\n"+
    "34.0710678118655-0.171572875253815i\n"+
    "32+2i\n"+
    "19.9289321881345+5.8284271247462i\n"+
    "FFT Achieved\n"+
    "34 0\n"+
    "19.928932 -5.8284273\n"+
    "32 -2\n"+
    "34.071068 0.17157269\n"+
    "34 0\n"+
    "34.071068 -0.17157269\n"+
    "32 2\n"+
    "19.928932 5.8284273");
    double[] d={30,-1,2,3,3,2,-1,-4};
    int n=8;
    Complex[] x= new Complex[n];
    for(int i=0;i<n;i++)
        x[i]=new Complex(d[i], 0.0);
    for(int i=0;i<n;i++)
        System.out.println(x[i].getReal()+","+x[i].getImag());
    System.out.println("Transformed");
    FFT fft =new FFT();
    fft.fft(x,x.length);
    for(int i=0;i<n;i++)
        System.out.println(x[i].getReal()+","+x[i].getImag());
    fft.fft(x,x.length);
}

// NOTE(review): empty placeholder — padding behaviour is not actually tested.
public static void paddingTest(){
    /* Test to check it works correctly with padded series
     * //Series 30,-1,2,3,3,2,-1,-4,3
     * //Padded series 30,-1,2,3,3,2,-1,-4,3,0,0,0,0,
     */
}

public
static void main(String[] args){
    // basicTest();
    FFT fft=new FFT();
    int size=8;
    double[] testSeries=new double[size];
    for(int i=0;i<size;i++){
        testSeries[i]=Math.random();
    }
    // Transform the same random series via both dft entry points and
    // print the results side by side for eyeballing.
    Complex[] dft=fft.dft(testSeries);
    Complex[] dft2=new Complex[size];
    for(int i=0;i<size;i++)
        dft2[i]=new Complex(testSeries[i],0);
    Complex[] dft3=fft.dft(dft2);
    for(int i=0;i<size;i++)
        System.out.println(dft[i]+" ::: "+dft3[i]);
    System.exit(0);
    // NOTE(review): unreachable — System.exit(0) above terminates the JVM
    // before this call.
    matlabComparison();
}

/*
 * Comparison to running the Matlab script FFT_Testing
 */
public static void matlabComparison(){
    //MATLAB Output generated by
    // Power of 2: use FFT
    //Create set of instances with 16 attributes, with values
    // Case 1: All Zeros
    // Case 2: 1,2,...16
    // Case 3: -8,-7, -6,...,0,1,...7
    //Case 4: 0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1
    // NOTE(review): body is entirely commented out — this method currently
    // does nothing at runtime.
    // Instances test1=ClassifierTools.loadData("C:\\Users\\ajb\\Dropbox\\TSC Problems\\TestData\\FFT_test1");
    /* Instances test2=ClassifierTools.loadData("C:\\Users\\ajb\\Dropbox\\TSC Problems\\TestData\\FFT_test2");
    Instances t2;
    try{
    // t2=fft.process(test1);
    // System.out.println(" FFT ="+t2);
    fft.padSeries(true);
    t2=fft.process(test2);
    System.out.println(" FFT with padding="+t2);
    fft=new FFT();
    fft.padSeries(false);
    t2=fft.process(test2);
    System.out.println(" FFT with truncation="+t2);
    fft=new FFT();
    fft.useDFT();
    t2=fft.process(test2);
    System.out.println(" DFT ="+t2);
    }catch(Exception e){
    System.out.println(" Errrrrrr = "+e);
    e.printStackTrace();
    System.exit(0);
    }
    */
    // Not a power of 2: use padding
    // Not a power of 2: use truncate
    // Not a power of 2: use DFT
}
}
31,870
28.185897
146
java
tsml-java
tsml-java-master/src/main/java/tsml/filters/shapelet_filters/BalancedClassShapeletFilter.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.filters.shapelet_filters; import java.io.OutputStreamWriter; import java.io.Serializable; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.Map; import java.util.TreeMap; import tsml.transformers.shapelet_tools.Shapelet; import weka.core.Instances; /** * * @author Aaron Bostrom * */ public class BalancedClassShapeletFilter extends ShapeletFilter implements Serializable { protected Map<Double, ArrayList<Shapelet>> kShapeletsMap; /** * protected method for extracting k shapelets. * * @param data the data that the shapelets will be taken from * @return an ArrayList of FullShapeletTransform objects in order of their * fitness (by infoGain, separationGap then shortest length) */ @Override public ArrayList<Shapelet> findBestKShapeletsCache(Instances data){ ArrayList<Shapelet> seriesShapelets; // temp store of all shapelets for each time series //construct a map for our K-shapelets lists, on for each classVal. if(kShapeletsMap == null){ kShapeletsMap = new TreeMap(); for (int i=0; i < data.numClasses(); i++){ kShapeletsMap.put((double)i, new ArrayList<>()); } } //found out how many we want in each sub list. 
int proportion = numShapelets/kShapeletsMap.keySet().size(); //for all time series outputPrint("Processing data for numShapelets "+numShapelets+ " with proportion = "+proportion); int dataSize = data.numInstances(); //for all possible time series. while(casesSoFar < dataSize) { //get the Shapelets list based on the classValue of our current time series. kShapelets = kShapeletsMap.get(data.get(casesSoFar).classValue()); //we only want to pass in the worstKShapelet if we've found K shapelets. but we only care about this class values worst one. //this is due to the way we represent each classes shapelets in the map. worstShapelet = kShapelets.size() == proportion ? kShapelets.get(kShapelets.size()-1) : null; //set the series we're working with. subseqDistance.setSeries(casesSoFar); //set the clas value of the series we're working with. classValue.setShapeletValue(data.get(casesSoFar)); seriesShapelets = searchFunction.searchForShapeletsInSeries(data.get(casesSoFar), this::checkCandidate); numShapeletsEvaluated+=seriesShapelets.size(); // outputPrint("BalancedClassST: data : " + casesSoFar+" has "+seriesShapelets.size()+" candidates"+ " cumulative early abandons "+numEarlyAbandons); if(seriesShapelets != null){ Collections.sort(seriesShapelets, shapeletComparator); if(isRemoveSelfSimilar()) seriesShapelets = removeSelfSimilar(seriesShapelets); kShapelets = combine(proportion, kShapelets, seriesShapelets); } //re-update the list because it's changed now. 
kShapeletsMap.put(data.get(casesSoFar).classValue(), kShapelets); casesSoFar++; createSerialFile(); } kShapelets = buildKShapeletsFromMap(kShapeletsMap); this.numShapelets = kShapelets.size(); if (recordShapelets) recordShapelets(kShapelets, this.ouputFileLocation); if (!supressOutput) writeShapelets(kShapelets, new OutputStreamWriter(System.out)); return kShapelets; } protected ArrayList<Shapelet> buildKShapeletsFromMap(Map<Double, ArrayList<Shapelet>> kShapeletsMap) { ArrayList<Shapelet> kShapelets = new ArrayList<>(); int numberOfClassVals = kShapeletsMap.keySet().size(); int proportion = numShapelets/numberOfClassVals; Iterator<Shapelet> it; //all lists should be sorted. //go through the map and get the sub portion of best shapelets for the final list. for(ArrayList<Shapelet> list : kShapeletsMap.values()) { int i=0; it = list.iterator(); while(it.hasNext() && i++ <= proportion) { kShapelets.add(it.next()); } } return kShapelets; } }
5,260
36.848921
160
java
tsml-java
tsml-java-master/src/main/java/tsml/filters/shapelet_filters/ShapeletFilter.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.filters.shapelet_filters; import experiments.data.DatasetLoading; import java.io.File; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; import java.io.ObjectOutputStream; import java.io.OutputStreamWriter; import java.io.Serializable; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.Scanner; import java.util.TreeMap; import java.util.logging.Level; import java.util.logging.Logger; import tsml.transformers.shapelet_tools.*; import tsml.transformers.shapelet_tools.distance_functions.ShapeletDistance; import utilities.ClassifierTools; import utilities.class_counts.ClassCounts; import weka.classifiers.meta.RotationForest; import weka.core.*; import weka.filters.SimpleBatchFilter; import tsml.transformers.shapelet_tools.class_value.BinaryClassValue; import tsml.transformers.shapelet_tools.class_value.NormalClassValue; import tsml.transformers.shapelet_tools.quality_measures.ShapeletQuality; import tsml.transformers.shapelet_tools.quality_measures.ShapeletQuality.ShapeletQualityChoice; import tsml.transformers.shapelet_tools.search_functions.ShapeletSearch; import 
tsml.transformers.shapelet_tools.search_functions.ShapeletSearchOptions; import tsml.transformers.shapelet_tools.distance_functions.ImprovedOnlineShapeletDistance; import tsml.transformers.shapelet_tools.search_functions.ShapeletSearchFactory; import utilities.rescalers.SeriesRescaler; /** * NOTE: As shapelet extraction can be time consuming, there is an option to output shapelets * to a text file (Default location is in the root dir of the project, file name "defaultShapeletOutput.txt"). * * Default settings are TO NOT PRODUCE OUTPUT FILE - unless file name is changed, each successive filter will * overwrite the output (see "setLogOutputFile(String fileName)" to change file dir and name). * * To reconstruct a filter from this output, please see the method "createFilterFromFile(String fileName)". * * * * A filter to transform a dataset by k shapelets. Once built on a training set, * the filter can be used to transform subsequent datasets using the extracted * shapelets. * <p> * See <a * href="http://delivery.acm.org/10.1145/2340000/2339579/p289-lines.pdf?ip=139.222.14.198&acc=ACTIVE%20SERVICE&CFID=221649628&CFTOKEN=31860141&__acm__=1354814450_3dacfa9c5af84445ea2bfd7cc48180c8"> * Lines J., Davis, L., Hills, J., Bagnall, A.: A shapelet transform for time series * classification. In: Proc. 18th ACM SIGKDD (2012)</a> * * @author Aaron Bostrom */ public class ShapeletFilter extends SimpleBatchFilter implements Serializable,TechnicalInformationHandler{ //Global defaults. Max should be a lambda set to series length public final static int DEFAULT_NUMSHAPELETS = 500; public final static int DEFAULT_MINSHAPELETLENGTH = 3; public final static int DEFAULT_MAXSHAPELETLENGTH = 23; //Variables for experiments protected static long subseqDistOpCount; private boolean removeSelfSimilar = true; private boolean pruneMatchingShapelets; //this int is used to serialise our position when iterating through a dataset. 
public int casesSoFar; public boolean searchComplete=false; protected boolean supressOutput=true; // defaults to print in System.out AS WELL as file, set to true to stop printing to console protected int numShapelets; //The maximum number of shapelets in the transform. This is K and is different to the total number of shapelets to look for/looked for protected ArrayList<Shapelet> shapelets; protected String ouputFileLocation = "defaultShapeletOutput.txt"; // default store location protected boolean recordShapelets; // default action is to write an output file protected boolean roundRobin; protected long numShapeletsEvaluated=0;//This counts the total number of shapelets returned by searchForShapeletsInSeries. It does not include early abandoned shapelets protected long numEarlyAbandons=0;//This counts number of shapelets early abandoned protected transient ShapeletQuality quality; /*protected transient QualityMeasures.ShapeletQualityMeasure qualityMeasure; protected transient QualityMeasures.ShapeletQualityChoice qualityChoice; protected transient QualityBound.ShapeletQualityBound qualityBound;*/ protected boolean useCandidatePruning; protected boolean useRoundRobin; protected boolean useBalancedClasses; protected Comparator<Shapelet> shapeletComparator; //Made public for now to keep the filters public ShapeletDistance subseqDistance; public NormalClassValue classValue; protected ShapeletSearch searchFunction; protected String serialName; protected Shapelet worstShapelet; protected Instances inputData; protected ArrayList<Shapelet> kShapelets; protected long count; public void setSubSeqDistance(ShapeletDistance ssd) { subseqDistance = ssd; } public long getCount() { return count; } protected int candidatePruningStartPercentage; protected static final double ROUNDING_ERROR_CORRECTION = 0.000000000000001; protected int[] dataSourceIDs; /** * Default constructor; Quality measure defaults to information gain. 
*/ public ShapeletFilter() { this(DEFAULT_NUMSHAPELETS, DEFAULT_MINSHAPELETLENGTH, DEFAULT_MAXSHAPELETLENGTH, ShapeletQualityChoice.INFORMATION_GAIN); } /** * Constructor for generating a shapelet transform from an ArrayList of * Shapelets. * * @param shapes */ public ShapeletFilter(ArrayList<Shapelet> shapes) { this(); this.shapelets = shapes; this.m_FirstBatchDone = true; this.numShapelets = shapelets.size(); } /** * Single param constructor: Quality measure defaults to information gain. * * @param k the number of shapelets to be generated */ public ShapeletFilter(int k) { this(k, DEFAULT_MINSHAPELETLENGTH, DEFAULT_MAXSHAPELETLENGTH, ShapeletQualityChoice.INFORMATION_GAIN); } /** * Full constructor to create a usable filter. Quality measure defaults to * information gain. * * @param k the number of shapelets to be generated * @param minShapeletLength minimum length of shapelets * @param maxShapeletLength maximum length of shapelets */ public ShapeletFilter(int k, int minShapeletLength, int maxShapeletLength) { this(k, minShapeletLength, maxShapeletLength, ShapeletQualityChoice.INFORMATION_GAIN); } /** * Full, exhaustive, constructor for a filter. Quality measure set via enum, * invalid selection defaults to information gain. 
* * @param k the number of shapelets to be generated * @param minShapeletLength minimum length of shapelets * @param maxShapeletLength maximum length of shapelets * @param qualityChoice the shapelet quality measure to be used with this * filter */ public ShapeletFilter(int k, int minShapeletLength, int maxShapeletLength, ShapeletQualityChoice qualityChoice) { this.numShapelets = k; this.shapelets = new ArrayList<>(); this.m_FirstBatchDone = false; this.useCandidatePruning = false; this.casesSoFar = 0; this.recordShapelets = true; // default action is to write an output file this.roundRobin = false; this.useRoundRobin = false; this.shapeletComparator = new Shapelet.LongOrder(); this.kShapelets = new ArrayList<>(); setQualityMeasure(qualityChoice); this.subseqDistance = new ShapeletDistance(); this.classValue = new NormalClassValue(); ShapeletSearchOptions sOp = new ShapeletSearchOptions.Builder().setMin(minShapeletLength).setMax(maxShapeletLength).build(); this.searchFunction = new ShapeletSearchFactory(sOp).getShapeletSearch(); } public long getNumShapeletsPerSeries(){ return searchFunction.getNumShapeletsPerSeries();} /** * Returns the set of shapelets for this transform as an ArrayList. * * @return An ArrayList of Shapelets representing the shapelets found for * this Shapelet Transform. */ public ArrayList<Shapelet> getShapelets() { return this.shapelets; } /** * Shouldnt really hav this method, but it is a convenience to allow refactoring * ClusteredShapeletTransform * @param s */ public void setShapelets(ArrayList<Shapelet> s) { this.shapelets=s; } /** * Set the transform to round robin the data or not. This transform defaults * round robin to false to keep the instances in the same order as the * original data. If round robin is set to true, the transformed data will * be reordered which can make it more difficult to use the ensemble. 
* * @param val */ public void setRoundRobin(boolean val) { this.roundRobin = val; } public void setUseBalancedClasses(boolean val) { this.useBalancedClasses = val; } /** * Supresses filter output to the console; useful when running timing * experiments. */ public void supressOutput() { this.supressOutput = true; } public void setSuppressOutput(boolean b) { this.supressOutput = !b; } public boolean getSuppressOutput() { return this.supressOutput; } /** * Use candidate pruning technique when checking candidate quality. This * speeds up the transform processing time. */ public void useCandidatePruning() { this.useCandidatePruning = true; this.candidatePruningStartPercentage = 10; } /** * Use candidate pruning technique when checking candidate quality. This * speeds up the transform processing time. * * @param percentage the percentage of data to be precocessed before pruning * is initiated. In most cases the higher the percentage the less effective * pruning becomes */ public void useCandidatePruning(int percentage) { this.useCandidatePruning = true; this.candidatePruningStartPercentage = percentage; } /** * Mutator method to set the number of shapelets to be stored by the filter. * * @param k the number of shapelets to be generated */ public void setNumberOfShapelets(int k) { this.numShapelets = k; } /** * set the number of shapelets in the transform * @return */ public int getNumberOfShapelets() { return numShapelets; } /** * Turns off log saving; useful for timing experiments where speed is * essential. */ public void turnOffLog() { this.recordShapelets = false; } /** * Set file path for the filter log. Filter log includes shapelet quality, * seriesId, startPosition, and content for each shapelet. * * @param fileName the updated file path of the filter log */ public void setLogOutputFile(String fileName) { this.recordShapelets = true; this.ouputFileLocation = fileName; } /** * Mutator method to set the minimum and maximum shapelet lengths for the * filter. 
* * @param min minimum length of shapelets * @param max maximum length of shapelets */ public void setShapeletMinAndMax(int min, int max) { searchFunction.setMinAndMax(min, max); } /** * Mutator method to set the quality measure used by the filter. As with * constructors, default selection is information gain unless another valid * selection is specified. * * @return */ public ShapeletQualityChoice getQualityMeasure() { return quality.getChoice(); } /** * * @param qualityChoice */ public void setQualityMeasure(ShapeletQualityChoice qualityChoice) { quality = new ShapeletQuality(qualityChoice); } /** * * @param rescaler */ public void setRescaler(SeriesRescaler rescaler){ if(subseqDistance != null) this.subseqDistance.seriesRescaler = rescaler; } /** * * @param classDist * @return */ protected void initQualityBound(ClassCounts classDist) { if (!useCandidatePruning) return; quality.initQualityBound(classDist, candidatePruningStartPercentage); } /** * * @param f */ public void setCandidatePruning(boolean f) { this.useCandidatePruning = f; this.candidatePruningStartPercentage = f ? 10 : 100; } /** * Sets the format of the filtered instances that are output. I.e. will * include k attributes each shapelet distance and a class value * * @param inputFormat the format of the input data * @return a new Instances object in the desired output format */ @Override protected Instances determineOutputFormat(Instances inputFormat) throws IllegalArgumentException { if (this.numShapelets < 1) { System.out.println(this.numShapelets); throw new IllegalArgumentException("ShapeletTransform not initialised correctly - please specify a value of k (this.numShapelets) that is greater than or equal to 1. It is currently set tp "+this.numShapelets); } //Set up instances size and format. 
//int length = this.numShapelets; int length = this.shapelets.size(); ArrayList<Attribute> atts = new ArrayList<>(); String name; for (int i = 0; i < length; i++) { name = "Shapelet_" + i; atts.add(new Attribute(name)); } if (inputFormat.classIndex() >= 0) { //Classification set, set class //Get the class values as a fast vector Attribute target = inputFormat.attribute(inputFormat.classIndex()); FastVector vals = new FastVector(target.numValues()); for (int i = 0; i < target.numValues(); i++) { vals.addElement(target.value(i)); } atts.add(new Attribute(inputFormat.attribute(inputFormat.classIndex()).name(), vals)); } Instances result = new Instances("Shapelets" + inputFormat.relationName(), atts, inputFormat.numInstances()); if (inputFormat.classIndex() >= 0) { result.setClassIndex(result.numAttributes() - 1); } return result; } protected void inputCheck(Instances dataInst) throws IllegalArgumentException { if (numShapelets < 1) { throw new IllegalArgumentException("Number of shapelets initialised incorrectly to "+numShapelets+" - please select value of k (Usage: setNumberOfShapelets"); } int maxPossibleLength; maxPossibleLength = dataInst.instance(0).numAttributes(); if (dataInst.classIndex() >= 0) { maxPossibleLength -= 1; } } /** * The main logic of the filter; when called for the first time, k shapelets * are extracted from the input Instances 'data'. The input 'data' is * transformed by the k shapelets, and the filtered data is returned as an * output. * <p> * If called multiple times, shapelet extraction DOES NOT take place again; * once k shapelets are established from the initial call to process(), the * k shapelets are used to transform subsequent Instances. * <p> * Intended use: * <p> * 1. Extract k shapelets from raw training data to build filter; * <p> * 2. Use the filter to transform the raw training data into transformed * training data; * <p> * 3. Use the filter to transform the raw testing data into transformed * testing data (e.g. 
filter never extracts shapelets from training data,
 * therefore avoiding bias);
 * <p>
 * 4. Build a classifier using transformed training data, perform
 * classification on transformed test data.
 *
 * @param data the input data to be transformed (and to find the shapelets
 * if this is the first run)
 * @return the transformed representation of data, according to the
 * distances from each instance to each of the k shapelets
 */
@Override
public Instances process(Instances data) throws IllegalArgumentException {
    inputData = data;
    //check the input data is correct and assess whether the filter has been setup correctly.
    inputCheck(data);
    //checks if the shapelets haven't been found yet, finds them if it needs too.
    if (!m_FirstBatchDone && !searchComplete) {
        trainShapelets(data);
        searchComplete = true;
        //we log the count from the subsequence distance before we reset it in the transform.
        //we only care about the count from the train. What is it counting?
        count = subseqDistance.getCount();
    }
    //build the transformed dataset with the shapelets we've found either on this data, or the previous training data
    return buildTansformedDataset(data);
}

/**
 * Runs the shapelet search over the training data: orders the data (round
 * robin if configured), initialises the search function, subsequence
 * distance and class-value handler, then extracts the best k shapelets into
 * the 'shapelets' field and marks the first batch as done.
 *
 * @param data the training data to extract shapelets from
 */
protected void trainShapelets(Instances data) {
    //we might round robin the data in here. So we need to override the input data with the new ordering.
    inputData = initDataSource(data);
    searchFunction.setComparator(shapeletComparator);
    searchFunction.init(inputData);
    //setup subseqDistance
    subseqDistance.init(inputData);
    //setup classValue
    classValue.init(inputData);
    outputPrint("num shapelets before search " + numShapelets);
    shapelets = findBestKShapeletsCache(inputData); // get k shapelets
    m_FirstBatchDone = true;
    outputPrint(shapelets.size() + " Shapelets have been generated num shapelets now " + numShapelets);
    //we don't need to undo the roundRobin because we clone the data into a different order.
}

/**
 * Orders the training data for processing and records the mapping from
 * processing order back to the original instance indices in dataSourceIDs.
 *
 * @param data the raw training data
 * @return the data in processing order (round robin if enabled, otherwise
 * the original ordering)
 */
private Instances initDataSource(Instances data) {
    int dataSize = data.numInstances();
    // shapelets discovery has not yet been carried out, so this must be training data
    dataSourceIDs = new int[dataSize];
    Instances dataset = data;
    if (roundRobin) {
        //Reorder the data in round robin order
        dataset = roundRobinData(data, dataSourceIDs);
    } else {
        //identity mapping: instance i in processing order is instance i in the source
        for (int i = 0; i < dataSize; i++) {
            dataSourceIDs[i] = i;
        }
    }
    return dataset;
}

//given a set of instances transform it by the internal shapelets:
//one numeric attribute per shapelet (distance to it) plus the true class value.
public Instances buildTansformedDataset(Instances data) {
    //Reorder the training data and reset the shapelet indexes
    Instances output = determineOutputFormat(data);
    //init out data for transforming.
    subseqDistance.init(inputData);
    //setup classsValue
    classValue.init(inputData);
    Shapelet s;
    // for each data, get distance to each shapelet and create new instance
    int size = shapelets.size();
    int dataSize = data.numInstances();
    //create our data instances
    for (int j = 0; j < dataSize; j++) {
        output.add(new DenseInstance(size + 1));
    }
    double dist;
    for (int i = 0; i < size; i++) {
        s = shapelets.get(i);
        subseqDistance.setShapelet(s);
        for (int j = 0; j < dataSize; j++) {
            dist = subseqDistance.calculate(data.instance(j), j);
            output.instance(j).setValue(i, dist);
        }
    }
    //do the classValues.
    for (int j = 0; j < dataSize; j++) {
        //we always want to write the true ClassValue here. Irrelevant of binarised or not.
        output.instance(j).setValue(size, data.instance(j).classValue());
    }
    return output;
}

/**
 * protected method for extracting k shapelets.
 * this method extracts shapelets series by series, using the searchFunction method searchForShapeletsInSeries,
 * which itself uses checkCandidate
 * 1. The search method determines the method of choosing shapelets. By default all are evaluated (ShapeletSearch)
 * or the alternative RandomSearch, which finds a fixed number of shapelets determined by the time contract.
 * 2.
The qualityFunction assesses each candidate, and uses the worstShapelet (set in checkCandidate) to test for inclusion and * any lower bounding. I dont think it uses it to test for inclusion. * 3. self similar are removed by default, and the method combine is used to merge the current candidates and * the new ones * @param data the data that the shapelets will be taken from * @return an ArrayList of FullShapeletTransform objects in order of their * fitness (by infoGain, seperationGap then shortest length) */ public ArrayList<Shapelet> findBestKShapeletsCache(Instances data) { ArrayList<Shapelet> seriesShapelets; // temp store of all shapelets for each time series // temp store of all shapelets for each time series //for all time series outputPrint("Processing data: "); int dataSize = data.numInstances(); //for all possible time series. for(; casesSoFar < dataSize; casesSoFar++) { //set the worst Shapelet so far, as long as the shapelet set is full. worstShapelet = kShapelets.size() == numShapelets ? kShapelets.get(numShapelets - 1) : null; //set the series we're working with. subseqDistance.setSeries(casesSoFar); //set the class value of the series we're working with. 
classValue.setShapeletValue(data.get(casesSoFar)); seriesShapelets = searchFunction.searchForShapeletsInSeries(data.get(casesSoFar), this::checkCandidate); numShapeletsEvaluated+=seriesShapelets.size(); outputPrint("data : " + casesSoFar+" has "+seriesShapelets.size()+" candidates"+ " cumulative early abandons "+numEarlyAbandons+" worst so far ="+worstShapelet); if(seriesShapelets != null){ Collections.sort(seriesShapelets, shapeletComparator); if(isRemoveSelfSimilar()) seriesShapelets = removeSelfSimilar(seriesShapelets); kShapelets = combine(numShapelets, kShapelets, seriesShapelets); } createSerialFile(); } this.numShapelets = kShapelets.size(); if (recordShapelets) recordShapelets(kShapelets, this.ouputFileLocation); if (!supressOutput) writeShapelets(kShapelets, new OutputStreamWriter(System.out)); return kShapelets; } public void createSerialFile() { if(serialName == null) return; //Serialise the object. ObjectOutputStream out = null; try { out = new ObjectOutputStream(new FileOutputStream(serialName)); out.writeObject(this); } catch (IOException ex) { System.out.println("Failed to write " + ex); } finally{ if(out != null){ try { out.close(); } catch (IOException ex) { System.out.println("Failed to close " + ex); } } } } /** * protected method for extracting k shapelets. 
* * @param numShapelets * @param data the data that the shapelets will be taken from * @param minShapeletLength * @param maxShapeletLength * @return an ArrayList of FullShapeletTransform objects in order of their * fitness (by infoGain, seperationGap then shortest length) */ public ArrayList<Shapelet> findBestKShapeletsCache(int numShapelets, Instances data, int minShapeletLength, int maxShapeletLength) { this.numShapelets = numShapelets; //setup classsValue classValue.init(data); //setup subseqDistance subseqDistance.init(data); Instances newData=initDataSource(data); return findBestKShapeletsCache(newData); } /** * Private method to combine two ArrayList collections of * FullShapeletTransform objects. * * @param k the maximum number of shapelets to be returned after combining * the two lists * @param kBestSoFar the (up to) k best shapelets that have been observed so * far, passed in to combine with shapelets from a new series (sorted) * @param timeSeriesShapelets the shapelets taken from a new series that are * to be merged in descending order of fitness with the kBestSoFar * @return an ordered ArrayList of the best k (or less) (sorted) * FullShapeletTransform objects from the union of the input ArrayLists */ protected ArrayList<Shapelet> combine(int k, ArrayList<Shapelet> kBestSoFar, ArrayList<Shapelet> timeSeriesShapelets) { //both kBestSofar and timeSeries are sorted so we can exploit this. //maintain a pointer for each list. ArrayList<Shapelet> newBestSoFar = new ArrayList<>(); //best so far pointer int bsfPtr = 0; //new time seris pointer. int tssPtr = 0; for (int i = 0; i < k; i++) { Shapelet shapelet1 = null, shapelet2 = null; if (bsfPtr < kBestSoFar.size()) { shapelet1 = kBestSoFar.get(bsfPtr); } if (tssPtr < timeSeriesShapelets.size()) { shapelet2 = timeSeriesShapelets.get(tssPtr); } boolean shapelet1Null = shapelet1 == null; boolean shapelet2Null = shapelet2 == null; //both lists have been explored, but we have less than K elements. 
if (shapelet1Null && shapelet2Null) { break; } //one list is expired keep adding the other list until we reach K. if (shapelet1Null) { //even if the list has expired don't just add shapelets without considering they may be dupes. AddToBestSoFar(shapelet2, newBestSoFar); tssPtr++; continue; } //one list is expired keep adding the other list until we reach K. if (shapelet2Null) { //even if the list has expired don't just add shapelets without considering they may be dupes. AddToBestSoFar(shapelet1, newBestSoFar); bsfPtr++; continue; } //if both lists are fine then we need to compare which one to use. int compare = shapeletComparator.compare(shapelet1, shapelet2); if (compare < 0) { AddToBestSoFar(shapelet1, newBestSoFar); bsfPtr++; shapelet1 = null; } else{ AddToBestSoFar(shapelet2, newBestSoFar); tssPtr++; shapelet2 = null; } } return newBestSoFar; } private void AddToBestSoFar(Shapelet shapelet1, ArrayList<Shapelet> newBestSoFar) { boolean containsMatchingShapelet = false; if(pruneMatchingShapelets) containsMatchingShapelet = containsMatchingShapelet(shapelet1, newBestSoFar); if(!containsMatchingShapelet) newBestSoFar.add(shapelet1); } private boolean containsMatchingShapelet(Shapelet shapelet, ArrayList<Shapelet> newBestSoFar){ //we're going to be comparing all the shapelets we have to shapelet. this.subseqDistance.setShapelet(shapelet); //go backwards from where we're at until we stop matching. List is sorted. for(int index=newBestSoFar.size()-1;index>=0; index--){ Shapelet shape = newBestSoFar.get(index); int compare2 = shapeletComparator.compare(shape, shapelet); //if we are not simply equal to the shapelet that we're looking at then abandon ship. if(compare2 != 0){ return false; // stop evaluating. we no longer have any matches. } //if we're here then evaluate the shapelet distance. if they're equal in the comparator it means same length, same IG. 
double dist = this.subseqDistance.distanceToShapelet(shape);
        //if we hit a shapelet we nearly match with 1e-6 match with stop checking.
        if (isNearlyEqual(dist, 0.0)) {
            return true; //this means we should not add the shapelet.
        }
    }
    return false;
}

// Tolerance comparison for doubles; used to decide whether two shapelets are
// effectively identical (distance ~ 0).
private static boolean isNearlyEqual(double a, double b) {
    double eps = 1e-6;
    return Math.abs(a - b) < eps;
}

/**
 * protected method to remove self-similar shapelets from an ArrayList (i.e.
 * if they come from the same series and have overlapping indicies)
 *
 * @param shapelets the input Shapelets to remove self similar
 * FullShapeletTransform objects from
 * @return a copy of the input ArrayList with self-similar shapelets removed
 */
protected static ArrayList<Shapelet> removeSelfSimilar(ArrayList<Shapelet> shapelets) {
    // return a new pruned array list - more efficient than removing
    // self-similar entries on the fly and constantly reindexing
    ArrayList<Shapelet> outputShapelets = new ArrayList<>();
    int size = shapelets.size();
    boolean[] selfSimilar = new boolean[size];
    for (int i = 0; i < size; i++) {
        if (selfSimilar[i]) {
            continue;
        }
        outputShapelets.add(shapelets.get(i));
        for (int j = i + 1; j < size; j++) {
            // no point recalc'ing if already self similar to something
            if ((!selfSimilar[j]) && selfSimilarity(shapelets.get(i), shapelets.get(j))) {
                selfSimilar[j] = true;
            }
        }
    }
    return outputShapelets;
}

/**
 * Evaluates one candidate shapelet (instance, start, length, dimension):
 * builds the orderline of distances from the candidate to every training
 * instance, then scores it with the configured quality measure. Returns null
 * if the candidate is pruned early by the quality bound.
 */
protected Shapelet checkCandidate(Instance series, int start, int length, int dimension) {
    //init qualityBound.
    initQualityBound(classValue.getClassDistributions());
    //Set bound of the bounding algorithm
    if (worstShapelet != null) {
        quality.setBsfQuality(worstShapelet.qualityValue);
    }
    //set the candidate. This is the instance, start and length.
    subseqDistance.setCandidate(series, start, length, dimension);
    // create orderline by looping through data set and calculating the subsequence
    // distance from candidate to all data, inserting in order.
    ArrayList<OrderLineObj> orderline = new ArrayList<>();
    int dataSize = inputData.numInstances();
    for (int i = 0; i < dataSize; i++) {
        //Check if it is possible to prune the candidate
        if (quality.pruneCandidate()) {
            numEarlyAbandons++;
            return null;
        }
        double distance = 0.0;
        //don't compare the shapelet to the the time series it came from because we know it's 0.
        if (i != casesSoFar) {
            distance = subseqDistance.calculate(inputData.instance(i), i);
        }
        //this could be binarised or normal.
        double classVal = classValue.getClassValue(inputData.instance(i));
        // without early abandon, it is faster to just add and sort at the end
        orderline.add(new OrderLineObj(distance, classVal));
        //Update qualityBound - presumably each bounding method for different quality measures will have a different update procedure.
        quality.updateOrderLine(orderline.get(orderline.size() - 1));
    }
    Shapelet shapelet = new Shapelet(subseqDistance.getCandidate(), dataSourceIDs[casesSoFar], start, quality.getQualityMeasure());
    //this class distribution could be binarised or normal.
    shapelet.calculateQuality(orderline, classValue.getClassDistributions());
    shapelet.classValue = classValue.getShapeletValue(); //set classValue of shapelet. (interesing to know).
    shapelet.dimension = dimension;
    return shapelet;
}

/**
 * Load a set of Instances from an ARFF
 *
 * @param fileName the file name of the ARFF
 * @return a set of Instances from the ARFF, or null if reading failed
 * (errors are reported to stdout, not rethrown)
 */
public static Instances loadData(String fileName) {
    Instances data = null;
    try {
        FileReader r;
        r = new FileReader(fileName);
        data = new Instances(r);
        data.setClassIndex(data.numAttributes() - 1);
    } catch (IOException e) {
        System.out.println(" Error =" + e + " in method loadData");
    }
    return data;
}

/**
 * A private method to assess the self similarity of two
 * FullShapeletTransform objects (i.e. whether they have overlapping
 * indicies and are taken from the same time series).
* @param shapelet the first FullShapeletTransform object (in practice, this
 * will be the dominant shapelet with quality >= candidate)
 * @param candidate the second FullShapeletTransform
 * @return true when the two shapelets come from the same series and
 * dimension and their index ranges overlap
 */
private static boolean selfSimilarity(Shapelet shapelet, Shapelet candidate) {
    //check whether they're the same dimension or not.
    if (candidate.seriesId == shapelet.seriesId && candidate.dimension == shapelet.dimension) {
        if (candidate.startPos >= shapelet.startPos && candidate.startPos < shapelet.startPos + shapelet.getLength()) {
            //candidate starts within exisiting shapelet
            return true;
        }
        if (shapelet.startPos >= candidate.startPos && shapelet.startPos < candidate.startPos + candidate.getLength()) {
            //existing shapelet starts within the candidate
            return true;
        }
    }
    return false;
}

/**
 * A method to read in a FullShapeletTransform log file to reproduce a
 * FullShapeletTransform
 * <p>
 * NOTE: assumes shapelets from log are Z-NORMALISED
 *
 * @param fileName the name and path of the log file
 * @return a duplicate FullShapeletTransform to the object that created the
 * original log file
 * @throws Exception
 */
public static ShapeletFilter createFilterFromFile(String fileName) throws Exception {
    //delegate with no cap on the number of shapelets read
    return createFilterFromFile(fileName, Integer.MAX_VALUE);
}

/**
 * A method to obtain time taken to find a single best shapelet in the data
 * set
 *
 * @param data the data set to be processed
 * @param minShapeletLength minimum shapelet length
 * @param maxShapeletLength maximum shapelet length
 * @return time in seconds to find the best shapelet
 */
public double timingForSingleShapelet(Instances data, int minShapeletLength, int maxShapeletLength) {
    data = roundRobinData(data, null);
    long startTime = System.nanoTime();
    findBestKShapeletsCache(1, data, minShapeletLength, maxShapeletLength);
    long finishTime = System.nanoTime();
    //nanoseconds -> seconds
    return (double) (finishTime - startTime) / 1000000000.0;
}

// Writes the discovered shapelets for the given fold to a CSV in saveDirectory.
public void writeAdditionalData(String saveDirectory, int fold) {
    recordShapelets(this.kShapelets, saveDirectory + "_shapelets" + fold + ".csv");
}

/**
 * Writes the given shapelets to the given file location, creating any
 * missing parent directories first. I/O failures are logged, not rethrown.
 */
public void recordShapelets(ArrayList<Shapelet> kShapelets, String saveLocation) {
    //just in case the file doesn't exist or the directories.
    File file = new File(saveLocation);
    if (file.getParentFile() != null) {
        file.getParentFile().mkdirs();
    }
    try (FileWriter out = new FileWriter(file)) {
        writeShapelets(kShapelets, out);
    } catch (IOException ex) {
        Logger.getLogger(ShapeletFilter.class.getName()).log(Level.SEVERE, null, ex);
    }
}

/**
 * Writes the shapelets in CSV form: a header, then per shapelet one stats
 * line (quality, seriesId, startPos, classVal, numChannels, dimension)
 * followed by one line of values per channel.
 */
protected void writeShapelets(ArrayList<Shapelet> kShapelets, OutputStreamWriter out) {
    try {
        out.append("informationGain,seriesId,startPos,classVal,numChannels,dimension\n");
        for (Shapelet kShapelet : kShapelets) {
            out.append(kShapelet.qualityValue + "," + kShapelet.seriesId + "," + kShapelet.startPos + "," + kShapelet.classValue + "," + kShapelet.getNumDimensions() + "," + kShapelet.dimension + "\n");
            for (int i = 0; i < kShapelet.numDimensions; i++) {
                double[] shapeletContent = kShapelet.getContent().getShapeletContent(i);
                for (int j = 0; j < shapeletContent.length; j++) {
                    out.append(shapeletContent[j] + ",");
                }
                out.append("\n");
            }
        }
    } catch (IOException ex) {
        Logger.getLogger(ShapeletFilter.class.getName()).log(Level.SEVERE, null, ex);
    }
}

/**
 * Returns a list of the lengths of the shapelets found by this transform.
 *
 * @return An ArrayList of Integers representing the lengths of the
 * shapelets; empty if the first batch has not been processed yet.
 */
public ArrayList<Integer> getShapeletLengths() {
    ArrayList<Integer> shapeletLengths = new ArrayList<>();
    if (m_FirstBatchDone) {
        for (Shapelet s : this.shapelets) {
            shapeletLengths.add(s.getLength());
        }
    }
    return shapeletLengths;
}

/**
 * A method to read in a FullShapeletTransform log file to reproduce a
 * FullShapeletTransform,
 * <p>
 * NOTE: assumes shapelets from log are Z-NORMALISED
 *
 * @param fileName the name and path of the log file
 * @param maxShapelets
 * @return a duplicate FullShapeletTransform to the object that created the
 * original log file
 * @throws Exception
 */
public static ShapeletFilter createFilterFromFile(String fileName, int maxShapelets) throws Exception {
    File input = new File(fileName);
    Scanner scan = new Scanner(input);
    scan.useDelimiter("\n");
    ShapeletFilter sf = new ShapeletFilter();
    ArrayList<Shapelet> shapelets = new ArrayList<>();
    String shapeletContentString;
    String shapeletStatsString;
    ArrayList<Double> content;
    double[] contentArray;
    Scanner lineScan;
    Scanner statScan;
    double qualVal;
    int serID;
    int starPos;
    int shapeletCount = 0;
    //alternating lines: stats line then content line, per shapelet
    while (shapeletCount < maxShapelets && scan.hasNext()) {
        shapeletStatsString = scan.next();
        shapeletContentString = scan.next();
        //Get the shapelet stats
        statScan = new Scanner(shapeletStatsString);
        statScan.useDelimiter(",");
        qualVal = Double.parseDouble(statScan.next().trim());
        serID = Integer.parseInt(statScan.next().trim());
        starPos = Integer.parseInt(statScan.next().trim());
        //End of shapelet stats
        lineScan = new Scanner(shapeletContentString);
        lineScan.useDelimiter(",");
        content = new ArrayList<>();
        while (lineScan.hasNext()) {
            String next = lineScan.next().trim();
            if (!next.isEmpty()) {
                content.add(Double.parseDouble(next));
            }
        }
        contentArray = new double[content.size()];
        for (int i = 0; i < content.size(); i++) {
            contentArray[i] = content.get(i);
        }
        contentArray = sf.subseqDistance.seriesRescaler.rescaleSeries(contentArray, false);
        ShapeletCandidate cand = new ShapeletCandidate();
cand.setShapeletContent(contentArray); Shapelet s = new Shapelet(cand, qualVal, serID, starPos); shapelets.add(s); shapeletCount++; } sf.shapelets = shapelets; sf.m_FirstBatchDone = true; sf.numShapelets = shapelets.size(); sf.setShapeletMinAndMax(1, 1); return sf; } /** * A method to read in a shapelet csv file and return a shapelet arraylist. * @param f * @return a duplicate FullShapeletTransform to the object that created the * original log file * @throws java.io.FileNotFoundException */ public static ArrayList<Shapelet> readShapeletCSV(File f) throws FileNotFoundException{ ArrayList<Shapelet> shapelets = new ArrayList<>(); Scanner sc = new Scanner(f); System.out.println(sc.nextLine()); boolean readHeader = true; double quality = 0, classVal = 0; int series = 0, position = 0, dimension = 0, numDimensions = 1; ShapeletCandidate cand = null; int currentDim = 0; while(sc.hasNextLine()){ String line = sc.nextLine(); String[] cotentsAsString = line.split(","); if(readHeader){ quality = Double.parseDouble(cotentsAsString[0]); series = Integer.parseInt(cotentsAsString[1]); position = Integer.parseInt(cotentsAsString[2]); classVal = Double.parseDouble(cotentsAsString[3]); numDimensions = Integer.parseInt(cotentsAsString[4]); dimension = Integer.parseInt(cotentsAsString[5]); cand = new ShapeletCandidate(numDimensions); currentDim =0; readHeader = false; } else{ //read dims until we run out. double[] content = new double[cotentsAsString.length]; for (int i = 0; i < content.length; i++) { content[i] = Double.parseDouble(cotentsAsString[i]); } //set the content for the current channel. cand.setShapeletContent(currentDim, content); currentDim++; //if we've evald all the current dim data for a shapelet we can add it to the list, and move on with the next one. 
if(currentDim == numDimensions){ Shapelet shapelet = new Shapelet(cand, quality, series, position); shapelet.dimension = dimension; shapelet.classValue = classVal; shapelets.add(shapelet); readHeader = true; } } } return shapelets; } /** * Method to reorder the given Instances in round robin order * * @param data Instances to be reordered * @param sourcePos Pointer to array of ints, where old positions of * instances are to be stored. * @return Instances in round robin order */ public static Instances roundRobinData(Instances data, int[] sourcePos) { //Count number of classes TreeMap<Double, ArrayList<Instance>> instancesByClass = new TreeMap<>(); TreeMap<Double, ArrayList<Integer>> positionsByClass = new TreeMap<>(); NormalClassValue ncv = new NormalClassValue(); ncv.init(data); //Get class distributions ClassCounts classDistribution = ncv.getClassDistributions(); //Allocate arrays for instances of every class for (int i = 0; i < classDistribution.size(); i++) { int frequency = classDistribution.get(i); instancesByClass.put((double) i, new ArrayList<>(frequency)); positionsByClass.put((double) i, new ArrayList<>(frequency)); } int dataSize = data.numInstances(); //Split data according to their class memebership for (int i = 0; i < dataSize; i++) { Instance inst = data.instance(i); instancesByClass.get(ncv.getClassValue(inst)).add(inst); positionsByClass.get(ncv.getClassValue(inst)).add(i); } //Merge data into single list in round robin order Instances roundRobinData = new Instances(data, dataSize); for (int i = 0; i < dataSize;) { //Allocate arrays for instances of every class for (int j = 0; j < classDistribution.size(); j++) { ArrayList<Instance> currentList = instancesByClass.get((double) j); ArrayList<Integer> currentPositions = positionsByClass.get((double) j); if (!currentList.isEmpty()) { roundRobinData.add(currentList.remove(currentList.size() - 1)); if (sourcePos != null && sourcePos.length == dataSize) { sourcePos[i] = 
currentPositions.remove(currentPositions.size() - 1); } i++; } } } return roundRobinData; } public void outputPrint(String val) { if (!this.supressOutput) { System.out.println(val); } } @Override public String toString() { String str = "Shapelets: \n"; for (Shapelet s : shapelets) { str += s.toString() + "\n"; } return str; } public String getShapeletCounts() { return "numShapelets," + numShapelets + ",numShapeletsEvaluated," + numShapeletsEvaluated + ",numEarlyAbandons," + numEarlyAbandons; } //searchFunction public String getParameters(){ String str="minShapeletLength,"+searchFunction.getMin()+",maxShapeletLength,"+searchFunction.getMax()+",numShapelets,"+numShapelets +",numShapeletsEvaluated,"+numShapeletsEvaluated+",numEarlyAbandons,"+numEarlyAbandons + ",searchFunction,"+this.searchFunction.getSearchType() + ",qualityMeasure,"+this.quality.getQualityMeasure().getClass().getSimpleName() +",subseqDistance,"+this.subseqDistance.getClass().getSimpleName() +",roundrobin,"+roundRobin+",earlyAbandon,"+useCandidatePruning+",TransformClass,"+this.getClass().getSimpleName(); return str; } /** * * @param data * @param minShapeletLength * @param maxShapeletLength * @return * @throws Exception */ public long opCountForSingleShapelet(Instances data, int minShapeletLength, int maxShapeletLength) throws Exception { data = roundRobinData(data, null); subseqDistOpCount = 0; findBestKShapeletsCache(1, data, minShapeletLength, maxShapeletLength); return subseqDistOpCount; } public static void basicTest(){ String dataLocation = "E:\\Data\\TSCProblems2018\\"; String saveLocation = "C:\\Temp\\TSC\\"; final String dataset = "FordA"; final int fold = 1; final String filePath = dataLocation + File.separator + dataset + File.separator + dataset; Instances test, train; test = DatasetLoading.loadDataNullable(filePath + "_TEST"); train = DatasetLoading.loadDataNullable(filePath + "_TRAIN"); ShapeletSearchOptions searchOptions = new ShapeletSearchOptions.Builder() .setMin(3) 
.setMax(train.numAttributes()-1) .setSearchType(ShapeletSearch.SearchType.FULL) .build(); ShapeletTransformFactoryOptions options = new ShapeletTransformFactoryOptions.ShapeletTransformOptions() .setDistanceType(ShapeletDistance.DistanceType.IMPROVED_ONLINE) .setKShapelets(train.numInstances()*10) .useBinaryClassValue() .useClassBalancing() .useCandidatePruning() .useRoundRobin() .setSearchOptions(searchOptions) .build(); ShapeletFilter transform = new ShapeletTransformFactory(options).getFilter(); transform.setLogOutputFile(saveLocation+"fordAOutput.csv"); long startTime1 = System.nanoTime(); Instances tranTrain1 = transform.process(train); Instances tranTest1 = transform.process(test); long endTime1 = System.nanoTime(); System.out.println("Transform time = " + (endTime1-startTime1)); } public static void testFilterUsage(){ String filePath ="Z:\\ArchiveData\\Univariate_ts\\"; String problem="Chinatown"; Instances test, train; test = DatasetLoading.loadDataNullable(filePath + problem+"\\"+problem+"_TEST"); train = DatasetLoading.loadDataNullable(filePath + problem+"\\"+problem+"_TRAIN"); ShapeletFilter shapeletFilter = ShapeletTransformTimingUtilities.createTransformWithTimeLimit(train, 24); } public static void main(String[] args){ try { final String resampleLocation = "D:\\Research TSC\\Data\\TSCProblems2018"; final String dataset = "Yoga"; final int fold = 1; final String filePath = resampleLocation + File.separator + dataset + File.separator + dataset; Instances test, train; test = DatasetLoading.loadDataNullable(filePath + "_TEST"); train = DatasetLoading.loadDataNullable(filePath + "_TRAIN"); //use fold as the seed. //train = InstanceTools.subSample(train, 100, fold); ShapeletFilter transform = new ShapeletFilter(); transform.setRoundRobin(true); //construct shapelet classifiers. 
transform.setClassValue(new BinaryClassValue()); transform.setSubSeqDistance(new ImprovedOnlineShapeletDistance()); transform.setShapeletMinAndMax(3, train.numAttributes() - 1); transform.useCandidatePruning(); transform.setNumberOfShapelets(train.numInstances() * 10); transform.setQualityMeasure(ShapeletQualityChoice.INFORMATION_GAIN); transform.supressOutput(); transform.setPruneMatchingShapelets(true); long startTime = System.nanoTime(); Instances tranTrain = transform.process(train); Instances tranTest = transform.process(test); long endTime = System.nanoTime(); RotationForest rot1 = new RotationForest(); rot1.buildClassifier(tranTrain); double accuracy = ClassifierTools.accuracy(tranTest, rot1); System.out.println("Shapelet transform "+ accuracy + " time " + (endTime-startTime)); ShapeletSearchOptions searchOptions = new ShapeletSearchOptions.Builder() .setMin(3) .setMax(train.numAttributes()-1) .setSearchType(ShapeletSearch.SearchType.FULL) .build(); ShapeletTransformFactoryOptions options = new ShapeletTransformFactoryOptions.ShapeletTransformOptions() .setDistanceType(ShapeletDistance.DistanceType.IMPROVED_ONLINE) .setKShapelets(train.numInstances()*10) .useBinaryClassValue() .useClassBalancing() .useCandidatePruning() .useRoundRobin() .setSearchOptions(searchOptions) .build(); ShapeletFilter transform1 = new ShapeletTransformFactory(options).getFilter(); transform1.supressOutput(); long startTime1 = System.nanoTime(); Instances tranTrain1 = transform.process(train); Instances tranTest1 = transform.process(test); long endTime1 = System.nanoTime(); RotationForest rot2 = new RotationForest(); rot2.buildClassifier(tranTrain1); double accuracy1 = ClassifierTools.accuracy(tranTest1, rot2); System.out.println("Fast shapelet transform "+ accuracy1 + " time " + (endTime1-startTime1)); } catch (Exception ex) { Logger.getLogger(ShapeletFilter.class.getName()).log(Level.SEVERE, null, ex); } } /** * @return the removeSelfSimilar */ public boolean isRemoveSelfSimilar() 
{ return removeSelfSimilar; } /** * @param removeSelfSimilar the removeSelfSimilar to set */ public void setRemoveSelfSimilar(boolean removeSelfSimilar) { this.removeSelfSimilar = removeSelfSimilar; } /** * @return the pruneMatchingShapelets */ public boolean isPruneMatchingShapelets() { return pruneMatchingShapelets; } /** * @param pruneMatchingShapelets the pruneMatchingShapelets to set */ public void setPruneMatchingShapelets(boolean pruneMatchingShapelets) { this.pruneMatchingShapelets = pruneMatchingShapelets; } public void setClassValue(NormalClassValue cv) { classValue = cv; } public void setSearchFunction(ShapeletSearch shapeletSearch) { searchFunction = shapeletSearch; } public ShapeletSearch getSearchFunction(){ return searchFunction; } public void setSerialName(String sName) { serialName = sName; } public void useSeparationGap() { shapeletComparator = new Shapelet.ReverseSeparationGap(); } public void setShapeletComparator(Comparator<Shapelet> comp){ shapeletComparator = comp; } public void setUseRoundRobin(boolean b) { useRoundRobin = b; } public ShapeletDistance getSubSequenceDistance(){ return subseqDistance; } public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(TechnicalInformation.Type.ARTICLE); result.setValue(TechnicalInformation.Field.AUTHOR, "authors"); result.setValue(TechnicalInformation.Field.YEAR, "put in Aarons paper"); result.setValue(TechnicalInformation.Field.TITLE, "stuff"); result.setValue(TechnicalInformation.Field.JOURNAL, "places"); result.setValue(TechnicalInformation.Field.VOLUME, "vol"); result.setValue(TechnicalInformation.Field.PAGES, "pages"); return result; } @Override public String globalInfo() { return "Shapelet Transform with settings ="; } }
56,259
38.096595
222
java
tsml-java
tsml-java-master/src/main/java/tsml/filters/shapelet_filters/cShapeletFilter.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package tsml.filters.shapelet_filters;

import tsml.transformers.shapelet_tools.Shapelet;
import weka.core.Instances;

import java.io.OutputStreamWriter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.TreeMap;

/**
 * A first attempt at a simple time contract that uses elapsed time, rather than an
 * estimate of the number of shapelets, and applies it internally rather than externally.
 *
 * This is a hacked version and the structure could be tidied up: the factory chooses
 * BalancedClassShapeletTransform vs ShapeletTransform based on the number of classes,
 * so this class must internally revert to the plain ShapeletTransform behaviour
 * (modelling the unavailable super.super.findBestKShapeletsCache call) depending on
 * the {@code useBalancedClasses} flag. This could be improved by absorbing the
 * balanced variant into ShapeletTransform and switching there instead of here.
 *
 * In terms of the contract, note the time taken to perform the final transform itself
 * is not accounted for, and can be long for big data.
 */
public class cShapeletFilter extends BalancedClassShapeletFilter {

    // Contract time in nanoseconds. If zero, everything reverts to the
    // uncontracted BalancedClassShapeletFilter behaviour.
    private long contractTime = 0;

    public void setContractTime(long c) {
        contractTime = c;
    }

    @Override
    public ArrayList<Shapelet> findBestKShapeletsCache(Instances data) {
        // The factory sets useBalancedClasses externally; honour it here.
        if (useBalancedClasses)
            return findBestKShapeletsCacheBalanced(data);
        else
            return findBestKShapeletsCacheOriginal(data);
    }

    /**
     * Contracted, class-balanced shapelet search: processes series until either the
     * time contract is exhausted or all series have been visited.
     *
     * @param data the training data
     * @return the best k shapelets found within the contract
     */
    private ArrayList<Shapelet> findBestKShapeletsCacheBalanced(Instances data) {
        // no contract set: fall back to the uncontracted balanced search
        if (contractTime == 0)
            return super.findBestKShapeletsCache(data);
        long startTime = System.nanoTime();
        long usedTime = 0;
        // could be reduced to trade series coverage for more shapelets per series
        int numSeriesToUse = data.numInstances();
        System.out.println(" Set up in contract balanced in cST");
        System.out.println("\t\t\t numShapelets " + numShapelets);
        System.out.println("\t\t\t Contract (secs) = " + contractTime / 1000000000.0);
        System.out.println("Search function " + searchFunction.getSearchType());
        System.out.println("Shapelets per series " + getNumShapeletsPerSeries());
        ArrayList<Shapelet> seriesShapelets; // temp store of all shapelets for each time series
        // construct a map holding one k-shapelet list per class value
        if (kShapeletsMap == null) {
            kShapeletsMap = new TreeMap<>();
            for (int i = 0; i < data.numClasses(); i++) {
                kShapeletsMap.put((double) i, new ArrayList<>());
            }
        }
        // how many shapelets we want in each per-class sub list
        int proportion = numShapelets / kShapeletsMap.keySet().size();
        outputPrint("Processing data for numShapelets " + numShapelets
                + " with proportion per class = " + proportion);
        outputPrint("in contract balanced: Contract (secs)" + contractTime / 1000000000.0);
        // continue processing series until we run out of time
        while (casesSoFar < numSeriesToUse && usedTime < contractTime) {
            System.out.println(casesSoFar + " Cumulative time (secs) = " + usedTime / 1000000000.0);
            // get the shapelet list for the class of the current series
            kShapelets = kShapeletsMap.get(data.get(casesSoFar).classValue());
            // only pass in a worst shapelet once this class's list is full
            worstShapelet = kShapelets.size() == proportion ? kShapelets.get(kShapelets.size() - 1) : null;
            subseqDistance.setSeries(casesSoFar);
            classValue.setShapeletValue(data.get(casesSoFar));
            seriesShapelets = searchFunction.searchForShapeletsInSeries(data.get(casesSoFar), this::checkCandidate);
            // BUG FIX: the original called seriesShapelets.size() BEFORE the null
            // check, which would throw NPE whenever the search returned null.
            if (seriesShapelets != null) {
                numShapeletsEvaluated += seriesShapelets.size();
                Collections.sort(seriesShapelets, shapeletComparator);
                if (isRemoveSelfSimilar())
                    seriesShapelets = removeSelfSimilar(seriesShapelets);
                kShapelets = combine(proportion, kShapelets, seriesShapelets);
            }
            // write the (possibly changed) list back into the map
            kShapeletsMap.put(data.get(casesSoFar).classValue(), kShapelets);
            casesSoFar++;
            createSerialFile();
            usedTime = System.nanoTime() - startTime;
            // If we underestimated the contract we run back through the data;
            // if we overestimated it we simply stop.
            if (casesSoFar == numSeriesToUse - 1 && !searchFunction.getSearchType().equals("FULL")) // HORRIBLE!
                casesSoFar = 0;
        }
        kShapelets = buildKShapeletsFromMap(kShapeletsMap);
        this.numShapelets = kShapelets.size();
        if (recordShapelets)
            recordShapelets(kShapelets, this.ouputFileLocation);
        if (!supressOutput)
            writeShapelets(kShapelets, new OutputStreamWriter(System.out));
        return kShapelets;
    }

    /**
     * Contracted version of the original (class-unbalanced) cached search.
     *
     * @param data the training data
     * @return the best k shapelets found within the contract
     */
    public ArrayList<Shapelet> findBestKShapeletsCacheOriginal(Instances data) {
        long time = contractTime;
        if (time == 0)
            time = Long.MAX_VALUE; // no contract: keep going until every series has been seen
        long startTime = System.nanoTime();
        long usedTime = 0;
        // could be reduced to trade series coverage for more shapelets per series
        int numSeriesToUse = data.numInstances();
        ArrayList<Shapelet> seriesShapelets; // temp store of all shapelets for each time series
        System.out.println(" Set up in contract original cache in cST");
        System.out.println("\t\t\t numShapelets " + numShapelets);
        System.out.println("\t\t\t Contract (secs) = " + contractTime / 1000000000.0);
        System.out.println("Search function " + searchFunction.getSearchType());
        System.out.println("Shapelets per series " + getNumShapeletsPerSeries());
        for (; casesSoFar < numSeriesToUse && usedTime < time; casesSoFar++) {
            System.out.println(casesSoFar + " Cumulative time (secs) = " + usedTime / 1000000000.0);
            // set the worst shapelet so far, as long as the shapelet set is full
            worstShapelet = kShapelets.size() == numShapelets ? kShapelets.get(numShapelets - 1) : null;
            subseqDistance.setSeries(casesSoFar);
            classValue.setShapeletValue(data.get(casesSoFar));
            seriesShapelets = searchFunction.searchForShapeletsInSeries(data.get(casesSoFar), this::checkCandidate);
            // BUG FIX: null-check before dereferencing (the original called
            // size() and logged before checking for null).
            if (seriesShapelets != null) {
                numShapeletsEvaluated += seriesShapelets.size();
                outputPrint("data : " + casesSoFar + " has " + seriesShapelets.size() + " candidates"
                        + " cumulative early abandons " + numEarlyAbandons + " worst so far =" + worstShapelet);
                Collections.sort(seriesShapelets, shapeletComparator);
                if (isRemoveSelfSimilar())
                    seriesShapelets = removeSelfSimilar(seriesShapelets);
                kShapelets = combine(numShapelets, kShapelets, seriesShapelets);
            }
            createSerialFile();
            usedTime = System.nanoTime() - startTime;
            // If we underestimated the contract we run back through the data;
            // if we overestimated it we simply stop.
            if (casesSoFar == numSeriesToUse - 1 && !searchFunction.getSearchType().equals("FULL")) // HORRIBLE!
                casesSoFar = 0;
        }
        this.numShapelets = kShapelets.size();
        if (recordShapelets)
            recordShapelets(kShapelets, this.ouputFileLocation);
        if (!supressOutput)
            writeShapelets(kShapelets, new OutputStreamWriter(System.out));
        System.out.println("Time used in find k shapelets = " + usedTime / 1000000000.0 + " leaving the method");
        return kShapelets;
    }
}
9,970
46.9375
173
java
tsml-java
tsml-java-master/src/main/java/tsml/filters/shapelet_filters/old_code/ApproximateShapeletFilter.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package tsml.filters.shapelet_filters.old_code;

import java.io.IOException;
import java.util.ArrayList;
import java.util.TreeMap;
import java.util.logging.Level;
import java.util.logging.Logger;
import tsml.filters.shapelet_filters.ShapeletFilter;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import tsml.transformers.shapelet_tools.quality_measures.ShapeletQuality.ShapeletQualityChoice;
import weka.filters.unsupervised.instance.Resample;

/**
 * An approximate filter to transform a dataset by k shapelets. The approximation is
 * achieved by sampling the dataset (and piecewise-aggregating each series) according
 * to supplied percentages.
 *
 * @author Edgaras Baranauskas
 *
 * To possibly be deprecated.
 */
public class ApproximateShapeletFilter extends ShapeletFilter {

    /** Size of the subsample, as a percentage of the original set. */
    protected int seriesSampleLevel;

    /** Size of the approximated series, as a percentage of the original series. */
    protected int dataPointsSize;

    // Indices (into the original data) of the instances kept by sampling.
    private ArrayList<Integer> sampledIDs;

    /** Default constructor; quality measure defaults to information gain. */
    public ApproximateShapeletFilter() {
        super();
        seriesSampleLevel = 50;
        dataPointsSize = 50;
    }

    /**
     * Single-param constructor: filter is unusable until min/max params are initialised.
     * Quality measure defaults to information gain.
     *
     * @param k the number of shapelets to be generated
     */
    public ApproximateShapeletFilter(int k) {
        super(k);
        seriesSampleLevel = 50;
        dataPointsSize = 50;
    }

    /**
     * Full constructor to create a usable filter. Quality measure defaults to
     * information gain.
     *
     * @param k the number of shapelets to be generated
     * @param minShapeletLength minimum length of shapelets
     * @param maxShapeletLength maximum length of shapelets
     */
    public ApproximateShapeletFilter(int k, int minShapeletLength, int maxShapeletLength) {
        super(k, minShapeletLength, maxShapeletLength);
        seriesSampleLevel = 50;
        dataPointsSize = 50;
    }

    /**
     * Full, exhaustive constructor. Invalid quality selection defaults to
     * information gain.
     *
     * @param k the number of shapelets to be generated
     * @param minShapeletLength minimum length of shapelets
     * @param maxShapeletLength maximum length of shapelets
     * @param qualityChoice the shapelet quality measure to be used with this filter
     */
    public ApproximateShapeletFilter(int k, int minShapeletLength, int maxShapeletLength,
            ShapeletQualityChoice qualityChoice) {
        super(k, minShapeletLength, maxShapeletLength, qualityChoice);
        seriesSampleLevel = 50;
        dataPointsSize = 50;
    }

    /**
     * Sets the sampling levels for series and data points. Both default to 50.
     *
     * @param series     the percentage of series to be sampled, in [1, 100]
     * @param dataPoints the percentage of data points to be used in the PAA series, in [1, 100]
     * @throws IOException if either percentage is outside [1, 100]
     */
    public void setSampleLevels(int series, int dataPoints) throws IOException {
        if (series < 1 || series > 100) {
            throw new IOException("Series sample level must be in range [1, 100]");
        }
        if (dataPoints < 1 || dataPoints > 100) {
            throw new IOException("Piece aggregate approximation must be in range [1, 100]");
        }
        seriesSampleLevel = series;
        dataPointsSize = dataPoints;
    }

    /**
     * Transforms the data. On the first batch the data is sampled and PAA-approximated,
     * shapelets are discovered, and subsequent batches are only PAA-approximated before
     * being transformed by the discovered shapelets.
     */
    @Override
    public Instances process(Instances dataInst) throws IllegalArgumentException {
        // check the input data is correct and that the filter has been set up properly
        inputCheck(dataInst);
        Instances orderedInst = null;
        if (!this.m_FirstBatchDone) {
            sampledIDs = new ArrayList<>();
            dataInst = approximateInstances(dataInst);
            // sort data in round robin order, remembering where each instance came from
            int[] roundRobinIDs = new int[dataInst.numInstances()];
            orderedInst = roundRobinData(dataInst, roundRobinIDs);
            // map round-robin positions back to IDs in the original (pre-sampling) data
            // (the original code allocated dataSourceIDs twice; the first was a dead store)
            dataSourceIDs = new int[dataInst.numInstances()];
            for (int i = 0; i < dataSourceIDs.length; i++) {
                dataSourceIDs[i] = sampledIDs.get(roundRobinIDs[i]);
            }
        } else {
            dataInst = performPAA(dataInst);
        }
        if (!m_FirstBatchDone) {
            // shapelet discovery has not yet been carried out, so do so
            this.shapelets = findBestKShapeletsCache(orderedInst); // get k shapelets ATTENTION
            m_FirstBatchDone = true;
            if (!supressOutput) {
                System.out.println(shapelets.size() + " Shapelets have been generated");
            }
        }
        return this.buildTansformedDataset(dataInst);
    }

    // Approximates the training data: subsample the instances, then PAA each series.
    private Instances approximateInstances(Instances data) {
        Instances output = sampleInstances(data);
        output = performPAA(output);
        return output;
    }

    // Subsamples instances (without replacement) at seriesSampleLevel percent,
    // recording the original index of each kept instance in sampledIDs.
    private Instances sampleInstances(Instances data) {
        if (seriesSampleLevel == 100) {
            return data;
        } else {
            Resample sampler = new Resample();
            // set up sampler
            try {
                sampler.setInputFormat(data);
            } catch (Exception ex) {
                Logger.getLogger(ApproximateShapeletFilter.class.getName()).log(Level.SEVERE, null, ex);
            }
            sampler.setNoReplacement(true);
            sampler.setSampleSizePercent(seriesSampleLevel);
            // queue data for processing
            for (int i = 0; i < data.numInstances(); i++) {
                sampler.input(data.instance(i));
            }
            sampler.batchFinished();
            // retrieve output
            Instances sampledData = new Instances(data, data.numInstances() * seriesSampleLevel / 100);
            boolean isFinished = false;
            while (!isFinished) {
                Instance toAdd = sampler.output();
                if (toAdd == null) {
                    isFinished = true;
                } else {
                    sampledData.add(toAdd);
                    // Find the source id by matching attribute values.
                    // NOTE(review): exact double comparison — fragile if the sampler
                    // copies/rounds values; verify against the Resample implementation.
                    for (int sIndex = 0; sIndex < data.numInstances(); sIndex++) {
                        for (int attIndex = 0; attIndex < data.numAttributes(); attIndex++) {
                            if (data.instance(sIndex).value(attIndex) != toAdd.value(attIndex)) {
                                break;
                            } else if (attIndex == data.numAttributes() - 1) {
                                sampledIDs.add(sIndex);
                            }
                        }
                    }
                }
            }
            return sampledData;
        }
    }

    // Performs Piecewise Aggregate Approximation, shrinking each (normalised) series
    // to dataPointsSize percent of its original number of attributes.
    private Instances performPAA(Instances data) {
        if (dataPointsSize == 100) {
            return data;
        } else {
            int paaSize = (data.numAttributes() - 1) * dataPointsSize / 100;
            // determine output format
            Instances output = null;
            try {
                output = determinePAAOutputFormat(data, paaSize);
            } catch (Exception ex) {
                Logger.getLogger(ApproximateShapeletFilter.class.getName()).log(Level.SEVERE, null, ex);
            }
            double portionLength = ((double) (data.numAttributes() - 1)) / paaSize;
            // for each instance, compute the PAA components
            for (int i = 0; i < data.numInstances(); i++) {
                Instance currentInstance = data.instance(i);
                Instance toAdd = new DenseInstance(paaSize + 1);
                // normalise series
                double[] series = currentInstance.toDoubleArray();
                series = this.subseqDistance.seriesRescaler.rescaleSeries(series, true);
                double[] paaSublists = new double[paaSize];
                int[] paaSublistsSizes = new int[paaSize];
                double currentPortion = portionLength;
                int seriesIndex = 0;
                int subListIndex = 0;
                boolean advance = false;
                // Walk the series, splitting each point's weight across the PAA buckets.
                while (!advance) {
                    if (currentPortion >= 0.999999999999) { // tolerance gets rid of accumulated FP error
                        paaSublistsSizes[subListIndex]++;
                        paaSublists[subListIndex] += series[seriesIndex++];
                        currentPortion -= 1.0;
                        if (currentPortion < 0.0) {
                            currentPortion = 0.0;
                        }
                    } else {
                        if (seriesIndex < series.length - 1) {
                            // required portion goes to the current bucket
                            paaSublistsSizes[subListIndex]++;
                            paaSublists[subListIndex++] += currentPortion * series[seriesIndex];
                            // remaining portion goes to the next bucket
                            currentPortion = 1.0 - currentPortion;
                            paaSublistsSizes[subListIndex]++;
                            paaSublists[subListIndex] += currentPortion * series[seriesIndex];
                            currentPortion = portionLength - currentPortion;
                        } else {
                            advance = true;
                        }
                        seriesIndex++;
                    }
                }
                for (int j = 0; j < paaSublists.length; j++) {
                    toAdd.setValue(j, paaSublists[j] / paaSublistsSizes[j]);
                }
                toAdd.setValue(paaSize, currentInstance.classValue());
                output.add(toAdd);
            }
            return output;
        }
    }

    // Builds the output header for a PAA series of the given length, copying the
    // class attribute from the input format when one is set.
    private Instances determinePAAOutputFormat(Instances inputFormat, int length) throws Exception {
        ArrayList<Attribute> atts = new ArrayList<>();
        for (int i = 0; i < length; i++) {
            atts.add(new Attribute("PAA" + i));
        }
        if (inputFormat.classIndex() >= 0) { // classification set: copy the class attribute
            Attribute target = inputFormat.attribute(inputFormat.classIndex());
            ArrayList<String> vals = new ArrayList<>(target.numValues());
            for (int i = 0; i < target.numValues(); i++) {
                vals.add(target.value(i));
            }
            atts.add(new Attribute(inputFormat.attribute(inputFormat.classIndex()).name(), vals));
        }
        Instances result = new Instances("PAA" + inputFormat.relationName(), atts, inputFormat.numInstances());
        if (inputFormat.classIndex() >= 0) {
            result.setClassIndex(result.numAttributes() - 1);
        }
        return result;
    }

    // Method used for testing.
    private void printTreeMap(TreeMap<Double, Integer> dist) {
        System.out.println("\nTREEMAP");
        for (Double d : dist.keySet()) {
            System.out.println(d + ": " + dist.get(d));
        }
    }

    // Method used for testing: wraps a raw series in an Instances object and runs PAA on it.
    private double[] testPAA(double[] data) throws IOException {
        ArrayList<Attribute> atts = new ArrayList<>();
        for (int i = 0; i < data.length - 1; i++) {
            atts.add(new Attribute("Attribute" + i));
        }
        ArrayList<String> classValues = new ArrayList<>(); // was a raw ArrayList
        classValues.add("0");
        classValues.add("1");
        atts.add(new Attribute("Binary", classValues));
        // create dataset holding the single test instance
        Instances instances = new Instances("Test", atts, 1);
        instances.setClassIndex(data.length - 1);
        instances.add(new DenseInstance(1, data));
        Instances output = performPAA(instances);
        return output.instance(0).toDoubleArray();
    }

    /**
     * Simple manual test of the PAA approximation on a random series.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        // create a series for testing
        System.out.println("\n1.) Create series for testing: ");
        int seriesLength = 11;
        double[] dataEven = new double[seriesLength];
        int min = -5;
        int max = 5;
        for (int j = 0; j < seriesLength; j++) {
            // last value is the class label
            dataEven[j] = (j == seriesLength - 1) ? 0 : min + (int) (Math.random() * ((max - min) + 1));
        }
        ApproximateShapeletFilter ast = new ApproximateShapeletFilter();
        double[] out = null;
        try {
            ast.setSampleLevels(100, 50);
            out = ast.testPAA(dataEven);
        } catch (IOException ex) {
            Logger.getLogger(ApproximateShapeletFilter.class.getName()).log(Level.SEVERE, null, ex);
        }
        System.out.println("Even Test: ");
        // BUG FIX: the original computed the PAA output but never displayed it
        System.out.println(java.util.Arrays.toString(out));
    }
}
15,627
36.298329
127
java
tsml-java
tsml-java-master/src/main/java/tsml/filters/shapelet_filters/old_code/ClusteredShapeletTransform.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.filters.shapelet_filters.old_code; import java.io.FileWriter; import java.io.PrintWriter; import java.util.ArrayList; import java.util.Arrays; import tsml.transformers.shapelet_tools.Shapelet; import tsml.filters.shapelet_filters.ShapeletFilter; import tsml.transformers.shapelet_tools.distance_functions.OnlineShapeletDistance; import weka.core.Attribute; import weka.core.Instances; import weka.filters.SimpleBatchFilter; import tsml.transformers.shapelet_tools.quality_measures.ShapeletQuality.ShapeletQualityChoice; /** * * copyright: Anthony Bagnall * A filter for using the shapelet transform with hierarchical * clustering of shapelets. * * Recommended usage: Build the shapelet transform outside of this class and pass in. 
* * FullShapeletTransform shape=new FullShapeletTransform(); * //Build and use shape here * * int nosClusters=10; * ClusteredShapeletTransform cShape=new ClusteredShapeletTransform(shape,nosClusters); * * it will work like this with any of the numerous constructors * ClusteredShapeletTransform cShape=new ClusteredShapeletTransform(); * Instances c=cShape.process(data) * * * @author Jon Hills - j.hills@uea.ac.uk */ public class ClusteredShapeletTransform extends SimpleBatchFilter{ ShapeletFilter st; protected double[][] distanceMap; protected ArrayList<int[]> clusterPairs; protected ArrayList<Shapelet> clusteredShapelets; protected ArrayList<Shapelet> allShapelets; protected int noClust; public static int DEFAULT_NUMCLUSTERS=1; /* * */ public ClusteredShapeletTransform(ShapeletFilter shapes, int n){ st=shapes; this.clusteredShapelets = new ArrayList<Shapelet>(); noClust=n; } /** * Fully specified constructor. * * @param k The number of shapelets to store. * @param minShapeletLength The minimum shapelet langth. * @param maxShapeletLength The maximum shapelet length. * @param qualityChoice The quality measure to use for assessing candidates. * @param noClust The number of clusters. */ public ClusteredShapeletTransform(int k, int minShapeletLength, int maxShapeletLength, ShapeletQualityChoice qualityChoice, int noClust) { st=new ShapeletFilter(k, minShapeletLength, maxShapeletLength, qualityChoice); this.noClust=noClust; this.clusteredShapelets = new ArrayList<Shapelet>(); } /** * Partially specified constructor. Defaults to clustering. If * clustering is used, defaults to one cluster, i.e., the best * shapelet only. * * @param k The number of shapelets to store. * @param minShapeletLength The minimum shapelet langth. * @param maxShapeletLength The maximum shapelet length. * @param qualityChoice The quality measure to use for assessing candidates. 
*/ public ClusteredShapeletTransform(int k, int minShapeletLength, int maxShapeletLength, ShapeletQualityChoice qualityChoice) { st=new ShapeletFilter(k, minShapeletLength, maxShapeletLength, qualityChoice); st.setSubSeqDistance(new OnlineShapeletDistance()); this.noClust=DEFAULT_NUMCLUSTERS; this.clusteredShapelets = new ArrayList<Shapelet>(); } /** * Partially specified constructor. Defaults to Information Gain quality * measure. Defaults to no clustering. If clustering is used, defaults to * one cluster, i.e., the best shapelet only. * * @param k The number of shapelets to store. * @param minShapeletLength The minimum shapelet langth. * @param maxShapeletLength The maximum shapelet length. */ public ClusteredShapeletTransform(int k, int minShapeletLength, int maxShapeletLength) { st=new ShapeletFilter(k, minShapeletLength, maxShapeletLength); st.setSubSeqDistance(new OnlineShapeletDistance()); this.noClust=DEFAULT_NUMCLUSTERS; this.clusteredShapelets = new ArrayList<Shapelet>(); } /** * Partially specified constructor. Defaults to Information Gain quality * measure. * * @param k The number of shapelets to store. * @param minShapeletLength The minimum shapelet langth. * @param maxShapeletLength The maximum shapelet length. * @param noClust The number of clusters. */ public ClusteredShapeletTransform(int k, int minShapeletLength, int maxShapeletLength, int noClust) { st=new ShapeletFilter(k, minShapeletLength, maxShapeletLength); st.setSubSeqDistance(new OnlineShapeletDistance()); this.noClust=noClust; this.clusteredShapelets = new ArrayList<Shapelet>(); } /** * Partially specified constructor. Defaults to Information Gain quality * measure. Minimum and maximum shapelet lengths must be set before use. * Defaults to no clustering. Defaults to one cluster. * * @param k The number of shapelets to store. 
*/ public ClusteredShapeletTransform(int k) { st=new ShapeletFilter(k); st.setSubSeqDistance(new OnlineShapeletDistance()); this.noClust=DEFAULT_NUMCLUSTERS; this.clusteredShapelets = new ArrayList<Shapelet>(); } /** * Partially specified constructor. Defaults to Information Gain quality * measure. Minimum and maximum shapelet lengths must be set before use. * * @param k The number of shapelets to store. * @param cluster Whether or not to use clustering. * @param noClust Then number of clusters. */ public ClusteredShapeletTransform(int k, boolean cluster, int noClust) { st=new ShapeletFilter(k); st.setSubSeqDistance(new OnlineShapeletDistance()); this.noClust=noClust; this.clusteredShapelets = new ArrayList<Shapelet>(); } /** * Empty constructor. Defaults to Information Gain quality measure, no * clustering, one cluster if clustering turned on. Shapelet lengths must * be set. K must be set. */ public ClusteredShapeletTransform() { st=new ShapeletFilter(); st.setSubSeqDistance(new OnlineShapeletDistance()); this.noClust=DEFAULT_NUMCLUSTERS; this.clusteredShapelets = new ArrayList<Shapelet>(); } /** * Transform datasets. If cluster=true, shapelets will be clustered into * noClust clusters prior to transformation. * * @param data - the input data to be transformed (and to find the shapelets if this is the first run) * @return the transformed Instances in terms of the distance from each shapelet * @throws Exception - if the number of shapelets or the length parameters specified are incorrect */ @Override public Instances process(Instances data) throws Exception{ int size=st.getNumberOfShapelets(); if(size < 1) throw new Exception("Number of shapelets initialised incorrectly - please select value of k (Usage: setNumberOfShapelets"); if(size<noClust) throw new Exception("Trying to produce more clusters than there are shapelets!"); // We only want the shapelets from st, so could optimize this to not work out the transform too. 
However, cleaner this way if(!st.isFirstBatchDone()) st.process(data); allShapelets=st.getShapelets(); clusterShapelets(); this.st.setShapelets(clusteredShapelets); return st.buildTansformedDataset(data); } /** * * @param inputFormat - the format of the input data * @return a new Instances object in the desired output format * @throws Exception - if all required attributes of the filter are not initialised correctly */ @Override protected Instances determineOutputFormat(Instances inputFormat) throws Exception{ int s=st.getNumberOfShapelets(); if(s < 1 || s<noClust){ throw new Exception("ShapeletFilter not initialised correctly - please specify a value of k that is greater than or equal to 1. You entered s="+s+" num clusters ="+noClust); } ArrayList<Attribute> atts = new ArrayList<>(); String name; for(int i = 0; i < noClust; i++){ name = "CShapelet_" + i; atts.add(new Attribute(name)); } Attribute target = inputFormat.attribute(inputFormat.classIndex()); ArrayList<String> vals = new ArrayList(target.numValues()); for(int i = 0; i < target.numValues(); i++){ vals.add(target.value(i)); } atts.add(new Attribute(inputFormat.attribute(inputFormat.classIndex()).name(), vals)); Instances result = new Instances("CShapelets" + inputFormat.relationName(), atts, inputFormat.numInstances()); result.setClassIndex(result.numAttributes() - 1); return result; } /** * Creates a set of clustered shapelets with a noClust clusters. 
*/ public void clusterShapelets() { // System.out.println("Clustering shapelets: "+this.noClust); double[][] shapeletSet = new double[allShapelets.size()][]; for(int i=0;i<shapeletSet.length;i++) { shapeletSet[i] = allShapelets.get(i).getUnivariateShapeletContent(); } distanceMap = getDistanceMap(shapeletSet); clusterPairs = new ArrayList(); this.clusteredShapelets.clear(); //Adds an int[] of each index to clusterPairs for(int i=0;i<distanceMap.length;i++) { int[] tmp = {i}; clusterPairs.add(tmp); } //Returns pair of indexes to clusterPairs/adjusted distanceMap //Is the index of the ArrayList ever a factor? It should be done with //just the stored indexes. int[] bestPair = findClosestPair(distanceMap); double[][] map = new double[2][]; while(clusterPairs.size()>noClust) { adjustClusterPairs(bestPair); map = adjustDistanceMap(); bestPair = findClosestPair(map); } //Select the best shapelet in each cluster //Make sure that the index stored in clusterPairs is the index of //the shapelet stored in the shapelet ArrayList. for(int i=0;i<clusterPairs.size();i++) { if(clusterPairs.get(i).length==1) clusteredShapelets.add(allShapelets.get(clusterPairs.get(i)[0])); else { double best = Double.MIN_VALUE; int position = 0; for(int j=0;j<clusterPairs.get(i).length;j++) { //Infogain will need to be changed to quality measure if(allShapelets.get(clusterPairs.get(i)[j]).qualityValue >best) { best = allShapelets.get(clusterPairs.get(i)[j]).qualityValue; position =j; } } clusteredShapelets.add(allShapelets.get(clusterPairs.get(i)[position])); //System.out.println("Added shapelet at position"+position); } } } /** * Finds the pair on a distance map with the least distance between them. * * @param map The current distance map * @return The indexes of the best-matching pair. 
*/ private int[] findClosestPair(double[][] map) { int[] pair = new int[2]; double best = Double.MAX_VALUE; for(int i=0;i<map.length;i++) { for(int j=i+1;j<map[i].length;j++) { if(map[i][j]<best) { best = map[i][j]; pair[0] = i; pair[1] = j; } } } return pair; } /** * Creates complete distance map with identities and redundant information. * * @param shapeletSet An array of shapelet content double arrays. * @return The distance map for the shapelet set. */ private double[][] getDistanceMap(double[][] shapeletSet) { double[][] map = new double[shapeletSet.length][]; //Initialise double[] for(int i=0;i<shapeletSet.length;i++) { double[] tmp = new double[shapeletSet.length]; map[i] = tmp; } for(int i=0;i<shapeletSet.length;i++) { map[i][i] = 0; for(int j=i+1;j<shapeletSet.length;j++) { map[i][j] = findMinDistance(shapeletSet[i],shapeletSet[j]); map[j][i] = map[i][j]; } } return map; } /** * Returns the shapelet distance between two shapelets, that is, the * shortest distance between the shorter shapelet and the best-matching * subsequence of the longer shapelet. * * @param first One shapelet content array. * @param second The other shapelet content array. * @return The shapelet distance between the shapelets. */ private double findMinDistance(double[] first, double[] second) { double distance = 0; double bestDist = Double.MAX_VALUE; if (first.length == second.length){ bestDist = getDistance(first,second); } else{ if(first.length>second.length){ for(int i=0;i<(first.length-second.length)+1;i++){ double [] temp= Arrays.copyOfRange(first, i, i+second.length); distance = getDistance(temp,second); if(distance<bestDist) bestDist=distance; } } else{ for(int i=0;i<(second.length-first.length)+1;i++){ double [] temp= Arrays.copyOfRange(second, i, i+first.length); distance = getDistance(temp,first); if(distance<bestDist) bestDist=distance; } } } return bestDist; } /** * Returns squared Euclidean distance between two series of equal length. * * @param first The first series. 
* @param second The second series. * @return The Euclidean distance between the series. */ private double getDistance(double[] first, double[] second){ double distance = 0; for(int i=0;i<first.length;i++) distance = distance+ ((first[i]-second[i])*(first[i]-second[i])); return Math.sqrt(distance); } /** * Rebuilds distance map from scratch - not efficient. * * @return The adjusted distance map. */ private double[][] adjustDistanceMap() { double[][] map = new double[clusterPairs.size()][]; //Initialise distance map for(int i=0;i<map.length;i++) { double[] tmp=new double[clusterPairs.size()]; map[i] = tmp; } //Retrieve distances from original distance map. for(int i=0;i<clusterPairs.size();i++) { map[i][i]=0; for(int j=i+1;j<clusterPairs.size();j++) { map[i][j] = averageDistance(clusterPairs.get(i),clusterPairs.get(j)); map[j][i] = map[i][j]; } } return map; } /** * Returns the average distance for the distance map. * * @param first First cluster. * @param second Second cluster. * @return Average distance. */ private double averageDistance(int[] first,int[] second) { double dist = 0; for(int i=0;i<first.length;i++) { for(int j=0;j<second.length;j++) { dist = dist+distanceMap[first[i]][second[j]]; } } dist = dist/(first.length*second.length); return dist; } // /** * Takes a pair of indexes to the clusterPair ArrayList and * merges the entries. * * @param pair A pair of indexes to the clusterPair ArrayList. */ private void adjustClusterPairs(int[] pair) { int[] first = clusterPairs.get(pair[0]); int[] second = clusterPairs.get(pair[1]); int[] tmp = new int[first.length+second.length]; for(int i=0;i<tmp.length;i++) { if(i<first.length) { tmp[i]=first[i]; } else { tmp[i]=second[i-first.length]; } } clusterPairs.remove(pair[0]); clusterPairs.add(pair[0],tmp); clusterPairs.remove(pair[1]); } /** * Returns the noClust variable. * * @return noClust. */ public int getNoClust() { return this.noClust; } /** * Sets the number of clusters to use. 
* * @param num The number of clusters. */ public void setNoClust(int num){ this.noClust = num; } public void setShapeletTransform(ShapeletFilter s){ st=s; } public void outputLog(String outfile) throws Exception { PrintWriter cout = new PrintWriter( new FileWriter(outfile), true); for(int i=0;i<clusteredShapelets.size();i++) { // System.out.println("******************************************"); Shapelet s = clusteredShapelets.get(i); cout.println(s.qualityValue+","+s.seriesId+","+s.startPos); cout.flush(); double[] con = s.getUnivariateShapeletContent(); cout.print(con[0]); cout.flush(); for(int j=1;j<con.length;j++) { cout.print(","+con[j]); cout.flush(); } cout.println(); cout.flush(); } cout.close(); } @Override public String globalInfo() { throw new UnsupportedOperationException("Not supported yet."); } }
19,288
32.663176
185
java
tsml-java
tsml-java-master/src/main/java/tsml/filters/shapelet_filters/old_code/GraceShapeletFilter.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.filters.shapelet_filters.old_code; import experiments.data.DatasetLists; import experiments.data.DatasetLoading; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.io.PrintWriter; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.Map; import java.util.TreeMap; import tsml.transformers.shapelet_tools.Shapelet; import tsml.filters.shapelet_filters.ShapeletFilter; import weka.core.Instances; /** * * @author Aaron */ public class GraceShapeletFilter extends ShapeletFilter { int currentSeries = 0; String seriesShapeletsFilePath; public void setSeries(int i) { currentSeries = i; } /** * The main logic of the filter; when called for the first time, k shapelets * are extracted from the input Instances 'data'. The input 'data' is * transformed by the k shapelets, and the filtered data is returned as an * output. * <p> * If called multiple times, shapelet extraction DOES NOT take place again; * once k shapelets are established from the initial call to process(), the * k shapelets are used to transform subsequent Instances. 
* <p> * Intended use: * <p> * 1. Extract k shapelets from raw training data to build filter; * <p> * 2. Use the filter to transform the raw training data into transformed * training data; * <p> * 3. Use the filter to transform the raw testing data into transformed * testing data (e.g. filter never extracts shapelets from training data, * therefore avoiding bias); * <p> * 4. Build a classifier using transformed training data, perform * classification on transformed test data. * * @param data the input data to be transformed (and to find the shapelets * if this is the first run) * @return the transformed representation of data, according to the * distances from each instance to each of the k shapelets */ @Override public Instances process(Instances data) throws IllegalArgumentException { //check the input data is correct and assess whether the filter has been setup correctly. inputCheck(data); //setup classsValue classValue.init(data); //setup subseqDistance subseqDistance.init(data); //checks if the shapelets haven't been found yet, finds them if it needs too. if (!m_FirstBatchDone) { trainShapelets(data); m_FirstBatchDone = false; //set the shapelets Trained to false, because we'll set it to true once all the sub code has been finished. outputPrint("Partially Built the shapelet Set"); return null; } //build the transformed dataset with the shapelets we've found either on this data, or the previous training data return buildTansformedDataset(data); } /** * protected method for extracting k shapelets. 
* * @param data the data that the shapelets will be taken from * @return an ArrayList of FullShapeletTransform objects in order of their * fitness (by infoGain, seperationGap then shortest length) */ @Override public ArrayList<Shapelet> findBestKShapeletsCache(Instances data) { ArrayList<Shapelet> kShapelets = new ArrayList<>(); ArrayList<Shapelet> seriesShapelets; // temp store of all shapelets for each time series int proportion = numShapelets/data.numClasses(); //for all time series outputPrint("Processing data: "); outputPrint("data : " + currentSeries); //we don't have a worst shapelet because we're doing a single scan. //set the series we're working with. subseqDistance.setSeries(currentSeries); //set the clas value of the series we're working with. classValue.setShapeletValue(data.get(currentSeries)); seriesShapelets = searchFunction.searchForShapeletsInSeries(data.get(casesSoFar), this::checkCandidate); Collections.sort(seriesShapelets, shapeletComparator); seriesShapelets = removeSelfSimilar(seriesShapelets); //by putting them into kShapelets we cut down on how many we seralise. //also use the proportion rather than num to be in line with Balanced. kShapelets = combine(proportion, kShapelets, seriesShapelets); createSerialFile(kShapelets); return kShapelets; } private void createSerialFile(ArrayList<Shapelet> shapelets){ String fileName = getSubShapeletFileName(currentSeries); //Serialise the object. 
ObjectOutputStream out = null; try { out = new ObjectOutputStream(new FileOutputStream(fileName)); out.writeObject(shapelets); } catch (IOException ex) { System.out.println("Failed to write " + ex); } finally{ if(out != null){ try { out.close(); } catch (IOException ex) { System.out.println("Failed to close " + ex); } } } } private String getSubShapeletFileName(int i) { File f = new File(serialName); String str = f.getName(); str = str.substring(0, str.lastIndexOf('.')); return str + "_" + i + ".ser"; } //we use the balanced class structure from BalancedClassShapeletTransform. public Instances processFromSubFile(Instances train) { File f = new File(this.ouputFileLocation); ArrayList<Shapelet> kShapelets = new ArrayList<>(); ArrayList<Shapelet> seriesShapelets; TreeMap<Double, ArrayList<Shapelet>> kShapeletsMap = new TreeMap<>(); for (int i=0; i < train.numClasses(); i++){ kShapeletsMap.put((double)i, new ArrayList<Shapelet>()); } //found out how many we want in each sub list. int proportion = numShapelets/kShapeletsMap.keySet().size(); for(int i=0; i<train.numInstances(); i++){ //get the proportion. kShapelets = kShapeletsMap.get(train.get(i).classValue()); seriesShapelets = readShapeletsFromFile(getSubShapeletFileName(i)); kShapelets = combine(proportion, kShapelets, seriesShapelets); //put the new proportion back. kShapeletsMap.put(train.get(i).classValue(), kShapelets); } kShapelets = buildKShapeletsFromMap(kShapeletsMap); this.numShapelets = kShapelets.size(); shapelets = kShapelets; m_FirstBatchDone = true; return buildTansformedDataset(train); } private ArrayList<Shapelet> buildKShapeletsFromMap(Map<Double, ArrayList<Shapelet>> kShapeletsMap) { ArrayList<Shapelet> kShapelets = new ArrayList<>(); int numberOfClassVals = kShapeletsMap.keySet().size(); int proportion = numShapelets/numberOfClassVals; Iterator<Shapelet> it; //all lists should be sorted. //go through the map and get the sub portion of best shapelets for the final list. 
for(ArrayList<Shapelet> list : kShapeletsMap.values()) { int i=0; it = list.iterator(); while(it.hasNext() && i++ <= proportion) { kShapelets.add(it.next()); } } return kShapelets; } public static ArrayList<Shapelet> readShapeletsFromFile(String shapeletLocation){ ArrayList<Shapelet> shapelets = null; try { ObjectInputStream ois = new ObjectInputStream(new FileInputStream(shapeletLocation)); shapelets = (ArrayList<Shapelet>) ois.readObject(); } catch (IOException | ClassNotFoundException ex) { System.out.println(ex); } return shapelets; } //memUsage is in MB. public static void buildGraceBSUB(String fileName, int numInstances, int fold, String queue, int memUsage) { try { //create the directory and the files. File f1 = new File(fileName+"GRACE.bsub"); f1.createNewFile(); //write the bsubs try (PrintWriter pw = new PrintWriter(f1)) { pw.println("#!/bin/csh"); pw.println("#BSUB -q " + queue); pw.println("#BSUB -J " + fileName+fold + "[1-" + numInstances + "]"); //+1 because we have to start at 1. pw.println("#BSUB -cwd \"/gpfs/sys/raj09hxu/GraceTransform/dist\""); pw.println("#BSUB -oo output/" + fileName+fold + "_%I.out"); pw.println("#BSUB -R \"rusage[mem=" + memUsage + "]\""); pw.println("#BSUB -M " + (memUsage)); //give ourselves a 20% wiggle room. 
pw.println("./etc/profile"); //pw.println("module add java/jdk/1.7.0_13"); pw.println("module add java/jdk1.8.0_51"); pw.println("java -jar -Xmx" + memUsage + "m TimeSeriesClassification.jar " + fileName + " 1 " + (fold+1) + " $LSB_JOBINDEX" ); } } catch (IOException ex) { System.out.println("Failed to create file " + ex); } } public static void main(String[] args) { } public static void test() { final String ucrLocation = "../../time-series-datasets/TSC Problems"; final String transformLocation = "../../"; String fileExtension = File.separator + DatasetLists.tscProblemsSmall[0] + File.separator + DatasetLists.tscProblemsSmall[0]; Instances train = DatasetLoading.loadDataNullable(ucrLocation + fileExtension + "_TRAIN"); Instances test = DatasetLoading.loadDataNullable(ucrLocation + fileExtension + "_TEST"); //first run: build the BSUB. //GraceFullShapeletTransform.buildGraceBSUB("../../"+DatasetLists.tscProblemsSmall[0], train.numInstances(), "raj09hxu", "SamplingExperiments/dist", "samplingExperiments", "long", 1000); GraceShapeletFilter st = new GraceShapeletFilter(); st.setNumberOfShapelets(train.numInstances()*10); st.setLogOutputFile(DatasetLists.tscProblemsSmall[0] + ".csv"); //set the params for your transform. length, shapelets etc. //second run: using the BSUB. for the cluster //st.setSeries(Integer.parseInt(args[0])-1); //st.process(train); //third run: for your own machine. this will build the datasets. String classifierDir = File.separator + st.getClass().getSimpleName() + fileExtension; String savePath = transformLocation + classifierDir; // LocalInfo.saveDataset(st.processFromSubFile(train), savePath + "_TRAIN"); // LocalInfo.saveDataset(st.process(test), savePath + "_TEST"); /**/ } }
11,979
36.554859
194
java
tsml-java
tsml-java-master/src/main/java/tsml/graphs/Pipeline.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.graphs; import java.util.ArrayList; import java.util.List; import tsml.classifiers.EnhancedAbstractClassifier; import tsml.data_containers.TimeSeriesInstances; import tsml.data_containers.utilities.Splitter; import tsml.data_containers.utilities.TimeSeriesSummaryStatistics; import tsml.transformers.Transformer; import weka.classifiers.AbstractClassifier; import weka.core.Instance; import weka.core.Instances; public class Pipeline extends EnhancedAbstractClassifier { List<Layer> layers; public Pipeline() { layers = new ArrayList<>(); } public void add(String name, Transformer transformer) { layers.add(new TransformerLayer(name, transformer)); } public void add(String name, EnhancedAbstractClassifier clf) { layers.add(new ClassifierLayer<EnhancedAbstractClassifier>(name, clf)); } public void add(String name, AbstractClassifier clf) { layers.add(new ClassifierLayer<EnhancedAbstractClassifier>(name, new EnhancedClassifierWrapper(clf))); } public void concat(String name, Layer... concats){ layers.add(new ConcatLayer(name, concats)); } public void concat(String name, int[][] inds, Layer... concats){ layers.add(new ConcatLayer(name, concats, inds)); } public void concat(String name, Pipeline... 
models){ layers.add(new ConcatLayer(name, models)); } public void concat(String name, int[][] inds, Pipeline... concats){ layers.add(new ConcatLayer(name, concats, inds)); } public void concat(String name, Transformer... concats){ layers.add(new ConcatLayer(name, concats)); } public void concat(String name, int[][] inds, Transformer... concats){ layers.add(new ConcatLayer(name, concats, inds)); } public void split(String name, Layer... splits){ layers.add(new SplitLayer(name, splits)); } public void split(String name,int[][] slicingIndexes, Layer... splits){ layers.add(new SplitLayer(name, splits, slicingIndexes)); } public void split(String name, Pipeline... models){ layers.add(new SplitLayer(name, models)); } public void split(String name,int[][] slicingIndexes, Pipeline... models){ layers.add(new SplitLayer(name, models, slicingIndexes)); } public void split(String name,int[][] slicingIndexes, Transformer... transforms){ layers.add(new SplitLayer(name, transforms, slicingIndexes)); } public void split(String name,Transformer... transforms){ layers.add(new SplitLayer(name, transforms)); } public void splitAndEnsemble(String name, Pipeline... 
models){ layers.add(new SplitLayer(name, models)); layers.add(new EnsembleLayer()); } @Override public void buildClassifier(TimeSeriesInstances trainData) throws Exception { //super.buildClassifier(trainData); TimeSeriesInstances data = trainData; for (Layer layer : layers) { data = layer.fit(data); } } public TimeSeriesInstances fit(TimeSeriesInstances trainData) throws Exception { TimeSeriesInstances data = trainData; for (Layer layer : layers) { data = layer.fit(data); } return data; } public TimeSeriesInstances predict(TimeSeriesInstances testData)throws Exception{ TimeSeriesInstances data = testData; for (Layer layer : layers) { data = layer.predict(data); } return data; } @Override public double[][] distributionForInstances(TimeSeriesInstances testData) throws Exception { return predict(testData).getHSliceArray(0); } public static abstract class Layer { String name; abstract TimeSeriesInstances fit(TimeSeriesInstances input) throws Exception; abstract TimeSeriesInstances predict(TimeSeriesInstances inst) throws Exception; } public static class ClassifierLayer<T extends EnhancedAbstractClassifier> extends Layer { EnhancedAbstractClassifier classifier; boolean fit; public ClassifierLayer(String name, T clf) { this.name = name; this.classifier = clf; } TimeSeriesInstances fit(TimeSeriesInstances input) throws Exception { classifier.buildClassifier(input); return predict(input); } @Override TimeSeriesInstances predict(TimeSeriesInstances data) throws Exception { return new TimeSeriesInstances(new double[][][]{classifier.distributionForInstances(data)}, data.getClassIndexes(), data.getClassLabels()); } } public static class PipelineLayer extends Layer { Pipeline pipeline; public PipelineLayer(String name, Pipeline pipeline) { this.name = name; this.pipeline = pipeline; } @Override TimeSeriesInstances fit(TimeSeriesInstances input) throws Exception{ return this.pipeline.fit(input); } @Override TimeSeriesInstances predict(TimeSeriesInstances inst) throws Exception { 
return this.pipeline.predict(inst); } } public static class TransformerLayer extends Layer { Transformer transformer; public TransformerLayer(String name, Transformer clf) { this.name = name; this.transformer = clf; } @Override TimeSeriesInstances fit(TimeSeriesInstances input) throws Exception{ return this.transformer.transform(input); } @Override TimeSeriesInstances predict(TimeSeriesInstances data) throws Exception { return this.transformer.transform(data); } } public abstract static class MultiLayer extends Layer { Layer[] layers; int[][] slicingIndexes; public MultiLayer(String name, Layer... layers) { this.name = name; this.layers = layers; generate_indexes(); } public MultiLayer(String name, Layer[] layers, int[][] indexes) { this.name = name; this.layers = layers; this.slicingIndexes = indexes; } public MultiLayer(String name, Transformer[] concats) { this.layers = new TransformerLayer[concats.length]; this.name = name; int i=0; for(Transformer t : concats) layers[i++] = new TransformerLayer(name + "_" + (i-1),t); generate_indexes(); } public MultiLayer(String name, Transformer[] concats, int[][] indexes) { this.slicingIndexes = indexes; this.layers = new TransformerLayer[concats.length]; int i=0; for(Transformer t : concats) layers[i++] = new TransformerLayer(name + "_" + (i-1),t); } public MultiLayer(String name, Pipeline[] models) { this.layers = new PipelineLayer[models.length]; this.name = name; int i=0; for(Pipeline t : models) layers[i++] = new PipelineLayer(name + "_" + (i-1),t); generate_indexes(); } public MultiLayer(String name, Pipeline[] models, int[][] indexes) { this.slicingIndexes = indexes; this.layers = new PipelineLayer[models.length]; this.name = name; int i=0; for(Pipeline t : models) layers[i++] = new PipelineLayer(name + "_" + (i-1),t); } private void generate_indexes() { slicingIndexes = new int[layers.length][1]; for(int i=0; i< slicingIndexes.length; i++) slicingIndexes[i] = new int[]{i}; } } public static class ConcatLayer extends 
MultiLayer{ public ConcatLayer(String name, Layer[] layers) { super(name, layers); } public ConcatLayer(String name, Pipeline[] models) { super(name, models); } public ConcatLayer(String name, Layer[] layers, int[][] indexes){ super(name, layers, indexes); } public ConcatLayer(String name, Transformer[] concats) { super(name, concats); } public ConcatLayer(String name, Pipeline[] models, int[][] indexes) { super(name, models, indexes); } public ConcatLayer(String name, Transformer[] transforms, int[][] inds) { super(name, transforms, inds); } @Override TimeSeriesInstances fit(TimeSeriesInstances inst) throws Exception{ List<TimeSeriesInstances> split = Splitter.splitTimeSeriesInstances(inst); if (layers.length != split.size()) { System.out.println("layers Split MisMatch"); } List<TimeSeriesInstances> t_split = new ArrayList<TimeSeriesInstances>(split.size()); for (int i = 0; i < layers.length; i++) { t_split.add(layers[i].fit(split.get(i))); } return Splitter.mergeTimeSeriesInstances(t_split); } @Override TimeSeriesInstances predict(TimeSeriesInstances inst) throws Exception { List<TimeSeriesInstances> split = Splitter.splitTimeSeriesInstances(inst); if (layers.length != split.size()) { System.out.println("layers Split MisMatch"); } List<TimeSeriesInstances> t_split = new ArrayList<TimeSeriesInstances>(split.size()); for (int i = 0; i < layers.length; i++) { t_split.add(layers[i].predict(split.get(i))); } return Splitter.mergeTimeSeriesInstances(t_split); } } public static class SplitLayer extends MultiLayer { public SplitLayer(String name, Layer[] layers) { super(name, layers); } public SplitLayer(String name, Layer[] splits, int[][] slicingIndexes) { super(name, splits, slicingIndexes); } public SplitLayer(String name, Pipeline[] models) { super(name, models); } public SplitLayer(String name, Pipeline[] models, int[][] slicingIndexes) { super(name, models, slicingIndexes); } public SplitLayer(String name, Transformer[] transforms, int[][] slicingIndexes) { 
super(name, transforms, slicingIndexes); } public SplitLayer(String name, Transformer[] transforms) { super(name, transforms); } @Override TimeSeriesInstances fit(TimeSeriesInstances inst) throws Exception{ List<TimeSeriesInstances> split = Splitter.splitTimeSeriesInstances(inst, slicingIndexes); List<TimeSeriesInstances> t_split = new ArrayList<TimeSeriesInstances>(layers.length); for (int i = 0; i < layers.length; i++) { System.out.println(split.get(i)); t_split.add(layers[i].fit(split.get(i))); } return Splitter.mergeTimeSeriesInstances(t_split); } @Override TimeSeriesInstances predict(TimeSeriesInstances inst) throws Exception { List<TimeSeriesInstances> split = Splitter.splitTimeSeriesInstances(inst, slicingIndexes); List<TimeSeriesInstances> t_split = new ArrayList<TimeSeriesInstances>(layers.length); for (int i = 0; i < layers.length; i++) { t_split.add(layers[i].predict(split.get(i))); } return Splitter.mergeTimeSeriesInstances(t_split); } } public static class EnsembleLayer extends Layer { @Override TimeSeriesInstances fit(TimeSeriesInstances input) { return predict(input); } @Override TimeSeriesInstances predict(TimeSeriesInstances data) { double[][][] output = new double[data.numInstances()][][]; for(int j=0; j< data.numInstances(); j++){ double[][] output1 = new double[1][data.get(j).getMaxLength()]; for(int i=0; i<data.get(j).getMaxLength(); i++){ output1[0][i] = TimeSeriesSummaryStatistics.mean(data.get(j).getVSliceArray(i)); } output[j] = output1; } return new TimeSeriesInstances(output, data.getClassIndexes(), data.getClassLabels()); } } public static class EnhancedClassifierWrapper extends EnhancedAbstractClassifier{ AbstractClassifier classifier; public EnhancedClassifierWrapper(AbstractClassifier clf){ classifier = clf; } @Override public void buildClassifier(Instances trainData) throws Exception { System.out.println(trainData); super.buildClassifier(trainData); classifier.buildClassifier(trainData); } @Override public double[] 
distributionForInstance(Instance instance) throws Exception { return classifier.distributionForInstance(instance); } } }
14,003
32.342857
151
java
tsml-java
tsml-java-master/src/main/java/tsml/transformers/ACF.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package tsml.transformers;

import experiments.SimulationExperiments;
import experiments.data.DatasetLoading;
import fileIO.OutFile;
import tsml.data_containers.TSCapabilities;
import tsml.data_containers.TimeSeries;
import tsml.data_containers.TimeSeriesInstance;
import utilities.InstanceTools;

import java.text.DecimalFormat;
import java.util.ArrayList;

import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Utils;

/**
 * <!-- globalinfo-start --> Implementation of the autocorrelation function (ACF) as a
 * Weka SimpleBatchFilter. Series-to-series transform, independent of class value:
 * each input series of length m is mapped to its first {@code maxLag} autocorrelation
 * terms. <!-- globalinfo-end --> <!-- options-start --> Valid options are:
 * <p/>
 *
 * <pre>
 * -L
 *  set the max lag.
 * </pre>
 *
 * <!-- options-end -->
 *
 * author: Anthony Bagnall circa 2008. Reviewed and tidied up 2019. This should
 * not really be a batch filter, as it is series to series, but it makes the use
 * case simpler.
 *
 * NOTE(review): this transformer is stateful — {@link #determineOutputFormat(Instances)}
 * mutates {@code maxLag} and {@code seriesLength}, so a single instance should not be
 * shared across datasets of different lengths without resetting the lag.
 */
public class ACF implements Transformer {
    private static final long serialVersionUID = 1L;

    /**
     * If the series are normalised, the calculation can be done much more
     * efficiently.
     */
    private boolean normalized = false; // if true, assume zero mean and unit variance

    /**
     * Whatever the maxLag value, we always ignore at least the endTerms
     * correlations since they are based on too little data and hence unreliable.
     */
    private int endTerms = 4;

    /**
     * The maximum number of ACF terms considered. It must be less than
     * seriesLength-endTerms (checked in process()).
     */
    public static final int DEFAULT_MAXLAG = 100;
    private int maxLag = DEFAULT_MAXLAG;

    /** Currently assumed constant for all series. Have to, using instances. */
    private int seriesLength;

    /** Sets the maximum lag (number of ACF terms produced per series). */
    public void setMaxLag(int n) {
        maxLag = n;
    }

    /** When true, series are assumed zero mean / unit variance and a faster formula is used. */
    public void setNormalized(boolean flag) {
        normalized = flag;
    }

    /**
     * Sets up the header info for the transformed series: one numeric attribute
     * per lag (ACF_1 .. ACF_maxLag) plus the class attribute, if present, as the
     * final attribute. Also clamps {@code maxLag} to seriesLength - endTerms.
     *
     * @param inputFormat format of the data that will be transformed
     * @return header-only Instances describing the ACF output
     */
    @Override
    public Instances determineOutputFormat(Instances inputFormat) {
        // Check capabilities for the filter. Can only handle real valued, no missing.
        // getCapabilities().testWithFail(inputFormat);
        seriesLength = inputFormat.numAttributes();
        if (inputFormat.classIndex() >= 0)
            seriesLength--;
        // Cannot include the final endTerms correlations, since they are based on too
        // little data and hence unreliable.
        if (maxLag > seriesLength - endTerms)
            maxLag = seriesLength - endTerms;
        if (maxLag < 0)
            maxLag = inputFormat.numAttributes() - 1;
        // Set up instances size and format.
        ArrayList<Attribute> atts = new ArrayList<>();
        String name;
        for (int i = 1; i <= maxLag; i++) {
            name = "ACF_" + i;
            atts.add(new Attribute(name));
        }
        if (inputFormat.classIndex() >= 0) { // Copy the class attribute across unchanged
            // Get the class values as an ArrayList
            Attribute target = inputFormat.attribute(inputFormat.classIndex());
            ArrayList<String> vals = new ArrayList<>(target.numValues());
            for (int i = 0; i < target.numValues(); i++)
                vals.add(target.value(i) + "");
            atts.add(new Attribute(inputFormat.attribute(inputFormat.classIndex()).name(), vals));
        }
        Instances result = new Instances("ACF" + inputFormat.relationName(), atts, inputFormat.numInstances());
        if (inputFormat.classIndex() >= 0) {
            result.setClassIndex(result.numAttributes() - 1);
        }
        return result;
    }

    /**
     * Parses a given list of options.
     * <p/>
     *
     * <!-- options-start --> Valid options are:
     * <p/>
     *
     * <pre>
     * -L &lt;num&gt;
     * max lag for the ACF function
     * </pre>
     *
     * <!-- options-end -->
     *
     * @param options the list of options as an array of strings
     * @throws Exception if an option is not supported
     */
    @Override
    public void setOptions(String[] options) throws Exception {
        String maxLagString = Utils.getOption('L', options);
        if (maxLagString.length() != 0)
            this.maxLag = Integer.parseInt(maxLagString);
        else
            this.maxLag = DEFAULT_MAXLAG;
    }

    /**
     * ACF can only operate on real valued attributes with no missing values.
     * NOTE(review): the capability restrictions below are currently commented out,
     * so the returned object imposes no constraints.
     *
     * @return Capabilities object
     */
    public TSCapabilities getCapabilities() {
        TSCapabilities result = new TSCapabilities(this);
        // result.disableAll();
        // // attributes must be numeric
        // // Here add in relational when ready
        // result.enable(Capabilities.Capability.NUMERIC_ATTRIBUTES);
        // // result.enable(Capabilities.Capability.MISSING_VALUES);
        // // class
        // result.enableAllClasses();
        // result.enable(Capabilities.Capability.MISSING_CLASS_VALUES);
        // result.enable(Capabilities.Capability.NO_CLASS);
        return result;
    }

    /**
     * Transforms a single series into its first {@code maxLag} autocorrelations,
     * preserving the class value (if any) as the final attribute.
     *
     * @param inst input series (class value, if present, is removed before fitting)
     * @return new instance holding the ACF terms (+ class value)
     */
    @Override
    public Instance transform(Instance inst) {
        double[] d = InstanceTools.ConvertInstanceToArrayRemovingClassValue(inst);
        // 2. Fit Autocorrelations, if not already set externally
        double[] autoCorr = fitAutoCorrelations(d);
        int length = autoCorr.length + (inst.classIndex() >= 0 ? 1 : 0); // ACF atts + optional class value
        // 6. Stuff back into new Instances.
        Instance out = new DenseInstance(length);
        // Set class value.
        if (inst.classIndex() >= 0) {
            out.setValue(length - 1, inst.classValue());
        }
        for (int k = 0; k < autoCorr.length; k++) {
            out.setValue(k, autoCorr[k]);
        }
        return out;
    }

    /**
     * Computes the first {@code maxLag} autocorrelations of {@code data}.
     * Note it is possible to do this with FFT in O(nlogn) BUT requires 2^n
     * attributes.
     *
     * @param data raw series values
     * @return array of maxLag autocorrelation terms (a[0] is lag 1)
     */
    public double[] fitAutoCorrelations(double[] data) {
        double[] a = new double[maxLag];
        if (!normalized) {
            // Full sample formula: recompute the means/variances of the two
            // overlapping windows for each lag.
            for (int i = 1; i <= maxLag; i++) {
                double s1, s2, ss1, ss2, v1, v2;
                a[i - 1] = 0;
                s1 = s2 = ss1 = ss2 = 0;
                for (int j = 0; j < data.length - i; j++) {
                    s1 += data[j];
                    ss1 += data[j] * data[j];
                    s2 += data[j + i];
                    ss2 += data[j + i] * data[j + i];
                }
                s1 /= data.length - i;
                s2 /= data.length - i;
                for (int j = 0; j < data.length - i; j++)
                    a[i - 1] += (data[j] - s1) * (data[j + i] - s2);
                a[i - 1] /= (data.length - i);
                v1 = ss1 / (data.length - i) - s1 * s1;
                v2 = ss2 / (data.length - i) - s2 * s2;
                if (v1 == 0 && v2 == 0) // Both zero variance, both must be 100% corr
                    a[i - 1] = 1;
                else if (v1 == 0 || (v2 == 0)) // One zero variance the other not
                    a[i - 1] = 0;
                else
                    a[i - 1] /= Math.sqrt(v1) * Math.sqrt(v2);
            }
        } else {
            // Zero mean / unit variance assumed: plain scaled cross-products suffice.
            for (int i = 1; i <= maxLag; i++) {
                a[i - 1] = 0;
                for (int j = 0; j < data.length - i; j++)
                    a[i - 1] += data[j] * data[j + i];
                a[i - 1] /= data.length;
            }
        }
        return a;
    }

    /**
     * Static variant, with no normalisation speed up.
     *
     * @param data raw series values
     * @param mLag number of lags to compute
     * @return first mLag autocorrelations
     */
    public static double[] fitAutoCorrelations(double[] data, int mLag) {
        return fitAutoCorrelations(data, mLag, false);
    }

    /**
     * Static variant of the ACF computation.
     * NOTE(review): unlike the instance method, the non-normalised branch here
     * leaves the covariance unnormalised when either window has zero variance
     * (no special-casing of v1 == 0 / v2 == 0) — confirm whether this
     * inconsistency with {@link #fitAutoCorrelations(double[])} is intended.
     *
     * @param data       raw series values
     * @param mLag       number of lags to compute
     * @param normalised if true, assume zero mean / unit variance
     * @return first mLag autocorrelations
     */
    public static double[] fitAutoCorrelations(double[] data, int mLag, boolean normalised) {
        double[] a = new double[mLag];
        if (!normalised) {
            double s1, s2, ss1, ss2, v1, v2;
            for (int i = 1; i <= mLag; i++) {
                a[i - 1] = 0;
                s1 = s2 = ss1 = ss2 = 0;
                for (int j = 0; j < data.length - i; j++) {
                    s1 += data[j];
                    ss1 += data[j] * data[j];
                    s2 += data[j + i];
                    ss2 += data[j + i] * data[j + i];
                }
                s1 /= data.length - i;
                s2 /= data.length - i;
                for (int j = 0; j < data.length - i; j++)
                    a[i - 1] += (data[j] - s1) * (data[j + i] - s2);
                a[i - 1] /= (data.length - i);
                v1 = ss1 / (data.length - i) - s1 * s1;
                v2 = ss2 / (data.length - i) - s2 * s2;
                if (v1 != 0 && v2 != 0)
                    a[i - 1] /= Math.sqrt(v1) * Math.sqrt(v2);
            }
        } else {
            for (int i = 1; i <= mLag; i++) {
                a[i - 1] = 0;
                for (int j = 0; j < data.length - i; j++)
                    a[i - 1] += data[j] * data[j + i];
                a[i - 1] /= data.length;
            }
        }
        return a;
    }

    public String getRevision() {
        return "Revision 2: 2019";
    }

    /**
     * Below are Instances level functions to reshape the whole data set based on
     * characteristics of all series.
     */
    /**
     * Pre: An Instances of ACF transformed data. Finds the indexes of the last
     * significant feature for all instances.
     */
    /**
     * These are data set level options.
     */
    public void setGlobalSigThresh(boolean flag) {
        useGlobalSigThreshold = flag;
    }

    // Data-set-level truncation state. NOTE(review): globalSignificantLag is
    // initialised from maxLag at construction time and never updated afterwards.
    int globalSignificantLag = maxLag;
    double globalSigThreshold;       // 2/sqrt(seriesLength) significance bound
    boolean useGlobalSigThreshold = true;
    double[] sigThreshold;           // per-attribute significance thresholds
    int[] cutOffs;                   // per-instance first-insignificant-lag index
    boolean globalTruncate = true;
    double alpha = 0.1; // Significant threshold for the truncation

    /** Truncate with an explicit choice of global vs per-series mode. */
    public int truncate(Instances d, boolean global) {
        globalTruncate = global;
        return truncate(d);
    }

    /**
     * Firstly, this method finds the first insignificant ACF term in every series.
     * It then does one of two things: if globalTruncate is true, it finds the
     * max cut off point, and truncates all to this; if not, it zeros all values
     * after each series' own truncation point.
     *
     * @param d ACF-transformed data (modified in place)
     * @return largest cut off point
     */
    public int truncate(Instances d) {
        // Truncate 1: find the first insignificant term for each series, then find the
        // highest, then remove all after this
        int largestPos = 0;
        int[] c = findAllCutOffs(d);
        if (globalTruncate) {
            for (int i = 1; i < c.length; i++) {
                if (c[largestPos] < c[i])
                    largestPos = i;
            }
            // This is to stop zero attributes!
            if (largestPos < d.numAttributes() - 2)
                largestPos++;
            truncate(d, largestPos);
        } else {
            for (int i = 0; i < d.numInstances(); i++) {
                zeroInstance(d.instance(i), c[i]);
            }
        }
        return largestPos;
    }

    /**
     * Computes the cut-off (first insignificant lag) for every instance, using
     * the global 2/sqrt(seriesLength) threshold as a side effect.
     *
     * @param inst ACF-transformed data
     * @return per-instance cut-off indexes (also stored in {@code cutOffs})
     */
    private int[] findAllCutOffs(Instances inst) {
        globalSigThreshold = 2 / Math.sqrt(seriesLength);
        sigThreshold = new double[inst.numAttributes() - 1];
        cutOffs = new int[inst.numInstances()];
        for (int i = 0; i < cutOffs.length; i++)
            cutOffs[i] = findSingleCutOff(inst.instance(i));
        return cutOffs;
    }

    /**
     * PRE: An instance of ACF data. Performs a test of significance on the ACF
     * terms until it finds the first insignificant one. Will not work if the class
     * variable is not the last.
     *
     * @param inst single ACF-transformed series
     * @return index of the first insignificant ACF term (or the last index if all
     *         are significant)
     */
    private int findSingleCutOff(Instance inst) {
        /**
         * Finds the threshold of the first non significant ACF term for all the series.
         */
        double[] r = inst.toDoubleArray();
        int count = 0;
        if (useGlobalSigThreshold) {
            // Same threshold for every lag.
            for (int i = 0; i < inst.numAttributes(); i++) {
                if (i != inst.classIndex()) {
                    sigThreshold[count] = globalSigThreshold;
                    count++;
                }
            }
        } else {
            /// DO NOT USE, I'm not sure of the logic of this, need to look up the paper
            sigThreshold[0] = r[0] * r[0];
            count = 1;
            for (int i = 1; i < inst.numAttributes(); i++) {
                if (i != inst.classIndex()) {
                    sigThreshold[count] = sigThreshold[count - 1] + r[i] * r[i];
                    count++;
                }
            }
            for (int i = 0; i < sigThreshold.length; i++) {
                sigThreshold[i] = (1 + sigThreshold[i]) / seriesLength;
                sigThreshold[i] = 2 / Math.sqrt(sigThreshold[i]);
            }
        }
        for (int i = 0; i < sigThreshold.length; i++)
            if (Math.abs(r[i]) < sigThreshold[i])
                return i;
        return sigThreshold.length - 1;
    }

    /**
     * Truncates all cases to having n attributes, i.e. removes from numAtts()-n to
     * numAtts()-1 (the class attribute, if any, is retained).
     *
     * @param d data to truncate in place
     * @param n number of attributes to keep (excluding class)
     */
    public void truncate(Instances d, int n) {
        int att = n;
        while (att < d.numAttributes()) {
            if (att == d.classIndex())
                att++;
            else
                d.deleteAttributeAt(att);
        }
    }

    /**
     * Sets all values from p to end to zero (class attribute excluded).
     *
     * @param ins instance to modify in place
     * @param p   first index to zero
     */
    private void zeroInstance(Instance ins, int p) {
        for (int i = p; i < ins.numAttributes(); i++) {
            if (i != ins.classIndex())
                ins.setValue(i, 0);
        }
    }

    /**
     * Debug code to test ACF generation.
     */
    public static void testTransform() {
        // Test File ACF: Four AR(1) series, first two \phi_0=0.5, second two
        // \phi_0=-0.5
        Instances test = DatasetLoading.loadDataNullable("C:\\Research\\Data\\TestData\\ACFTest");
        DecimalFormat df = new DecimalFormat("##.####");
        ACF acf = new ACF();
        acf.setMaxLag(test.numAttributes() - 10);
        Instances t2 = acf.transform(test);
        System.out.println(" Number of attributes =" + t2.numAttributes());
        Instance ins = t2.instance(0);
        for (int i = 0; i < ins.numAttributes() && i < 10; i++)
            System.out.print(" " + df.format(ins.value(i)));
        OutFile of = new OutFile("C:\\Research\\Data\\TestData\\ACTTestOutput.csv");
        of.writeString(t2.toString());
    }

    /** Debug code to exercise the truncation logic on simulated AR data. */
    public static void testTrunctate() {
        Instances test = DatasetLoading.loadDataNullable("C:\\Research\\Data\\TestData\\ACFTest");
        DecimalFormat df = new DecimalFormat("##.####");
        ACF acf = new ACF();
        int[] cases = { 20, 20 };
        int seriesLength = 200;
        acf.setMaxLag(test.numAttributes() - 10);
        acf.setMaxLag(seriesLength - 10);
        Instances all = SimulationExperiments.simulateData("AR", 1);
        System.out.println(" Number of attributes All =" + all.numAttributes());
        Instances t2 = acf.transform(all);
        System.out.println(" Number of attributes =" + t2.numAttributes());
        acf.truncate(t2);
        System.out.println(" Number of attributes =" + t2.numAttributes());
        acf.useGlobalSigThreshold = true;
        t2 = acf.transform(all);
        acf.truncate(t2);
        System.out.println(" Number of attributes =" + t2.numAttributes());
    }

    public static void main(String[] args) {
        // Quick sanity check on the static ACF computation, then (unreachable
        // after System.exit) a full dataset transform dump.
        double[] x = { 1, 2, 2, 3, 3, 1, 3, 4, 6, 6, 7, 8 };
        double[] a = fitAutoCorrelations(x, 2);
        for (double d : a)
            System.out.println(d);
        System.exit(0);
        String problemPath = "E:/TSCProblems/";
        String resultsPath = "E:/Temp/";
        String datasetName = "ItalyPowerDemand";
        Instances train = DatasetLoading
                .loadDataNullable("E:/TSCProblems/" + datasetName + "/" + datasetName + "_TRAIN");
        ACF acf = new ACF();
        Instances trans = acf.transform(train);
        OutFile out = new OutFile(resultsPath + datasetName + "ACF_JAVA.csv");
        out.writeLine(datasetName);
        for (Instance ins : trans) {
            double[] d = ins.toDoubleArray();
            for (int j = 0; j < d.length; j++) {
                if (j != trans.classIndex())
                    out.writeString(d[j] + ",");
            }
            out.writeString("\n");
        }
    }

    /**
     * Transforms each dimension of a TimeSeriesInstance independently into its
     * ACF terms.
     *
     * @param inst multivariate series
     * @return new instance with one ACF series per dimension, same label index
     */
    @Override
    public TimeSeriesInstance transform(TimeSeriesInstance inst) {
        // could do this across all dimensions.
        double[][] out = new double[inst.getNumDimensions()][];
        int i = 0;
        for (TimeSeries ts : inst) {
            out[i++] = this.fitAutoCorrelations(ts.toValueArray());
        }
        // create a new output instance with the ACF data.
        return new TimeSeriesInstance(out, inst.getLabelIndex());
    }
}
18,072
32.971805
111
java
tsml-java
tsml-java-master/src/main/java/tsml/transformers/ACF_PACF.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package tsml.transformers;

import java.util.ArrayList;

import utilities.InstanceTools;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 * <!-- globalinfo-start --> Implementation of partial autocorrelation function
 * as a Weka SimpleBatchFilter. Series-to-series transform, independent of class
 * value <!-- globalinfo-end --> <!-- options-start --> Valid options are:
 * <p/>
 *
 * <pre>
 * -L
 *  set the max lag.
 * </pre>
 *
 * <!-- options-end -->
 *
 * author: Anthony Bagnall 2019. Transform for RISE: it finds both the ACF and
 * PACF features and concatenates them (ACF_0..ACF_{maxLag-1} followed by
 * PACF_0..PACF_{maxLag-1}, then the class attribute if present).
 */
public class ACF_PACF extends PACF {

    /**
     * Builds the output header: maxLag ACF attributes, then maxLag PACF
     * attributes, then (optionally) the class attribute as the final attribute.
     * Also clamps maxLag to seriesLength - endTerms as a side effect.
     *
     * @param inputFormat format of the data that will be transformed
     * @return header-only Instances describing the concatenated ACF+PACF output
     */
    @Override
    public Instances determineOutputFormat(Instances inputFormat) {
        // Check capabilities for the filter. Can only handle real valued, no missing.
        // getCapabilities().testWithFail(inputFormat);
        seriesLength = inputFormat.numAttributes();
        if (inputFormat.classIndex() >= 0)
            seriesLength--;
        // The last endTerms correlations are based on too little data to be reliable.
        if (maxLag > seriesLength - endTerms)
            maxLag = seriesLength - endTerms;
        if (maxLag < 0)
            maxLag = inputFormat.numAttributes() - 1;
        // Set up instances size and format.
        ArrayList<Attribute> atts = new ArrayList<>();
        String name;
        for (int i = 0; i < maxLag; i++) {
            name = "ACF_" + i;
            atts.add(new Attribute(name));
        }
        for (int i = 0; i < maxLag; i++) {
            name = "PACF_" + i;
            atts.add(new Attribute(name));
        }
        if (inputFormat.classIndex() >= 0) { // Classification set, set class
            // Get the class values
            Attribute target = inputFormat.attribute(inputFormat.classIndex());
            ArrayList<String> vals = new ArrayList<>(target.numValues());
            for (int i = 0; i < target.numValues(); i++)
                vals.add(target.value(i));
            atts.add(new Attribute(inputFormat.attribute(inputFormat.classIndex()).name(), vals));
        }
        Instances result = new Instances("PACF" + inputFormat.relationName(), atts, inputFormat.numInstances());
        if (inputFormat.classIndex() >= 0)
            result.setClassIndex(result.numAttributes() - 1);
        return result;
    }

    /**
     * Transforms one series into its concatenated ACF and PACF terms, with the
     * class value (if present) as the final attribute.
     *
     * @param inst input series (class value removed before fitting)
     * @return new instance of length 2*maxLag (+1 if there is a class attribute)
     */
    @Override
    public Instance transform(Instance inst) {
        double[] d = InstanceTools.ConvertInstanceToArrayRemovingClassValue(inst);
        // 2. Fit Autocorrelations, if not already set externally
        autos = ACF.fitAutoCorrelations(d, maxLag);
        // 3. Form Partials
        partials = formPartials(autos);
        // 5. Find parameters: the PACF terms are the diagonal of the partials matrix.
        double[] pi = new double[maxLag];
        for (int k = 0; k < maxLag; k++) {
            // Set NaNs/infinities (degenerate Durbin-Levinson steps) to zero.
            if (Double.isNaN(partials[k][k]) || Double.isInfinite(partials[k][k])) {
                pi[k] = 0;
            } else
                pi[k] = partials[k][k];
        }
        // BUG FIX: the original expression was
        //   autos.length + (pi.length + inst.classIndex() >= 0 ? 1 : 0)
        // where the ternary condition parses as (pi.length + inst.classIndex()) >= 0,
        // which is effectively always true, yielding length = autos.length + 1. The
        // PACF loop below then wrote past the DenseInstance bounds. The intended
        // size is ACF atts + PACF atts + optional class value:
        int length = autos.length + pi.length + (inst.classIndex() >= 0 ? 1 : 0);
        // 6. Stuff back into new Instances.
        Instance in = new DenseInstance(length);
        // Set class value in the final slot.
        if (inst.classIndex() >= 0) {
            in.setValue(length - 1, inst.classValue());
        }
        int count = 0;
        for (int k = 0; k < autos.length; k++) {
            in.setValue(count, autos[k]);
            count++;
        }
        for (int k = 0; k < pi.length; k++) {
            in.setValue(count, pi[k]);
            count++;
        }
        return in;
    }

    /**
     * Computes the full matrix of partial autocorrelations from the
     * autocorrelations {@code r} using the Durbin-Levinson recursion.
     * The PACF terms are the diagonal phi[k][k].
     *
     * @param r autocorrelation terms
     * @return r.length x r.length matrix of partial correlations
     */
    public static double[][] formPartials(double[] r) {
        // Using the Durbin-Levinson recursion
        int p = r.length;
        double[][] phi = new double[p][p];
        double numerator, denominator;
        phi[0][0] = r[0];
        for (int k = 1; k < p; k++) {
            // Find diagonal k,k
            // Naive implementation, should be able to do with running sums
            numerator = r[k];
            for (int i = 0; i < k; i++)
                numerator -= phi[i][k - 1] * r[k - 1 - i];
            denominator = 1;
            for (int i = 0; i < k; i++)
                denominator -= phi[k - 1 - i][k - 1] * r[k - 1 - i];
            phi[k][k] = numerator / denominator;
            // Find terms 1 to k-1
            for (int i = 0; i < k; i++)
                phi[i][k] = phi[i][k - 1] - phi[k][k] * phi[k - 1 - i][k - 1];
        }
        return phi;
    }

    /** @return the last partial-correlation matrix computed by transform(). */
    public double[][] getPartials() {
        return partials;
    }

    public String getRevision() {
        return RevisionUtils.extract("$Revision: 2:2019 $");
    }

    /**
     * Parses the -L (max lag) option; defaults to DEFAULT_MAXLAG when absent.
     *
     * @param options the list of options as an array of strings
     * @throws Exception if an option is not supported
     */
    public void setOptions(String[] options) throws Exception {
        String maxLagString = Utils.getOption('L', options);
        if (maxLagString.length() != 0)
            this.maxLag = Integer.parseInt(maxLagString);
        else
            this.maxLag = DEFAULT_MAXLAG;
    }

    public static void main(String[] args) {
    }
}
5,958
33.645349
116
java
tsml-java
tsml-java-master/src/main/java/tsml/transformers/ARMA.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.transformers; import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; import weka.core.Utils; import java.util.ArrayList; import tsml.data_containers.TimeSeries; import tsml.data_containers.TimeSeriesInstance; import utilities.InstanceTools; /** * <!-- globalinfo-start --> Implementation of AR function as a Weka * SimpleBatchFilter Series to series transform independent of class value <!-- * globalinfo-end --> <!-- options-start --> Valid options are: * <p/> * * <pre> * -L * set the max lag. * </pre> * * <!-- options-end --> * * * author: Anthony Bagnall circa 2008. Reviewed and tidied up 2019 This should * not really be a batch filter, as it is series to series, but it makes the use * case simpler. */ public class ARMA implements Transformer { double[] ar; // Max number of AR terms to consider. 
public static int globalMaxLag = 25; // Defaults to 1/4 length of series public int maxLag = globalMaxLag; public boolean useAIC = false; public void setUseAIC(final boolean b) { useAIC = b; } public void setMaxLag(final int a) { maxLag = a; } @Override public Instances determineOutputFormat(final Instances inputFormat) { if (inputFormat.classIndex() >= 0) // Classification set, dont transform the target class! maxLag = (inputFormat.numAttributes() - 1 > maxLag) ? maxLag : inputFormat.numAttributes() - 1; else maxLag = (inputFormat.numAttributes() > maxLag) ? maxLag : inputFormat.numAttributes(); // Set up instances size and format. final ArrayList<Attribute> atts = new ArrayList<>(); String name; for (int i = 0; i < maxLag; i++) { name = "ARMA_" + i; atts.add(new Attribute(name)); } if (inputFormat.classIndex() >= 0) { // Classification set, set class // Get the class values as a fast vector final Attribute target = inputFormat.attribute(inputFormat.classIndex()); final ArrayList<String> vals = new ArrayList<>(target.numValues()); for (int i = 0; i < target.numValues(); i++) vals.add(target.value(i)); atts.add(new Attribute(inputFormat.attribute(inputFormat.classIndex()).name(), vals)); } final Instances result = new Instances("ARMA" + inputFormat.relationName(), atts, inputFormat.numInstances()); if (inputFormat.classIndex() >= 0) result.setClassIndex(result.numAttributes() - 1); return result; } @Override public Instance transform(Instance inst) { // 1. Get series double[] d = InstanceTools.ConvertInstanceToArrayRemovingClassValue(inst); // 2. Fit Autocorrelations final double[] pi = calculateValues(d); // 6. Stuff back into new Instances. int length = pi.length + (inst.classIndex() >= 0 ? 1 : 0); final Instance out = new DenseInstance(length); // Set class value. 
if (inst.classIndex() >= 0) out.setValue(length - 1, inst.classValue()); for (int k = 0; k < pi.length; k++) { out.setValue(k, pi[k]); } return out; } private double[] calculateValues(double[] d) { double[] autos = ACF.fitAutoCorrelations(d, maxLag); // 3. Form Partials double[][] partials = PACF.formPartials(autos); // 4. Find best AIC. Could also use BIC? int best = maxLag; if (useAIC) best = findBestAIC(autos, partials, maxLag, d); // 5. Find parameters final double[] pi = new double[maxLag]; for (int k = 0; k < best; k++) pi[k] = partials[k][best - 1]; return pi; } public static double[] fitAR(final double[] d) { // 2. Fit Autocorrelations final double[] autos = ACF.fitAutoCorrelations(d, globalMaxLag); // 3. Form Partials final double[][] partials = PACF.formPartials(autos); // 4. Find bet AIC. Could also use BIC? final int best = findBestAIC(autos, partials, globalMaxLag, d); // 5. Find parameters final double[] pi = new double[globalMaxLag]; for (int k = 0; k < best; k++) pi[k] = partials[k][best - 1]; return pi; } public static int findBestAIC(final double[] autoCorrelations, final double[][] partialCorrelations, final int maxLag, final double[] d) { // need the variance of the series double sigma2; final int n = d.length; double var = 0, mean = 0; for (int i = 0; i < d.length; i++) mean += d[i]; for (int i = 0; i < d.length; i++) var += (d[i] - mean) * (d[i] - mean); var /= (d.length - 1); double AIC = Double.MAX_VALUE; double bestAIC = Double.MAX_VALUE; int bestPos = 0; int i = 0; boolean found = false; while (i < maxLag && !found) { sigma2 = 1; for (int j = 0; j <= i; j++) { sigma2 -= autoCorrelations[j] * partialCorrelations[j][i]; // System.out.println("\tStep ="+j+" incremental sigma ="+sigma2); } sigma2 *= var; AIC = Math.log(sigma2); i++; AIC += ((double) 2 * (i + 1)) / n; // System.out.println("LAG ="+i+" final sigma = "+sigma2+" // log(sigma)="+Math.log(sigma2)+" AIC = "+AIC); if (AIC == Double.NaN) AIC = Double.MAX_VALUE; if (AIC < bestAIC) { 
bestAIC = AIC; bestPos = i; } else found = true; } return bestPos; } public void setOptions(final String[] options) throws Exception { final String maxLagString = Utils.getOption('L', options); if (maxLagString.length() != 0) this.maxLag = Integer.parseInt(maxLagString); else this.maxLag = globalMaxLag; } @Override public TimeSeriesInstance transform(TimeSeriesInstance inst) { // could do this across all dimensions. double[][] out = new double[inst.getNumDimensions()][]; int i = 0; for (TimeSeries ts : inst) { out[i++] = calculateValues(ts.toValueArray()); } // create a new output instance with the ACF data. return new TimeSeriesInstance(out, inst.getLabelIndex()); } /* * This function verifies the output is the same as from R The R code to perform * the ACF, ARMA and PACF comparison is in .... * */ /* * public static void testTransform(String path){ Instances * data=ClassifierTools.loadData(path+"ACFTest"); ACF acf=new ACF(); * acf.setNormalized(false); PACF pacf=new PACF(); ARMA arma=new ARMA(); int * lag=10; acf.setMaxLag(lag); pacf.setMaxLag(lag); arma.setMaxLag(lag); * arma.setUseAIC(false); try{ Instances acfD=acf.process(data); Instances * pacfD=pacf.process(data); Instances armaD=arma.process(data); //Save first * case to file OutFile of=new OutFile(path+"ACFTest_JavaOutput.csv"); * of.writeLine(",acf1,pacf1,arma"); for(int i=0;i<acfD.numAttributes()-1;i++) * of.writeLine("ar"+(i+1)+","+acfD.instance(0).value(i)+","+pacfD.instance(0). * value(i)+","+armaD.instance(0).value(i)); double[][] * partials=pacf.getPartials(); of.writeLine("\n\n"); for(int * i=0;i<partials.length;i++){ of.writeString("\n"); for(int * j=0;j<partials[i].length;j++) of.writeString(partials[i][j]+","); * * } * * } catch(Exception e){ System.out.println("Exception caught, exit "+e); * e.printStackTrace(); System.exit(0); } } public static void main(String[] * args){ * * testTransform("C:\\Users\\ajb\\Dropbox\\TSC Problems\\TestData\\"); * System.exit(0); //Debug code to test. 
ARMA ar = new ARMA(); * ar.setUseAIC(false); * * //Generate a model double[][] paras={{0.5},{0.7}}; int n=100; int cases=1; // * double[][] * paras={{1.3532,0.4188,-1.2153,0.3091,0.1877,-0.0876,0.0075,0.0004}, // * {1.0524,0.9042,-1.2193,0.0312,0.263,-0.0567,-0.0019} }; * * //Generate a series * * //Fit and compare paramaters without AIC * * //Fit using AIC and compare again * * * } */ }
10,661
41.648
119
java
tsml-java
tsml-java-master/src/main/java/tsml/transformers/AudioFeatures.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.transformers; import experiments.data.DatasetLists; import tsml.data_containers.TimeSeries; import tsml.data_containers.TimeSeriesInstance; import utilities.InstanceTools; import org.apache.commons.math3.complex.Complex; import org.apache.commons.math3.transform.DftNormalization; import org.apache.commons.math3.transform.FastFourierTransformer; import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; import java.util.ArrayList; import static experiments.data.DatasetLoading.loadDataNullable; import static org.apache.commons.math3.transform.TransformType.FORWARD; public class AudioFeatures implements Transformer { @Override public Instances determineOutputFormat(Instances inputFormat) { ArrayList<Attribute> atts = new ArrayList<>(); for (int i = 1; i < 7; i++) { atts.add(new Attribute("AF_att" + i)); } atts.add(inputFormat.classAttribute()); Instances transformHeader = new Instances("audioTransform", atts, inputFormat.numInstances()); transformHeader.setClassIndex(transformHeader.numAttributes() - 1); return transformHeader; } @Override public Instance transform(Instance inst) { return new DenseInstance(1, 
audioTransform(InstanceTools.ConvertInstanceToArrayRemovingClassValue(inst), inst.classValue())); } @Override public TimeSeriesInstance transform(TimeSeriesInstance inst) { // could do this across all dimensions. double[][] out = new double[inst.getNumDimensions()][]; int i = 0; for (TimeSeries ts : inst) { out[i++] = audioTransform(ts.toValueArray()); } // create a new output instance with the ACF data. return new TimeSeriesInstance(out, inst.getLabelIndex()); } private double[] audioTransform(double[] series, double classVal) { return audioTransform(series, classVal, true); } private double[] audioTransform(double[] series) { return audioTransform(series, Double.NaN, false); } private double[] audioTransform(double[] series, double classVal, boolean includeClassVal) { int fs = (series.length + 1) * 2; int nfft = fs; nfft = nearestPowerOF2(nfft); Complex[] complexData = new Complex[nfft]; double[] spectralMag = new double[nfft / 2]; FastFourierTransformer fft = new FastFourierTransformer(DftNormalization.STANDARD); for (int i = 0; i < complexData.length; i++) { complexData[i] = new Complex(0.0, 0.0); } double mean = 0; if (series.length < nfft) { for (int i = 0; i < series.length; i++) { mean += series[i]; } mean /= series.length; } for (int i = 0; i < nfft; i++) { if (i < series.length) complexData[i] = new Complex(series[i], 0); else complexData[i] = new Complex(mean, 0); } complexData = fft.transform(complexData, FORWARD); for (int i = 0; i < (nfft / 2); i++) { spectralMag[i] = complexData[i].abs(); } double[] output = new double[includeClassVal ? 
7 : 6]; output[0] = spectralCentroid(spectralMag, nfft); output[1] = spectralSpread(spectralMag, nfft); output[2] = spectralFlatness(spectralMag); output[3] = spectralSkewness(spectralMag); output[4] = spectralKurtosis(spectralMag); output[5] = zeroCrossingRate(series, nfft); if(includeClassVal) output[6] = classVal; return output; } private static int nearestPowerOF2(int x) { float power = (float) (Math.log(x) / Math.log(2)); int m = (int) Math.ceil(power); return (int) Math.pow(2.0, (double) m); } private static double spectralCentroid(double[] spectralMag, int fs) { int nfft = fs; double numerator = 0.0; double denominator = 0.0; double binWidth = ((double) fs / (double) nfft); for (int i = 0; i < spectralMag.length; i++) { numerator += (((i * binWidth) + (binWidth / 2)) * Math.pow(spectralMag[i], 2)); denominator += Math.pow(spectralMag[i], 2); } return numerator / denominator; } private static double spectralSpread(double[] spectralMag, int fs) { int nfft = fs; double numerator = 0.0; double denominator = 0.0; double binWidth = ((double) fs / (double) nfft); double spectralCentroid = spectralCentroid(spectralMag, fs); for (int i = 0; i < spectralMag.length; i++) { numerator += (Math.pow(((i * binWidth) + (binWidth / 2)) - spectralCentroid, 2) * Math.pow(spectralMag[i], 2)); denominator += Math.pow(spectralMag[i], 2); } return Math.sqrt(numerator / denominator); } private static double spectralFlatness(double[] spectralMag) { int numBands = (10 > spectralMag.length ? 
spectralMag.length : 10); double numerator = 0.0; double denominator = 0.0; double spectralFlatness = 0.0; int bandWidth = spectralMag.length / numBands; for (int i = 0; i < numBands; i++) { double numeratorTmp = 0.0; double denominatorTmp = 0.0; for (int j = (i * bandWidth); j < (i * bandWidth) + bandWidth; j++) { numeratorTmp *= Math.pow(spectralMag[j], 2); denominatorTmp += Math.pow(spectralMag[j], 2); } numerator = Math.pow(numeratorTmp, (1 / bandWidth)); denominator = ((double) 1 / (double) bandWidth) * denominatorTmp; spectralFlatness += (numerator / denominator); } return spectralFlatness / numBands; } private static double spectralSkewness(double[] spectralMag) { double numerator = 0.0; double denominator = 0.0; double spectralMean = 0.0; for (int i = 0; i < spectralMag.length; i++) { spectralMean += spectralMag[i]; } spectralMean /= spectralMag.length; for (int i = 0; i < spectralMag.length; i++) { numerator += Math.pow((spectralMag[i] - spectralMean), 3); denominator += Math.pow((spectralMag[i] - spectralMean), 2); } numerator = ((double) 1 / (double) spectralMag.length) * numerator; denominator = Math.pow(((double) 1 / (double) spectralMag.length) * denominator, (3 / 2)); return numerator / denominator; } private static double spectralKurtosis(double[] spectralMag) { double numerator = 0.0; double denominator = 0.0; double spectralMean = 0.0; double fourthMoment = 0.0; for (int i = 0; i < spectralMag.length; i++) { spectralMean += spectralMag[i]; } spectralMean /= spectralMag.length; for (int i = 0; i < spectralMag.length; i++) { fourthMoment += Math.pow(spectralMag[i] - spectralMean, 4); } fourthMoment /= (spectralMag.length - 1); for (int i = 0; i < spectralMag.length; i++) { numerator += Math.pow((spectralMag[i] - spectralMean), 4); } denominator = fourthMoment * (spectralMag.length - 1); return (numerator / denominator) - 3; } private static double zeroCrossingRate(double[] series, int fs) { double zcr = 0.0; for (int i = 1; i < series.length; i++) { 
zcr += Math.abs((series[i] >= 0 ? 1 : 0) - (series[i - 1] >= 0 ? 1 : 0)); } return (fs / series.length) * zcr; } public static void main(String[] args) { AudioFeatures af = new AudioFeatures(); Instances[] data = new Instances[2]; data[0] = loadDataNullable("Z:/ArchiveData/Univariate_arff" + "/" + DatasetLists.tscProblems112[88] + "/" + DatasetLists.tscProblems112[88] + "_TRAIN"); data[0].addAll(loadDataNullable("Z:/ArchiveData/Univariate_arff" + "/" + DatasetLists.tscProblems112[88] + "/" + DatasetLists.tscProblems112[88] + "_TEST")); System.out.println(data[0].relationName()); try { data[1] = af.transform(data[0]); } catch (Exception e) { e.printStackTrace(); } // Before transform. System.out.println(data[0].get(0).toString()); // After transform. for (int i = 0; i < data[1].size(); i++) { System.out.println(data[1].get(i).toString()); } } }
9,452
35.639535
118
java
tsml-java
tsml-java-master/src/main/java/tsml/transformers/BagOfPatterns.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.transformers; import experiments.data.DatasetLoading; import tsml.data_containers.TimeSeries; import tsml.data_containers.TimeSeriesInstance; import tsml.data_containers.TimeSeriesInstances; import utilities.NumUtils; import utilities.StatisticalUtilities; import java.io.File; import java.io.IOException; import java.util.*; import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; import weka.core.SparseInstance; import weka.filters.SimpleBatchFilter; /** * Filter to transform time series into a bag of patterns representation. 
i.e * pass a sliding window over each series normalise and convert each window to * sax form build a histogram of non-trivially matching patterns * * Resulting in a bag (histogram) of patterns (SAX words) describing the * high-level structure of each timeseries * * Params: wordLength, alphabetSize, windowLength * * @author James */ public class BagOfPatterns implements TrainableTransformer { public TreeSet<String> dictionary; private final int windowSize; private final int numIntervals; private final int alphabetSize; private boolean useRealAttributes = true; private boolean numerosityReduction = false; // can expand to different types of nr // like those in senin implementation later, if wanted private List<String> alphabet = null; private boolean isFit; private static final long serialVersionUID = 1L; public BagOfPatterns() { this(4, 4, 10); } public BagOfPatterns(int PAA_intervalsPerWindow, int SAX_alphabetSize, int windowSize) { this.numIntervals = PAA_intervalsPerWindow; this.alphabetSize = SAX_alphabetSize; this.windowSize = windowSize; alphabet = SAX.getAlphabet(SAX_alphabetSize); } public int getWindowSize() { return numIntervals; } public int getNumIntervals() { return numIntervals; } public int getAlphabetSize() { return alphabetSize; } public void useRealValuedAttributes(boolean b) { useRealAttributes = b; } public void performNumerosityReduction(boolean b) { numerosityReduction = b; } private HashMap<String, Integer> buildHistogram(LinkedList<double[]> patterns) { HashMap<String, Integer> hist = new HashMap<>(); for (double[] pattern : patterns) { // convert to string String word = ""; for (int j = 0; j < pattern.length; ++j) word += (String) alphabet.get((int) pattern[j]); Integer val = hist.get(word); if (val == null) val = 0; hist.put(word, val + 1); } return hist; } public HashMap<String, Integer> buildBag(TimeSeries series) { LinkedList<double[]> patterns = new LinkedList<>(); double[] prevPattern = new double[windowSize]; for (int i = 0; i < 
windowSize; ++i) prevPattern[i] = -1; for (int windowStart = 0; windowStart + windowSize - 1 < series.getSeriesLength(); ++windowStart) { double[] pattern = series.getVSliceArray(windowStart, windowStart+windowSize); StatisticalUtilities.normInPlace(pattern); pattern = SAX.convertSequence(pattern, alphabetSize, numIntervals); if (!(numerosityReduction && identicalPattern(pattern, prevPattern))) patterns.add(pattern); prevPattern = pattern; } return buildHistogram(patterns); } public HashMap<String, Integer> buildBag(Instance series) { LinkedList<double[]> patterns = new LinkedList<>(); double[] prevPattern = new double[windowSize]; for (int i = 0; i < windowSize; ++i) prevPattern[i] = -1; for (int windowStart = 0; windowStart + windowSize - 1 < series.numAttributes() - 1; ++windowStart) { double[] pattern = slidingWindow(series, windowStart); StatisticalUtilities.normInPlace(pattern); pattern = SAX.convertSequence(pattern, alphabetSize, numIntervals); if (!(numerosityReduction && identicalPattern(pattern, prevPattern))) patterns.add(pattern); prevPattern = pattern; } return buildHistogram(patterns); } private double[] slidingWindow(Instance series, int windowStart) { double[] window = new double[windowSize]; // copy the elements windowStart to windowStart+windowSize from data into the // window for (int i = 0; i < windowSize; ++i) window[i] = series.value(i + windowStart); return window; } private boolean identicalPattern(double[] a, double[] b) { for (int i = 0; i < a.length; ++i) if (a[i] != b[i]) return false; return true; } public Instances determineOutputFormat(Instances inputFormat) { ArrayList<Attribute> attributes = new ArrayList<>(); for (String word : dictionary) attributes.add(new Attribute(word)); Instances result = new Instances("BagOfPatterns_" + inputFormat.relationName(), attributes, inputFormat.numInstances()); if (inputFormat.classIndex() >= 0) { // Classification set, set class // Get the class values as a fast vector Attribute target = 
inputFormat.attribute(inputFormat.classIndex()); ArrayList<String> vals = new ArrayList<>(target.numValues()); for (int i = 0; i < target.numValues(); i++) { vals.add(target.value(i)); } result.insertAttributeAt(new Attribute(inputFormat.attribute(inputFormat.classIndex()).name(), vals), result.numAttributes()); result.setClassIndex(result.numAttributes() - 1); } return result; } // TODO: Review, as we build bag twice on train. *Could* override fittransform // to avoid too much work. @Override public void fit(Instances data) { dictionary = new TreeSet<>(); for (Instance inst : data) { HashMap<String, Integer> bag = buildBag(inst); dictionary.addAll(bag.keySet()); } isFit = true; } @Override public void fit(TimeSeriesInstances data) { dictionary = new TreeSet<>(); for (TimeSeriesInstance inst : data) { for (TimeSeries ts : inst){ HashMap<String, Integer> bag = buildBag(ts); dictionary.addAll(bag.keySet()); } } isFit = true; } @Override public boolean isFit() { return isFit; } @Override public TimeSeriesInstance transform(TimeSeriesInstance inst) { //could do this across all dimensions. double[][] out = new double[inst.getNumDimensions()][]; int i = 0; for(TimeSeries ts : inst){ out[i++] = bagToArray(buildBag(ts)); } //create a new output instance with the ACF data. return new TimeSeriesInstance(out, inst.getLabelIndex()); } @Override public Instance transform(Instance inst) { double[] bag = bagToArray(buildBag(inst)); int size = bag.length + (inst.classIndex() >= 0 ? 
1 : 0); Instance out = new DenseInstance(size); for (int j = 0; j < bag.length; j++) out.setValue(j, bag[j]); if (inst.classIndex() >= 0) out.setValue(out.numAttributes() - 1, inst.classValue()); return out; } public double[] bagToArray(HashMap<String, Integer> bag) { double[] res = new double[dictionary.size()]; int j = 0; for (String word : dictionary) { Integer val = bag.get(word); if (val != null) res[j] += val; ++j; } return res; } public static void main(String[] args) throws IOException { String localPath="src/main/java/experiments/data/tsc/"; String datasetName = "Chinatown"; Instances train = DatasetLoading .loadData(localPath + datasetName + File.separator + datasetName + "_TRAIN.ts"); Instances test = DatasetLoading .loadData(localPath + datasetName + File.separator + datasetName + "_TEST.ts"); BagOfPatterns transform = new BagOfPatterns(); Instances out_train = transform.fitTransform(train); Instances out_test = transform.transform(test); System.out.println(out_train.toString()); System.out.println(out_test.toString()); } }
9,480
30.498339
113
java
tsml-java
tsml-java-master/src/main/java/tsml/transformers/BaseTrainableTransformer.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.transformers; import tsml.data_containers.TimeSeriesInstances; import tsml.data_containers.utilities.Converter; import weka.core.Instance; import weka.core.Instances; public abstract class BaseTrainableTransformer implements TrainableTransformer { private boolean fitted; private String[] labels; @Override public void fit(final TimeSeriesInstances data) { fitted = true; labels = data.getClassLabels(); } public void reset() { fitted = false; } protected String[] getLabels() { return labels; } @Override public boolean isFit() { return fitted; } @Override public Instance transform(final Instance inst) { if(!isFit()) { throw new IllegalStateException("not fitted"); } return Converter.toArff(transform(Converter.fromArff(inst)), labels); } @Override public void fit(final Instances data) { fit(Converter.fromArff(data)); } }
1,761
29.37931
80
java
tsml-java
tsml-java-master/src/main/java/tsml/transformers/BinaryTransform.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */
package tsml.transformers;

//import java.io.FileWriter;
import java.util.ArrayList;
import java.util.Collections;

import tsml.data_containers.TimeSeriesInstance;
import tsml.data_containers.TimeSeriesInstances;
import tsml.transformers.shapelet_tools.OrderLineObj;
import utilities.class_counts.TreeSetClassCounts;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.filters.SimpleBatchFilter;

/**
 * A binary filter that uses information gain quality measure to determine the
 * split point/ copyright: Anthony Bagnall
 *
 * @author Jon Hills j.hills@uea.ac.uk
 */
public class BinaryTransform implements TrainableTransformer {

    // set by fit(Instances); NOTE(review): the TimeSeriesInstances path never sets it
    private boolean isFit = false;
    // per-attribute info-gain split thresholds learned by fit(Instances)
    private double[] splits;

    /**
     * Output header: one nominal {0,1} attribute per non-class input attribute,
     * plus the original class attribute if present.
     */
    @Override
    public Instances determineOutputFormat(Instances inputFormat) {
        int length = inputFormat.numAttributes();
        if (inputFormat.classIndex() >= 0)
            length--;
        // Set up instances size and format.
        ArrayList<Attribute> atts = new ArrayList<>();
        ArrayList<String> attributeValues = new ArrayList<>();
        attributeValues.add("0");
        attributeValues.add("1");
        String name;
        for (int i = 0; i < length; i++) {
            name = "Binary_" + i;
            atts.add(new Attribute(name, attributeValues));
        }
        if (inputFormat.classIndex() >= 0) { // Classification set, set class
            // Get the class values as a fast vector
            Attribute target = inputFormat.attribute(inputFormat.classIndex());
            ArrayList<String> vals = new ArrayList<>();
            for (int i = 0; i < target.numValues(); i++)
                vals.add(target.value(i));
            atts.add(new Attribute(inputFormat.attribute(inputFormat.classIndex()).name(), vals));
        }
        Instances result = new Instances("Binary" + inputFormat.relationName(), atts, inputFormat.numInstances());
        if (inputFormat.classIndex() >= 0) {
            result.setClassIndex(result.numAttributes() - 1);
        }
        return result;
    }

    /**
     * Finds the value of one attribute that best splits the data by information
     * gain: builds a (value, class) orderline, sorts it, and scans all candidate
     * thresholds.
     * NOTE(review): the class distribution comes from TreeSetClassCounts.values();
     * infoGainThreshold indexes counts by (int) class value — assumes the
     * iteration order matches class indices; verify against TreeSetClassCounts.
     */
    public double findSplitValue(Instances data, double[] vals, double[] classes) {
        // return 1;
        // Put into an order list
        ArrayList<OrderLineObj> list = new ArrayList<OrderLineObj>();
        for (int i = 0; i < vals.length; i++)
            list.add(new OrderLineObj(vals[i], classes[i]));
        // Sort the vals
        int[] tree = new TreeSetClassCounts(data).values().stream().mapToInt(e -> e.intValue()).toArray();
        Collections.sort(list);
        return infoGainThreshold(list, tree);
    }

    /**
     * Shannon entropy (base 2) of a class-count array; a single class gives 0.
     */
    private static double entropy(int[] classDistributions) {
        if (classDistributions.length == 1) {
            return 0;
        }
        double thisPart;
        double toAdd;
        int total = 0;
        for (int i : classDistributions) {
            total += i;
        }
        // to avoid NaN calculations, the individual parts of the entropy are calculated
        // and summed.
        // i.e. if there is 0 of a class, then that part would calculate as NaN, but
        // this can be caught and
        // set to 0.
        ArrayList<Double> entropyParts = new ArrayList<Double>();
        for (int i : classDistributions) {
            thisPart = (double) i / total;
            toAdd = -thisPart * Math.log10(thisPart) / Math.log10(2);
            if (Double.isNaN(toAdd))
                toAdd = 0;
            entropyParts.add(toAdd);
        }
        double entropy = 0;
        for (int i = 0; i < entropyParts.size(); i++) {
            entropy += entropyParts.get(i);
        }
        return entropy;
    }

    /**
     * Scans every distinct split position in a sorted orderline and returns the
     * threshold (midpoint between the two neighbouring values) that maximises
     * information gain over the parent class distribution.
     *
     * @param orderline         (distance, classVal) pairs, sorted by distance
     * @param classDistribution class counts for the whole orderline
     * @return the best midpoint threshold, or -1 if no split improves the gain
     */
    public static double infoGainThreshold(ArrayList<OrderLineObj> orderline, int[] classDistribution) {
        // for each split point, starting between 0 and 1, ending between end-1 and end
        // addition: track the last threshold that was used, don't bother if it's the
        // same as the last one
        double lastDist = orderline.get(0).getDistance(); // must be initialised as not visited(no point breaking before
                                                          // any data!)
        double thisDist = -1;
        double bsfGain = -1;
        double threshold = -1;
        // check that there is actually a split point
        // for example, if all
        for (int i = 1; i < orderline.size(); i++) {
            thisDist = orderline.get(i).getDistance();
            if (i == 1 || thisDist != lastDist) { // check that threshold has moved(no point in sampling identical
                                                  // thresholds)- special case - if 0 and 1 are the same dist
                // count class instances below and above threshold
                int[] lessClasses = new int[classDistribution.length];
                int[] greaterClasses = new int[classDistribution.length];
                int sumOfLessClasses = 0;
                int sumOfGreaterClasses = 0;
                // visit those below threshold
                for (int j = 0; j < i; j++) {
                    lessClasses[(int) orderline.get(j).getClassVal()]++;
                    sumOfLessClasses++;
                }
                for (int j = i; j < orderline.size(); j++) {
                    greaterClasses[(int) orderline.get(j).getClassVal()]++;
                    sumOfGreaterClasses++;
                }
                int sumOfAllClasses = sumOfLessClasses + sumOfGreaterClasses;
                double parentEntropy = entropy(classDistribution);
                // calculate the info gain below the threshold
                double lessFrac = (double) sumOfLessClasses / sumOfAllClasses;
                double entropyLess = entropy(lessClasses);
                // calculate the info gain above the threshold
                double greaterFrac = (double) sumOfGreaterClasses / sumOfAllClasses;
                double entropyGreater = entropy(greaterClasses);
                double gain = parentEntropy - lessFrac * entropyLess - greaterFrac * entropyGreater;
                // System.out.println(parentEntropy+" - "+lessFrac+" * "+entropyLess+" -
                // "+greaterFrac+" * "+entropyGreater);
                // System.out.println("gain calc:"+gain);
                if (gain > bsfGain) {
                    bsfGain = gain;
                    threshold = (thisDist - lastDist) / 2 + lastDist;
                }
            }
            lastDist = thisDist;
        }
        return threshold;
    }

    /**
     * TimeSeries variant of {@link #findSplitValue(Instances, double[], double[])};
     * class counts come directly from the TimeSeriesInstances container.
     */
    public double findSplitValue(TimeSeriesInstances data, double[] vals, double[] classes) {
        // return 1;
        // Put into an order list
        ArrayList<OrderLineObj> list = new ArrayList<OrderLineObj>();
        for (int i = 0; i < vals.length; i++)
            list.add(new OrderLineObj(vals[i], classes[i]));
        // Sort the vals
        int[] tree = data.getClassCounts();
        Collections.sort(list);
        return infoGainThreshold(list, tree);
    }

    /**
     * NOTE(review): the TimeSeriesInstance path is an unimplemented stub — it
     * returns null; callers using the TimeSeries API will get a null instance.
     */
    @Override
    public TimeSeriesInstance transform(TimeSeriesInstance inst) {
        return null;
    }

    /**
     * NOTE(review): unimplemented stub — the intended body is commented out below
     * (and is incomplete: it references undeclared j/vals). isFit is never set on
     * this path.
     */
    @Override
    public void fit(TimeSeriesInstances data) {
        /*
         * splits = new double[data.getMaxLength()]; int[] classes = new
         * int[data.numInstances()]; int i=0; for(TimeSeriesInstance inst :data)
         * classes[i++] = inst.getLabelIndex(); for(TimeSeriesInstance inst :data) //
         * Get values of attribute j for(TimeSeries ts : inst){ // find the IG split
         * point splits[j] = findSplitValue(data, vals, classes); } } isFit = true;
         */
    }

    /**
     * Binarises every non-class attribute: 0 if below the learned split for that
     * attribute, 1 otherwise; the class value is copied through unchanged.
     * NOTE(review): throws NullPointerException if called before fit(Instances).
     */
    @Override
    public Instance transform(Instance inst) {
        Instance newInst = new DenseInstance(inst.numAttributes());
        for (int j = 0; j < inst.numAttributes(); j++) {
            if (j != inst.classIndex()) {
                if (inst.value(j) < splits[j])
                    newInst.setValue(j, 0);
                else
                    newInst.setValue(j, 1);
            } else
                newInst.setValue(j, inst.classValue());
        }
        return newInst;
    }

    /**
     * Learns one info-gain split threshold per non-class attribute from the
     * training data. The class-index slot of splits is left at 0 (never read by
     * transform).
     */
    @Override
    public void fit(Instances data) {
        splits = new double[data.numAttributes()];
        double[] classes = new double[data.numInstances()];
        for (int i = 0; i < classes.length; i++)
            classes[i] = data.instance(i).classValue();
        for (int j = 0; j < data.numAttributes(); j++) { // for each data
            if (j != data.classIndex()) {
                // Get values of attribute j
                double[] vals = new double[data.numInstances()];
                for (int i = 0; i < data.numInstances(); i++)
                    vals[i] = data.instance(i).value(j);
                // find the IG split point
                splits[j] = findSplitValue(data, vals, classes);
            }
        }
        isFit = true;
    }

    @Override
    public boolean isFit() {
        return isFit;
    }
}
34.718412
120
java
tsml-java
tsml-java-master/src/main/java/tsml/transformers/CachedTransformer.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.transformers; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; import experiments.data.DatasetLoading; import tsml.data_containers.TimeSeriesInstance; import tsml.data_containers.TimeSeriesInstances; import org.junit.Assert; import tsml.classifiers.distance_based.utils.collections.params.ParamSet; import tsml.data_containers.utilities.Converter; import weka.core.Instance; import weka.core.Instances; /** * Purpose: cache the filtering operation using a map. Note, the instances must * be hashed first to use the cache reliably otherwise issues occur with * instance copying changing the hashcode due to memory locations. 
* <p> * Contributors: goastler, abostrom */ public class CachedTransformer extends BaseTrainableTransformer { // the filter to cache the output of private Transformer transformer; // whether to only cache instances from the fit() call OR all instances handed // to the transform method private boolean cacheFittedDataOnly; // the cache to store instances against their corresponding transform output private Map<TimeSeriesInstance, TimeSeriesInstance> tsCache; // use object as key so we can accept either inst or tsinst private Map<Instance, Instance> arffCache; public CachedTransformer(final Transformer transformer) { setTransformer(transformer); setCacheFittedDataOnly(true); reset(); } public boolean isCacheFittedDataOnly() { return cacheFittedDataOnly; } public void setCacheFittedDataOnly(final boolean cacheFittedDataOnly) { this.cacheFittedDataOnly = cacheFittedDataOnly; } public void reset() { super.reset(); tsCache = new HashMap<>(); arffCache = new HashMap<>(); } @Override public void fit(final Instances data) { super.fit(data); if(transformer instanceof TrainableTransformer) { ((TrainableTransformer) transformer).fit(data); } for (final Instance instance : data) { arffCache.put(instance, null); } } @Override public void fit(final TimeSeriesInstances data) { super.fit(data); if(transformer instanceof TrainableTransformer) { ((TrainableTransformer) transformer).fit(data); } for (final TimeSeriesInstance instance : data) { tsCache.put(instance, null); } } @Override public String toString() { return transformer.getClass().getSimpleName(); } public void setTransformer(final Transformer transformer) { Assert.assertNotNull(transformer); this.transformer = transformer; } @Override public TimeSeriesInstance transform(TimeSeriesInstance inst) { if(!isFit()) { throw new IllegalStateException("must be fitted first"); } TimeSeriesInstance transformed = tsCache.get(inst); if(transformed == null) { transformed = transformer.transform(inst); if(!cacheFittedDataOnly || 
tsCache.containsKey(inst)) { tsCache.put(inst, transformed); } } return transformed; } @Override public Instance transform(final Instance inst) { if(!isFit()) { throw new IllegalStateException("must be fitted first"); } Instance transformed = arffCache.get(inst); if(transformed == null) { transformed = transformer.transform(inst); if(!cacheFittedDataOnly || arffCache.containsKey(inst)) { arffCache.put(inst, transformed); } } return transformed; } @Override public Instances determineOutputFormat(final Instances data) throws IllegalArgumentException { return transformer.determineOutputFormat(data); } public Transformer getTransformer() { return transformer; } @Override public void setParams(final ParamSet paramSet) throws Exception { super.setParams(paramSet); setTransformer(paramSet.get(TRANSFORMER_FLAG, getTransformer())); } @Override public ParamSet getParams() { return new ParamSet().add(TRANSFORMER_FLAG, transformer); } public static final String TRANSFORMER_FLAG = "f"; public static void main(String[] args) throws Exception { final CachedTransformer ct = new CachedTransformer(new Derivative()); final List<TimeSeriesInstances> data = Arrays.stream(DatasetLoading.sampleGunPoint(0)).map(Converter::fromArff).collect(Collectors.toList()); ct.fit(data.get(0)); ct.transform(data.get(1).get(0)); ct.transform(data.get(0).get(0)); } }
5,572
32.371257
124
java
tsml-java
tsml-java-master/src/main/java/tsml/transformers/Catch22.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.transformers; import org.apache.commons.math3.complex.Complex; import org.apache.commons.math3.transform.DftNormalization; import org.apache.commons.math3.transform.FastFourierTransformer; import org.apache.commons.math3.transform.TransformType; import utilities.GenericTools; import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; import java.util.ArrayList; import java.util.Arrays; import tsml.data_containers.TimeSeries; import tsml.data_containers.TimeSeriesInstance; import static utilities.ArrayUtilities.mean; import static utilities.ClusteringUtilities.zNormalise; import static utilities.GenericTools.*; import static utilities.StatisticalUtilities.median; import static utilities.StatisticalUtilities.standardDeviation; import static utilities.Utilities.extractTimeSeries; /** * Transform for the catch22 set of features. * * C.H. Lubba, S.S. Sethi, P. Knaute, S.R. Schultz, B.D. Fulcher, N.S. Jones. * catch22: CAnonical Time-series CHaracteristics. 
Data Mining and Knowledge * Discovery (2019) * * Implementation based on C and Matlab code provided on authors github: * https://github.com/chlubba/catch22 * * @author Matthew Middlehurst */ public class Catch22 implements Transformer { // z-norm before transform private boolean norm = false; // specifically normalise for the outlier stats, which can take a long time with // large positive/negative values private boolean outlierNorm = false; // for summary stat by index private int currentSeriesID = Integer.MIN_VALUE; private double idxMin; private double idxMax; private double idxMean; private Complex[] idxFFT; private double[] idxAC; private int idxACFZ; private double[] idxOutlierSeries; private double[] idxSeries; public Catch22() { } public void setNormalise(boolean b) { this.norm = b; } public void setOutlierNormalise(boolean b) { this.outlierNorm = b; } @Override public Instance transform(Instance inst) { double[] arr = extractTimeSeries(inst); double cls = inst.classIndex() >= 0 ? inst.classValue() : Double.MIN_VALUE; double[] featureSet = transform(arr, cls); return new DenseInstance(1, featureSet); } @Override public TimeSeriesInstance transform(TimeSeriesInstance inst) { //could do this across all dimensions. double[][] out = new double[inst.getNumDimensions()][]; int i = 0; for(TimeSeries ts : inst){ out[i++] = transform(ts.toValueArray()); } //create a new output instance with the ACF data. 
return new TimeSeriesInstance(out, inst.getLabelIndex()); } @Override public Instances determineOutputFormat(Instances data) throws IllegalArgumentException { ArrayList<Attribute> atts = new ArrayList<>(); for (int i = 1; i <= 22; i++) { atts.add(new Attribute("att" + i)); } if (data.classIndex() >= 0) atts.add(data.classAttribute()); Instances transformedData = new Instances("Catch22Transform", atts, data.numInstances()); if (data.classIndex() >= 0) transformedData.setClassIndex(transformedData.numAttributes()-1); return transformedData; } public double[] transform(double[] series){ return transform(series, Double.MIN_VALUE); } //no class value in series public double[] transform(double[] series, double classValue){ int atts = classValue == Double.MIN_VALUE ? 22 : 23; double[] featureSet = new double[atts]; double[] arr; double[] outlierArr; if (norm) { arr = new double[series.length]; System.arraycopy(series, 0, arr, 0, series.length); zNormalise(arr); outlierArr = arr; } else if (outlierNorm) { arr = series; outlierArr = new double[series.length]; System.arraycopy(series, 0, outlierArr, 0, series.length); zNormalise(outlierArr); } else { arr = series; outlierArr = series; } // can reduce amount of computation by pre-computing stats and transforms double min = Double.MAX_VALUE; double max = -99999999; double mean = 0; for (double v : arr) { if (v < min) { min = v; } if (v > max) { max = v; } mean += v; } mean /= arr.length; int nfft = (int) Math.pow(2.0, (int) Math.ceil(Math.log(series.length) / Math.log(2))); Complex[] fft = new Complex[nfft]; for (int j = 0; j < nfft; j++) { if (j < series.length) fft[j] = new Complex(series[j] - mean, 0); else fft[j] = new Complex(0, 0); } FastFourierTransformer f = new FastFourierTransformer(DftNormalization.STANDARD); fft = f.transform(fft, TransformType.FORWARD); double[] ac = autoCorr(arr, fft); int acfz = acFirstZero(ac); featureSet[0] = histMode5DN(arr, min, max); featureSet[1] = histMode10DN(arr, min, max); 
featureSet[2] = binaryStatsMeanLongstretch1SB(arr, mean); featureSet[3] = outlierIncludeP001mdrmdDN(outlierArr); featureSet[4] = outlierIncludeN001mdrmdDN(outlierArr); featureSet[5] = f1ecacCO(ac); featureSet[6] = firstMinacCO(ac); featureSet[7] = summariesWelchRectArea51SP(arr, fft); featureSet[8] = summariesWelchRectCentroidSP(arr, fft); featureSet[9] = localSimpleMean3StderrFC(arr); featureSet[10] = trev1NumCO(arr); featureSet[11] = histogramAMIeven25CO(arr, min, max); featureSet[12] = autoMutualInfoStats40GaussianFmmiIN(ac); featureSet[13] = hrvClassicPnn40MD(arr); featureSet[14] = binaryStatsDiffLongstretch0SB(arr); featureSet[15] = motifThreeQuantileHhSB(arr); featureSet[16] = localSimpleMean1TauresratFC(arr, acfz); featureSet[17] = embed2DistTauDExpfitMeandiffCO(arr, acfz); featureSet[18] = fluctAnal2Dfa5012LogiPropR1SC(arr); featureSet[19] = fluctAnal2Rsrangefit501LogiPropR1SC(arr); featureSet[20] = transitionMatrix3acSumdiagcovSB(arr, acfz); featureSet[21] = periodicityWangTh001PD(arr); if (classValue > Double.MIN_VALUE) featureSet[22] = classValue; for (int i = 0; i < featureSet.length; i++){ if (Double.isNaN(featureSet[i]) || Double.isInfinite(featureSet[i])){ featureSet[i] = 0; } } return featureSet; } public double getSummaryStatByIndex(int summaryStatIndex, int seriesID, double[] series) throws Exception { if (seriesID != currentSeriesID) { currentSeriesID = seriesID; idxMin = Double.MAX_VALUE; idxMax = -99999999; idxMean = Double.MIN_VALUE; idxFFT = null; idxAC = null; idxACFZ = -1; idxOutlierSeries = null; if (norm) { idxSeries = new double[series.length]; System.arraycopy(series, 0, idxSeries, 0, series.length); zNormalise(idxSeries); } else { idxSeries = series; } } switch (summaryStatIndex) { case 0: case 1: case 11: if (idxMin == Double.MAX_VALUE) { for (double v : idxSeries) { if (v < idxMin) { idxMin = v; } if (v > idxMax) { idxMax = v; } } } break; case 2: if (idxMean == Double.MIN_VALUE) { idxMean = mean(idxSeries); } break; case 3: case 4: 
if (idxOutlierSeries == null) { if (outlierNorm && !norm) { idxOutlierSeries = new double[idxSeries.length]; System.arraycopy(idxSeries, 0, idxOutlierSeries, 0, idxSeries.length); zNormalise(idxOutlierSeries); } else { idxOutlierSeries = idxSeries; } } break; case 7: case 8: if (idxFFT == null) { if (idxMean == Double.MIN_VALUE) { idxMean = mean(idxSeries); } int nfft = (int) Math.pow(2.0, (int) Math.ceil(Math.log(idxSeries.length) / Math.log(2))); idxFFT = new Complex[nfft]; for (int j = 0; j < nfft; j++) { if (j < idxSeries.length) idxFFT[j] = new Complex(idxSeries[j] - idxMean, 0); else idxFFT[j] = new Complex(0, 0); } FastFourierTransformer f = new FastFourierTransformer(DftNormalization.STANDARD); idxFFT = f.transform(idxFFT, TransformType.FORWARD); } break; case 5: case 6: case 12: if (idxAC == null) { if (idxFFT == null) { if (idxMean == Double.MIN_VALUE) { idxMean = mean(idxSeries); } int nfft = (int) Math.pow(2.0, (int) Math.ceil(Math.log(idxSeries.length) / Math.log(2))); idxFFT = new Complex[nfft]; for (int j = 0; j < nfft; j++) { if (j < idxSeries.length) idxFFT[j] = new Complex(idxSeries[j] - idxMean, 0); else idxFFT[j] = new Complex(0, 0); } FastFourierTransformer f = new FastFourierTransformer(DftNormalization.STANDARD); idxFFT = f.transform(idxFFT, TransformType.FORWARD); } idxAC = autoCorr(idxSeries, idxFFT); } break; case 16: case 17: case 20: if (idxACFZ == -1){ if (idxAC == null) { if (idxFFT == null) { if (idxMean == Double.MIN_VALUE) { idxMean = mean(idxSeries); } int nfft = (int) Math.pow(2.0, (int) Math.ceil(Math.log(idxSeries.length) / Math.log(2))); idxFFT = new Complex[nfft]; for (int j = 0; j < nfft; j++) { if (j < idxSeries.length) idxFFT[j] = new Complex(idxSeries[j] - idxMean, 0); else idxFFT[j] = new Complex(0, 0); } FastFourierTransformer f = new FastFourierTransformer(DftNormalization.STANDARD); idxFFT = f.transform(idxFFT, TransformType.FORWARD); } idxAC = autoCorr(idxSeries, idxFFT); } idxACFZ = acFirstZero(idxAC); } break; } 
double feature; switch (summaryStatIndex) { case 0: feature = histMode5DN(idxSeries, idxMin, idxMax); break; case 1: feature = histMode10DN(idxSeries, idxMin, idxMax); break; case 2: feature = binaryStatsMeanLongstretch1SB(idxSeries, idxMean); break; case 3: feature = outlierIncludeP001mdrmdDN(idxOutlierSeries); break; case 4: feature = outlierIncludeN001mdrmdDN(idxOutlierSeries); break; case 5: feature = f1ecacCO(idxAC); break; case 6: feature = firstMinacCO(idxAC); break; case 7: feature = summariesWelchRectArea51SP(idxSeries, idxFFT); break; case 8: feature = summariesWelchRectCentroidSP(idxSeries, idxFFT); break; case 9: feature = localSimpleMean3StderrFC(idxSeries); break; case 10: feature = trev1NumCO(idxSeries); break; case 11: feature = histogramAMIeven25CO(idxSeries, idxMin, idxMax); break; case 12: feature = autoMutualInfoStats40GaussianFmmiIN(idxAC); break; case 13: feature = hrvClassicPnn40MD(idxSeries); break; case 14: feature = binaryStatsDiffLongstretch0SB(idxSeries); break; case 15: feature = motifThreeQuantileHhSB(idxSeries); break; case 16: feature = localSimpleMean1TauresratFC(idxSeries, idxACFZ); break; case 17: feature = embed2DistTauDExpfitMeandiffCO(idxSeries, idxACFZ); break; case 18: feature = fluctAnal2Dfa5012LogiPropR1SC(idxSeries); break; case 19: feature = fluctAnal2Rsrangefit501LogiPropR1SC(idxSeries); break; case 20: feature = transitionMatrix3acSumdiagcovSB(idxSeries, idxACFZ); break; case 21: feature = periodicityWangTh001PD(idxSeries); break; default: throw new Exception("Invalid Catch22 summary stat index."); } if (Double.isNaN(feature) || Double.isInfinite(feature)){ feature = 0; } return feature; } public static double getSummaryStatByIndex(int summaryStatIndex, double[] series, boolean outlierNorm) { double min = Double.MAX_VALUE; double max = -99999999; double mean = Double.MIN_VALUE; Complex[] fft = null; double[] ac = null; int acfz = -1; double[] newSeries = series; if (summaryStatIndex < 0 || summaryStatIndex > 21){ 
System.err.println("Invalid Catch22 summary stat index."); return Double.MAX_VALUE; } switch (summaryStatIndex) { case 0: case 1: case 11: for (double v : newSeries) { if (v < min) { min = v; } if (v > max) { max = v; } } break; case 2: mean = mean(newSeries); break; case 3: case 4: if (outlierNorm) { newSeries = new double[newSeries.length]; System.arraycopy(series, 0, newSeries, 0, series.length); zNormalise(newSeries); } break; case 7: case 8: mean = mean(newSeries); int nfft = (int) Math.pow(2.0, (int) Math.ceil(Math.log(newSeries.length) / Math.log(2))); fft = new Complex[nfft]; for (int j = 0; j < nfft; j++) { if (j < newSeries.length) fft[j] = new Complex(newSeries[j] - mean, 0); else fft[j] = new Complex(0, 0); } FastFourierTransformer f = new FastFourierTransformer(DftNormalization.STANDARD); fft = f.transform(fft, TransformType.FORWARD); break; case 5: case 6: case 12: mean = mean(newSeries); int nfft2 = (int) Math.pow(2.0, (int) Math.ceil(Math.log(newSeries.length) / Math.log(2))); fft = new Complex[nfft2]; for (int j = 0; j < nfft2; j++) { if (j < newSeries.length) fft[j] = new Complex(newSeries[j] - mean, 0); else fft[j] = new Complex(0, 0); } FastFourierTransformer f2 = new FastFourierTransformer(DftNormalization.STANDARD); fft = f2.transform(fft, TransformType.FORWARD); ac = autoCorr(newSeries, fft); break; case 16: case 17: case 20: mean = mean(newSeries); int nfft3 = (int) Math.pow(2.0, (int) Math.ceil(Math.log(newSeries.length) / Math.log(2))); fft = new Complex[nfft3]; for (int j = 0; j < nfft3; j++) { if (j < newSeries.length) fft[j] = new Complex(newSeries[j] - mean, 0); else fft[j] = new Complex(0, 0); } FastFourierTransformer f3 = new FastFourierTransformer(DftNormalization.STANDARD); fft = f3.transform(fft, TransformType.FORWARD); ac = autoCorr(newSeries, fft); acfz = acFirstZero(ac); break; } double feature; switch (summaryStatIndex) { case 0: feature = histMode5DN(newSeries, min, max); break; case 1: feature = histMode10DN(newSeries, min, 
max); break; case 2: feature = binaryStatsMeanLongstretch1SB(newSeries, mean); break; case 3: feature = outlierIncludeP001mdrmdDN(newSeries); break; case 4: feature = outlierIncludeN001mdrmdDN(newSeries); break; case 5: feature = f1ecacCO(ac); break; case 6: feature = firstMinacCO(ac); break; case 7: feature = summariesWelchRectArea51SP(newSeries, fft); break; case 8: feature = summariesWelchRectCentroidSP(newSeries, fft); break; case 9: feature = localSimpleMean3StderrFC(newSeries); break; case 10: feature = trev1NumCO(newSeries); break; case 11: feature = histogramAMIeven25CO(newSeries, min, max); break; case 12: feature = autoMutualInfoStats40GaussianFmmiIN(ac); break; case 13: feature = hrvClassicPnn40MD(newSeries); break; case 14: feature = binaryStatsDiffLongstretch0SB(newSeries); break; case 15: feature = motifThreeQuantileHhSB(newSeries); break; case 16: feature = localSimpleMean1TauresratFC(newSeries, acfz); break; case 17: feature = embed2DistTauDExpfitMeandiffCO(newSeries, acfz); break; case 18: feature = fluctAnal2Dfa5012LogiPropR1SC(newSeries); break; case 19: feature = fluctAnal2Rsrangefit501LogiPropR1SC(newSeries); break; case 20: feature = transitionMatrix3acSumdiagcovSB(newSeries, acfz); break; case 21: feature = periodicityWangTh001PD(newSeries); break; default: feature = Double.MAX_VALUE; } if (Double.isNaN(feature) || Double.isInfinite(feature)){ feature = 0; } return feature; } public static String getSummaryStatNameByIndex(int summaryStatIndex) throws Exception { switch(summaryStatIndex){ case 0: return "DN_HistogramMode_5"; case 1: return "DN_HistogramMode_10"; case 2: return "SB_BinaryStats_mean_longstretch1"; case 3: return "DN_OutlierInclude_p_001_mdrmd"; case 4: return "DN_OutlierInclude_n_001_mdrmd"; case 5: return "CO_f1ecac"; case 6: return "CO_FirstMin_ac"; case 7: return "SP_Summaries_welch_rect_area_5_1"; case 8: return "SP_Summaries_welch_rect_centroid"; case 9: return "FC_LocalSimple_mean3_stderr"; case 10: return "CO_trev_1_num"; 
case 11: return "CO_HistogramAMI_even_2_5"; case 12: return "IN_AutoMutualInfoStats_40_gaussian_fmmi"; case 13: return "MD_hrv_classic_pnn40"; case 14: return "SB_BinaryStats_diff_longstretch0"; case 15: return "SB_MotifThree_quantile_hh"; case 16: return "FC_LocalSimple_mean1_tauresrat"; case 17: return "CO_Embed2_Dist_tau_d_expfit_meandiff"; case 18: return "SC_FluctAnal_2_dfa_50_1_2_logi_prop_r1"; case 19: return "SC_FluctAnal_2_rsrangefit_50_1_logi_prop_r1"; case 20: return "SB_TransitionMatrix_3ac_sumdiagcov"; case 21: return "PD_PeriodicityWang_th0_01"; default: throw new Exception("Invalid Catch22 summary stat index."); } } // Mode of z-scored distribution (5-bin histogram) private static double histMode5DN(double[] arr, double min, double max) { return histogramMode(arr, 5, min, max); } // Mode of z-scored distribution (10-bin histogram) private static double histMode10DN(double[] arr, double min, double max) { return histogramMode(arr, 10, min, max); } // Longest period of consecutive values above the mean private static double binaryStatsMeanLongstretch1SB(double[] arr, double mean) { int[] meanBinary = new int[arr.length]; for (int i = 0; i < arr.length; i++) { if (arr[i] - mean > 0) { meanBinary[i] = 1; } } return longStretch(meanBinary, 1); } // Time intervals between successive extreme events above the mean private static double outlierIncludeP001mdrmdDN(double[] arr) { return outlierInclude(arr); } // Time intervals between successive extreme events below the mean private static double outlierIncludeN001mdrmdDN(double[] arr) { double[] newArr = new double[arr.length]; for (int i = 0; i < arr.length; i++) { newArr[i] = -arr[i]; } return outlierInclude(newArr); } // First 1/e crossing of autocorrelation function private static double f1ecacCO(double[] ac) { double threshold = 0.36787944117144233; // 1/Math.exp(1); for (int i = 1; i < ac.length; i++) { if ((ac[i - 1] - threshold) * (ac[i] - threshold) < 0) { return i; } } return ac.length; } // First 
minimum of autocorrelation function private static double firstMinacCO(double[] ac) { for (int i = 1; i < ac.length - 1; i++) { if (ac[i] < ac[i - 1] && ac[i] < ac[i + 1]) { return i; } } return ac.length; } // Total power in lowest fifth of frequencies in the Fourier power spectrum private static double summariesWelchRectArea51SP(double[] arr, Complex[] fft) { return summariesWelchRect(arr, false, fft); } // Centroid of the Fourier power spectrum private static double summariesWelchRectCentroidSP(double[] arr, Complex[] fft) { return summariesWelchRect(arr, true, fft); } // Mean error from a rolling 3-sample mean forecasting private static double localSimpleMean3StderrFC(double[] arr) { if (arr.length - 3 < 3) return 0; double[] res = localSimpleMean(arr, 3); return standardDeviation(res, false); } // Time-reversibility statistic, ((x_t+1 − x_t)^3)_t private static double trev1NumCO(double[] arr) { double[] y = new double[arr.length - 1]; for (int i = 0; i < y.length; i++) { y[i] = Math.pow(arr[i + 1] - arr[i], 3); } return mean(y); } // Automutual information, m = 2, τ = 5 private static double histogramAMIeven25CO(double[] arr, double min, double max) { double newMin = min - 0.1; double newMax = max + 0.1; double binWidth = (newMax - newMin) / 5; double[][] histogram = new double[5][5]; double[] sumx = new double[5]; double[] sumy = new double[5]; double v = 1.0 / (arr.length - 2); for (int i = 0; i < arr.length - 2; i++) { int idx1 = (int) ((arr[i] - newMin) / binWidth); int idx2 = (int) ((arr[i + 2] - newMin) / binWidth); histogram[idx1][idx2] += v; sumx[idx1] += v; sumy[idx2] += v; } double sum = 0; for (int i = 0; i < 5; i++) { for (int n = 0; n < 5; n++) { if (histogram[i][n] > 0) { sum += histogram[i][n] * Math.log(histogram[i][n] / sumx[i] / sumy[n]); } } } return sum; } // First minimum of the automutual information function private static double autoMutualInfoStats40GaussianFmmiIN(double[] ac) { int tau = Math.min(40, (int) Math.ceil(ac.length / 2)); 
double[] diffs = new double[tau - 1]; double prev = -0.5 * Math.log(1 - Math.pow(ac[1], 2)); for (int i = 0; i < diffs.length; i++) { double corr = -0.5 * Math.log(1 - Math.pow(ac[i + 2], 2)); diffs[i] = corr - prev; prev = corr; } for (int i = 0; i < diffs.length - 1; i++) { if (diffs[i] * diffs[i + 1] < 0 && diffs[i] < 0) { return i + 1; } } return tau; } // Proportion of successive differences exceeding 0.04σ (Mietus 2002) private static double hrvClassicPnn40MD(double[] arr) { double[] diffs = new double[arr.length - 1]; for (int i = 0; i < diffs.length; i++) { diffs[i] = Math.abs(arr[i + 1] - arr[i]) * 1000; } double sum = 0; for (double diff : diffs) { if (diff > 40) { sum++; } } return sum / diffs.length; } // Longest period of successive incremental decreases private static double binaryStatsDiffLongstretch0SB(double[] arr) { int[] diffBinary = new int[arr.length - 1]; for (int i = 0; i < diffBinary.length; i++) { if (arr[i + 1] - arr[i] >= 0) { diffBinary[i] = 1; } } return longStretch(diffBinary, 0); } // Shannon entropy of two successive letters in equiprobable 3-letter // symbolization private static double motifThreeQuantileHhSB(double[] arr) { GenericTools.SortIndexAscending sort = new GenericTools.SortIndexAscending(arr); Integer[] indicies = sort.getIndicies(); Arrays.sort(indicies, sort); ArrayList<ArrayList<Integer>> p = new ArrayList<>(); int[] bins = new int[arr.length]; int q1 = arr.length / 3; int q2 = q1 * 2; p.add(new ArrayList<>()); for (int i = 0; i <= q1; i++) { p.get(0).add(indicies[i]); } p.add(new ArrayList<>()); for (int i = q1 + 1; i <= q2; i++) { bins[indicies[i]] = 1; p.get(1).add(indicies[i]); } p.add(new ArrayList<>()); for (int i = q2 + 1; i < indicies.length; i++) { bins[indicies[i]] = 2; p.get(2).add(indicies[i]); } double sum = 0; for (int i = 0; i < 3; i++) { ArrayList<Integer> o = p.get(i); o.remove((Integer) (arr.length - 1)); for (int n = 0; n < 3; n++) { double sum2 = 0; for (Integer v : o) { if (bins[v + 1] == n) { 
sum2++; } } if (sum2 > 0) { sum2 /= (arr.length - 1); sum += sum2 * Math.log(sum2); } } } return -sum; } // Change in correlation length after iterative differencing private static double localSimpleMean1TauresratFC(double[] arr, int acfz) { if (arr.length - 1 < 1) return 0; double[] res = localSimpleMean(arr, 1); double mean = mean(res); int nfft = (int) Math.pow(2.0, (int) Math.ceil(Math.log(res.length) / Math.log(2))); Complex[] fft = new Complex[nfft]; for (int j = 0; j < nfft; j++) { if (j < res.length) fft[j] = new Complex(res[j] - mean, 0); else fft[j] = new Complex(0, 0); } FastFourierTransformer f = new FastFourierTransformer(DftNormalization.STANDARD); fft = f.transform(fft, TransformType.FORWARD); double[] resAc = autoCorr(res, fft); return (double) acFirstZero(resAc) / acfz; } // Exponential fit to successive distances in 2-d embedding space private static double embed2DistTauDExpfitMeandiffCO(double[] arr, int acfz) { int tau = acfz; if (tau > arr.length / 10) { tau = arr.length / 10; } double[] d = new double[arr.length - tau - 1]; double dMean = 0; for (int i = 0; i < d.length; i++) { double n = Math.sqrt(Math.pow(arr[i + 1] - arr[i], 2) + Math.pow(arr[i + tau + 1] - arr[i + tau], 2)); d[i] = n; dMean += n; } dMean /= arr.length - tau - 1; double min = min(d); double max = max(d); double range = max - min; double std = standardDeviation(d, false, dMean); if (std == 0) return Double.NaN; int numBins = (int) Math.ceil(range / (3.5 * std / Math.pow(d.length, 0.3333333333333333))); double binWidth = range / numBins; if (numBins == 0) { return Double.NaN; } double[] histogram = new double[numBins]; for (double val : d) { int idx = (int) ((val - min) / binWidth); if (idx >= numBins) idx = numBins - 1; histogram[idx]++; } double sum = 0; for (int i = 0; i < numBins; i++) { double center = ((min + binWidth * i) * 2 + binWidth) / 2; double n = Math.exp(-center / dMean) / dMean; if (n < 0) n = 0; sum += Math.abs(histogram[i] / d.length - n); } return sum / 
numBins; } // Proportion of slower timescale fluctuations that scale with DFA (50% // sampling) private static double fluctAnal2Dfa5012LogiPropR1SC(double[] arr) { double[] cs = new double[arr.length / 2]; cs[0] = arr[0]; for (int i = 1; i < cs.length; i++) { cs[i] = cs[i - 1] + arr[i * 2]; } return fluctProp(cs, arr.length, true); } // Proportion of slower timescale fluctuations that scale with linearly rescaled // range fits private static double fluctAnal2Rsrangefit501LogiPropR1SC(double[] arr) { double[] cs = new double[arr.length]; cs[0] = arr[0]; for (int i = 1; i < arr.length; i++) { cs[i] = cs[i - 1] + arr[i]; } return fluctProp(cs, arr.length, false); } // Trace of covariance of transition matrix between symbols in 3-letter alphabet private static double transitionMatrix3acSumdiagcovSB(double[] arr, int acfz) { // int numGroups = 3; double[] ds = new double[(arr.length - 1) / acfz + 1]; for (int i = 0; i < ds.length; i++) { ds[i] = arr[i * acfz]; } GenericTools.SortIndexAscending sort = new GenericTools.SortIndexAscending(ds); Integer[] indicies = sort.getIndicies(); Arrays.sort(indicies, sort); int[] bins = new int[ds.length]; int q1 = ds.length / 3; int q2 = q1 * 2; for (int i = q1 + 1; i <= q2; i++) { bins[indicies[i]] = 1; } for (int i = q2 + 1; i < indicies.length; i++) { bins[indicies[i]] = 2; } double[][] t = new double[3][3]; for (int i = 0; i < ds.length - 1; i++) { t[bins[i + 1]][bins[i]] += 1; } for (int i = 0; i < 3; i++) { for (int n = 0; n < 3; n++) { t[i][n] /= (ds.length - 1); } } double[] means = new double[3]; for (int i = 0; i < 3; i++) { means[i] = mean(t[i]); } double[][] cov = new double[3][3]; for (int i = 0; i < 3; i++) { for (int n = i; n < 3; n++) { double covariance = 0; for (int j = 0; j < 3; j++) { covariance += (t[i][j] - means[i]) * (t[n][j] - means[n]); } covariance /= 2; cov[i][n] = covariance; cov[n][i] = covariance; } } double sum = 0; for (int i = 0; i < 3; i++) { sum += cov[i][i]; } return sum; } // Periodicity measure 
of (Wang et al. 2007) private static double periodicityWangTh001PD(double[] arr) { double[] ySpline = splineFit(arr); double[] ySub = new double[arr.length]; for (int i = 0; i < arr.length; i++) { ySub[i] = arr[i] - ySpline[i]; } int acmax = (int) Math.ceil(arr.length / 3.0); double[] acf = new double[acmax]; for (int tau = 1; tau <= acmax; tau++) { double covariance = 0; for (int i = 0; i < arr.length - tau; i++) { covariance += ySub[i] * ySub[i + tau]; } acf[tau - 1] = covariance / (arr.length - tau); } int[] troughs = new int[acmax]; int[] peaks = new int[acmax]; int nTroughs = 0; int nPeaks = 0; for (int i = 1; i < acmax - 1; i++) { double slopeIn = acf[i] - acf[i - 1]; double slopeOut = acf[i + 1] - acf[i]; if (slopeIn < 0 && slopeOut > 0) { troughs[nTroughs] = i; nTroughs += 1; } else if (slopeIn > 0 && slopeOut < 0) { peaks[nPeaks] = i; nPeaks += 1; } } int out = 0; for (int i = 0; i < nPeaks; i++) { int j = -1; while (troughs[j + 1] < peaks[i] && j + 1 < nTroughs) { j++; } if (j == -1 || acf[peaks[i]] - acf[troughs[j]] < 0.01 || acf[peaks[i]] < 0) continue; out = peaks[i]; break; } return out; } private static double histogramMode(double[] arr, int numBins, double min, double max) { double binWidth = (max - min) / numBins; if (binWidth == 0) { return Double.NaN; } double[] histogram = new double[numBins]; for (double val : arr) { int idx = (int) ((val - min) / binWidth); if (idx >= numBins) idx = numBins - 1; histogram[idx]++; } double[] edges = new double[numBins + 1]; for (int i = 0; i < edges.length; i++) { edges[i] = i * binWidth + min; } double maxCount = 0; int numMaxs = 1; double maxSum = 0; for (int i = 0; i < numBins; i++) { double v = (edges[i] + edges[i + 1]) / 2; if (histogram[i] > maxCount) { maxCount = histogram[i]; numMaxs = 1; maxSum = v; } else if (histogram[i] == maxCount) { numMaxs += 1; maxSum += v; } } return maxSum / numMaxs; } private static double longStretch(int[] binary, int val) { double lastVal = 0; double maxStretch = 0; for 
(int i = 0; i < binary.length; i++) { if (binary[i] != val || i == binary.length - 1) { double stretch = i - lastVal; if (stretch > maxStretch) { maxStretch = stretch; } lastVal = i; } } return maxStretch; } private static double outlierInclude(double[] arr) { double total = 0; double threshold = 0; for (double v : arr) { if (v >= 0) { total++; if (v > threshold) { threshold = v; } } } if (threshold < 0.01) return 0; int numThresholds = (int) (threshold / 0.01) + 1; double[] means = new double[numThresholds]; double[] dists = new double[numThresholds]; double[] medians = new double[numThresholds]; for (int i = 0; i < numThresholds; i++) { double d = i * 0.01; ArrayList<Double> r = new ArrayList<>(arr.length); for (int n = 0; n < arr.length; n++) { if (arr[n] >= d) { r.add(n + 1.0); } } if (r.size() == 0) continue; double[] diff = new double[r.size() - 1]; for (int n = 0; n < diff.length; n++) { diff[n] = r.get(n + 1) - r.get(n); } means[i] = mean(diff); dists[i] = diff.length * 100.0 / total; medians[i] = median(r, false) / (arr.length / 2.0) - 1; } int mj = 0; int fbi = numThresholds - 1; for (int i = 0; i < numThresholds; i++) { if (dists[i] > 2) { mj = i; } if (Double.isNaN(means[i])) { fbi = numThresholds - 1 - i; } } int trimLimit = Math.max(mj, fbi); return median(Arrays.copyOf(medians, trimLimit + 1), false); } private static double[] autoCorr(double[] arr, Complex[] fft) { Complex[] c = new Complex[fft.length]; for (int i = 0; i < fft.length; i++) { c[i] = fft[i].multiply(new Complex(fft[i].getReal(), -fft[i].getImaginary())); } FastFourierTransformer f = new FastFourierTransformer(DftNormalization.STANDARD); c = f.transform(c, TransformType.INVERSE); double[] acf = new double[arr.length]; double d = c[0].getReal(); if (d != 0) { for (int i = 0; i < arr.length; i++) { acf[i] = c[i].getReal() / d; } } return acf; } private static double summariesWelchRect(double[] arr, boolean centroid, Complex[] fft) { int newLength = fft.length / 2 + 1; double[] p = new 
double[newLength]; double pi2 = 2 * Math.PI; p[0] = (Math.pow(complexMagnitude(fft[0]), 2) / arr.length) / pi2; for (int i = 1; i < newLength - 1; i++) { p[i] = ((Math.pow(complexMagnitude(fft[i]), 2) / arr.length) * 2) / pi2; } p[newLength - 1] = (Math.pow(complexMagnitude(fft[newLength - 1]), 2) / arr.length) / pi2; double[] w = new double[newLength]; double a = 1.0 / fft.length; for (int i = 0; i < newLength; i++) { w[i] = i * a * Math.PI * 2; } if (centroid) { double[] cs = new double[newLength]; cs[0] = p[0]; for (int i = 1; i < newLength; i++) { cs[i] = cs[i - 1] + p[i]; } double threshold = cs[newLength - 1] / 2; for (int i = 0; i < newLength; i++) { if (cs[i] > threshold) { return w[i]; } } return Double.NaN; } else { double tau = Math.floor(newLength / 5); double sum = 0; for (int i = 0; i < tau; i++) { sum += p[i]; } return sum * (w[1] - w[0]); } } private static double complexMagnitude(Complex c){ return Math.sqrt(c.getReal() * c.getReal() + c.getImaginary() * c.getImaginary()); } private static double[] localSimpleMean(double[] arr, int trainLength) { double[] res = new double[arr.length - trainLength]; for (int i = 0; i < res.length; i++) { double sum = 0; for (int n = 0; n < trainLength; n++) { sum += arr[i + n]; } res[i] = arr[i + trainLength] - sum / trainLength; } return res; } private static int acFirstZero(double[] ac) { for (int i = 1; i < ac.length; i++) { if (ac[i] <= 0) { return i; } } return ac.length; } private static double fluctProp(double[] arr, double ogLength, boolean dfa) { // int q = 2; ArrayList<Integer> a = new ArrayList<>(); a.add(5); double min = 1.6094379124341003; //Math.log(5); double max = Math.log(ogLength/2); double inc = (max - min)/49; for (int i = 1; i < 50; i++){ int val = (int)Math.round(Math.exp(min + inc*i)); if (val != a.get(a.size()-1)){ a.add(val); } } int nTau = a.size(); if (a.size() < 12) return Double.NaN; double[] f = new double[nTau]; for (int i = 0; i < nTau; i++) { int tau = a.get(i); int buffSize = 
arr.length / tau; int lag = 0; if (buffSize == 0) { buffSize = 1; lag = 1; } double[][] buffer = new double[buffSize][tau]; int count = 0; for (int n = 0; n < buffer.length; n++) { for (int j = 0; j < tau - lag; j++) { buffer[n][j] = arr[count++]; } } double[] d = new double[tau]; for (int n = 0; n < tau; n++) { d[n] = n + 1; } for (int n = 0; n < buffer.length; n++) { double[] co = linearRegression(d, buffer[n], tau, 0); for (int j = 0; j < tau; j++) { buffer[n][j] = buffer[n][j] - (co[0] * (j + 1) + co[1]); } if (dfa) { for (int j = 0; j < tau; j++) { f[i] += buffer[n][j] * buffer[n][j]; } } else { f[i] += Math.pow(max(buffer[n]) - min(buffer[n]), 2); } } if (dfa) { f[i] = Math.sqrt(f[i] / (buffer.length * tau)); } else { f[i] = Math.sqrt(f[i] / buffer.length); } } double[] logA = new double[nTau]; double[] logF = new double[nTau]; for (int i = 0; i < nTau; i++) { logA[i] = Math.log(a.get(i)); logF[i] = Math.log(f[i]); } // int minPoints = 6; int nsserr = (nTau - 11); // (nTau - 2*minPoints + 1); double[] sserr = new double[nsserr]; for (int i = 6; i < nTau - 5; i++) { // (nTau - minPoints + 1); double[] co = linearRegression(logA, logF, i, 0); double[] co2 = linearRegression(logA, logF, nTau - i + 1, i - 1); double sum1 = 0; for (int n = 0; n < i; n++) { sum1 += Math.pow(logA[n] * co[0] + co[1] - logF[n], 2); } sserr[i - 6] += Math.sqrt(sum1); double sum2 = 0; for (int n = 0; n < nTau - i + 1; n++) { sum2 += Math.pow(logA[n + i - 1] * co2[0] + co2[1] - logF[n + i - 1], 2); } sserr[i - 6] += Math.sqrt(sum2); } return (indexOfMin(sserr) + 6) / nTau; } private static double[] linearRegression(double[] x, double[] y, int n, int lag) { double[] co = new double[2]; double sumx = 0; double sumx2 = 0; double sumxy = 0; double sumy = 0; for (int i = lag; i < n + lag; i++) { sumx += x[i]; sumx2 += x[i] * x[i]; sumxy += x[i] * y[i]; sumy += y[i]; } double denom = n * sumx2 - sumx * sumx; if (denom != 0) { co[0] = (n * sumxy - sumx * sumy) / denom; co[1] = (sumy * sumx2 - 
sumx * sumxy) / denom; } return co; } private static double[] splineFit(double[] arr) { // int deg = 3; // int n = 4; // int nBreaks = 3; // int peices = 2; // int piecesExt = 8; int[] breaks = { 0, arr.length / 2 - 1, arr.length - 1 }; int[] h0 = { breaks[1] - breaks[0], breaks[2] - breaks[1] }; int[] hCopy = { h0[0], h0[1], h0[0], h0[1] }; int[] hl = { hCopy[3], hCopy[2], hCopy[1] }; int[] hr = { hCopy[0], hCopy[1], hCopy[2] }; int[] hlCS = new int[3]; hlCS[0] = hl[0]; for (int i = 1; i < 3; i++) { hlCS[i] = hlCS[i - 1] + hl[i]; } int[] bl = new int[3]; for (int i = 0; i < 3; i++) { bl[i] = breaks[0] - hlCS[i]; } int[] hrCS = new int[3]; hrCS[0] = hr[0]; for (int i = 1; i < 3; i++) { hrCS[i] = hrCS[i - 1] + hr[i]; } int[] br = new int[3]; for (int i = 0; i < 3; i++) { br[i] = breaks[2] + hrCS[i]; } int[] breaksExt = new int[9]; for (int i = 0; i < 3; i++) { breaksExt[i] = bl[2 - i]; breaksExt[i + 3] = breaks[i]; breaksExt[i + 6] = br[i]; } int[] hExt = new int[8]; for (int i = 0; i < 8; i++) { hExt[i] = breaksExt[i + 1] - breaksExt[i]; } double[][] coeffs = new double[32][4]; for (int i = 0; i < 32; i += 4) { coeffs[i][0] = 1; } int[][] ii = new int[4][8]; for (int i = 0; i < 8; i++) { ii[0][i] = i; ii[1][i] = Math.min(1 + i, 7); ii[2][i] = Math.min(2 + i, 7); ii[3][i] = Math.min(3 + i, 7); } double[] H = new double[32]; for (int i = 0; i < 32; i++) { H[i] = hExt[ii[i % 4][i / 4]]; } for (int k = 1; k < 4; k++) { for (int j = 0; j < k; j++) { for (int l = 0; l < 32; l++) { coeffs[l][j] *= H[l] / (k - j); } } double[][] Q = new double[4][8]; for (int l = 0; l < 32; l++) { for (int m = 0; m < 4; m++) { Q[l % 4][l / 4] += coeffs[l][m]; } } for (int l = 0; l < 8; l++) { for (int m = 1; m < 4; m++) { Q[m][l] += Q[m - 1][l]; } } for (int l = 0; l < 32; l++) { if (l % 4 > 0) { coeffs[l][k] = Q[l % 4 - 1][l / 4]; } } double[] fmax = new double[32]; for (int i = 0; i < 8; i++) { for (int j = 0; j < 4; j++) { fmax[i * 4 + j] = Q[3][i]; } } for (int j = 0; j < k + 1; j++) { 
for (int l = 0; l < 32; l++) { coeffs[l][j] /= fmax[l]; } } for (int i = 0; i < 29; i++) { for (int j = 0; j < k + 1; j++) { coeffs[i][j] -= coeffs[3 + i][j]; } } for (int i = 0; i < 32; i += 4) { coeffs[i][k] = 0; } } double[] scale = new double[32]; for (int i = 0; i < 32; i++) { scale[i] = 1; } for (int k = 0; k < 3; k++) { for (int i = 0; i < 32; i++) { scale[i] /= H[i]; } for (int i = 0; i < 32; i++) { coeffs[i][3 - (k + 1)] *= scale[i]; } } int[][] jj = new int[4][2]; for (int i = 0; i < 4; i++) { for (int j = 0; j < 2; j++) { if (i == 0) jj[i][j] = 4 * (1 + j); else jj[i][j] = 3; } } for (int i = 1; i < 4; i++) { for (int j = 0; j < 2; j++) { jj[i][j] += jj[i - 1][j]; } } double[][] coeffsOut = new double[8][4]; for (int i = 0; i < 8; i++) { coeffsOut[i] = coeffs[jj[i % 4][i / 4] - 1]; } int[] xsB = new int[arr.length * 4]; int[] indexB = new int[xsB.length]; int breakInd = 1; for (int i = 0; i < arr.length; i++) { if (i >= breaks[1] & breakInd < 2) breakInd++; for (int j = 0; j < 4; j++) { xsB[i * 4 + j] = i - breaks[breakInd - 1]; indexB[i * 4 + j] = j + (breakInd - 1) * 4; } } double[] vB = new double[xsB.length]; for (int i = 0; i < xsB.length; i++) { vB[i] = coeffsOut[indexB[i]][0]; } for (int i = 1; i < 4; i++) { for (int j = 0; j < xsB.length; j++) { vB[j] = vB[j] * xsB[j] + coeffsOut[indexB[j]][i]; } } double[] A = new double[arr.length * 5]; breakInd = 0; for (int i = 0; i < xsB.length; i++) { if (i / 4 >= breaks[1]) breakInd = 1; A[(i % 4) + breakInd + (i / 4) * 5] = vB[i]; } double[] AT = new double[A.length]; double[] ATA = new double[25]; double[] ATb = new double[5]; for (int i = 0; i < arr.length; i++) { for (int j = 0; j < 5; j++) { AT[j * arr.length + i] = A[i * 5 + j]; } } for (int i = 0; i < 5; i++) { for (int j = 0; j < 5; j++) { for (int k = 0; k < arr.length; k++) { ATA[i * 5 + j] += AT[i * arr.length + k] * A[k * 5 + j]; } } } for (int i = 0; i < 5; i++) { for (int k = 0; k < arr.length; k++) { ATb[i] += AT[i * arr.length + k] * 
arr[k]; } } double[][] AElim = new double[5][5]; for (int i = 0; i < 5; i++) { System.arraycopy(ATA, i * 5, AElim[i], 0, 5); } for (int i = 0; i < 5; i++) { for (int j = i + 1; j < 5; j++) { double factor = AElim[j][i] / AElim[i][i]; ATb[j] = ATb[j] - factor * ATb[i]; for (int k = i; k < 5; k++) { AElim[j][k] = AElim[j][k] - factor * AElim[i][k]; } } } double[] x = new double[5]; for (int i = 4; i >= 0; i--) { double bMinusATemp = ATb[i]; for (int j = i + 1; j < 5; j++) { bMinusATemp -= x[j] * AElim[i][j]; } x[i] = bMinusATemp / AElim[i][i]; } double[][] C = new double[5][8]; for (int i = 0; i < 32; i++) { C[i % 4 + (i / 4) % 2][i / 4] = coeffsOut[i % 8][i / 8]; } double[][] coeffsSpline = new double[2][4]; for (int j = 0; j < 8; j++) { int coefCol = j / 2; int coefRow = j % 2; for (int i = 0; i < 5; i++) { coeffsSpline[coefRow][coefCol] += C[i][j] * x[i]; } } double[] yOut = new double[arr.length]; for (int i = 0; i < arr.length; i++) { int secondHalf = i < breaks[1] ? 0 : 1; yOut[i] = coeffsSpline[secondHalf][0]; } for (int i = 1; i < 4; i++) { for (int j = 0; j < arr.length; j++) { int secondHalf = j < breaks[1] ? 0 : 1; yOut[j] = yOut[j] * (j - breaks[1] * secondHalf) + coeffsSpline[secondHalf][i]; } } return yOut; } }
54,252
32.24326
118
java
tsml-java
tsml-java-master/src/main/java/tsml/transformers/Clipping.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package tsml.transformers;

import java.io.FileReader;
import java.util.Arrays;

import tsml.data_containers.TimeSeries;
import tsml.data_containers.TimeSeriesInstance;
import tsml.data_containers.utilities.TimeSeriesStatsTools;
import utilities.InstanceTools;
import weka.core.*;

/**
 * Clipping transform: binarises each series around its own mean — values below
 * the mean become 0, values at or above it become 1.
 *
 * Output attributes are numeric 0.0/1.0 by default; set
 * {@link #setUseRealAttributes(boolean)} to false to emit nominal {"0","1"}
 * attributes instead.
 */
public class Clipping implements Transformer {

    boolean useMean = true;             // clip around the series mean (currently the only supported pivot)
    boolean useRealAttributes = true;   // true: numeric 0.0/1.0 output; false: nominal {"0","1"} output

    /**
     * @param f false to emit nominal {"0","1"} attributes instead of numeric 0/1.
     */
    public void setUseRealAttributes(boolean f) {
        useRealAttributes = f;
    }

    /**
     * Builds the output header: one "Clipped" attribute per input attribute
     * (nominal {"0","1"} when useRealAttributes is false), with the class
     * attribute copied through unchanged.
     */
    @Override
    public Instances determineOutputFormat(Instances inputFormat) {
        // Must convert all attributes to binary.
        Attribute a;
        FastVector<String> fv = new FastVector<>();
        if (!useRealAttributes) {
            fv.addElement("0");
            fv.addElement("1");
        }
        FastVector<Attribute> atts = new FastVector<>();
        for (int i = 0; i < inputFormat.numAttributes(); i++) {
            if (i != inputFormat.classIndex()) {
                if (!useRealAttributes)
                    a = new Attribute("Clipped" + inputFormat.attribute(i).name(), fv);
                else
                    a = new Attribute("Clipped" + inputFormat.attribute(i).name());
            } else {
                // class attribute passes through untouched
                a = inputFormat.attribute(i);
            }
            atts.addElement(a);
        }
        Instances result = new Instances("Clipped" + inputFormat.relationName(), atts,
                inputFormat.numInstances());
        if (inputFormat.classIndex() >= 0) {
            result.setClassIndex(result.numAttributes() - 1);
        }
        return result;
    }

    /**
     * Clips every dimension of the instance independently around that
     * dimension's mean.
     */
    @Override
    public TimeSeriesInstance transform(TimeSeriesInstance inst) {
        double[][] out = new double[inst.getNumDimensions()][];
        int i = 0;
        for (TimeSeries ts : inst) {
            double mean = TimeSeriesStatsTools.mean(ts);
            out[i++] = ts.streamValues().map(e -> e < mean ? 0.0 : 1.0).toArray();
        }
        // create a new output instance with the clipped data.
        return new TimeSeriesInstance(out, inst.getLabelIndex());
    }

    @Override
    public Instance transform(Instance inst) {
        Instance newInst;
        double average = InstanceTools.mean(inst);
        if (!useRealAttributes) {
            newInst = new DenseInstance(inst.numAttributes());
            for (int j = 0; j < inst.numAttributes(); j++) {
                if (inst.isMissing(j)) {
                    // BUG FIX: previously this set "?" and then fell through,
                    // overwriting the missing marker with a clipped value.
                    newInst.setMissing(j);
                } else if (j != inst.classIndex()) {
                    // NOTE(review): setValue(int, String) requires the instance to be
                    // attached to a dataset with nominal attribute info — confirm callers
                    // set newInst's dataset before use.
                    newInst.setValue(j, inst.value(j) < average ? "0" : "1");
                } else {
                    newInst.setValue(j, inst.stringValue(j));
                }
            }
            return newInst;
        } else {
            newInst = new DenseInstance(inst.numAttributes());
            for (int j = 0; j < inst.numAttributes(); j++) {
                if (inst.isMissing(j))
                    continue; // skip; the value is left at the DenseInstance default
                if (j != inst.classIndex())
                    newInst.setValue(j, inst.value(j) < average ? 0 : 1);
                else
                    newInst.setValue(j, inst.value(j));
            }
        }
        return newInst;
    }

    public static void main(String[] args) {
        Clipping cp = new Clipping();
        Instances data = null;
        String fileName = "C:\\Research\\Data\\Time Series Data\\Time Series Classification\\TestData\\TimeSeries_Train.arff";
        try {
            FileReader r;
            r = new FileReader(fileName);
            data = new Instances(r);
            data.setClassIndex(data.numAttributes() - 1);
            System.out.println(" Class type numeric =" + data.attribute(data.numAttributes() - 1).isNumeric());
            System.out.println(" Class type nominal =" + data.attribute(data.numAttributes() - 1).isNominal());
            Instances newInst = cp.transform(data);
            System.out.println(newInst);
        } catch (Exception e) {
            System.out.println(" Error =" + e);
            StackTraceElement[] st = e.getStackTrace();
            for (int i = st.length - 1; i >= 0; i--)
                System.out.println(st[i]);
        }
    }
}
4,467
30.244755
120
java
tsml-java
tsml-java-master/src/main/java/tsml/transformers/ColumnNormalizer.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ /** Class NormalizeAttribute.java * * @author AJB * @version 1 * @since 14/4/09 * * Class normalizes attributes, basic version. 1. Assumes no missing values. 2. Assumes all attributes real values 3. Assumes class index same in all data (vague checks made) but can be none set (classIndex==-1) 4. Batch process, by default it calculates the ranges from the instances in trainData, then uses this to process the instances passed. Note that this may produce values outside the interval range, since the min or max of the test data may be separate. If you want to avoid this, the only way at the moment is to first merge train and test, then pass the merged set. Easy to hack round this if I have to. * Normalise onto [0,1] if norm==NormType.INTERVAL, * Normalise onto Normal(0,1) if norm==NormType.STD_NORMAL, * * Useage: * Instances train = //Get Train * Instances test = //Get Train * * NormalizeAttributes na = new NormalizeAttributes(train); * * na.setNormMethod(NormalizeAttribute.NormType.INTERVAL); //Defaults to interval anyway try{ //Both processed with the stats from train. 
Instances newTrain=na.process(train); Instances newTest=na.process(test); */ package tsml.transformers; import org.apache.commons.lang3.NotImplementedException; import tsml.data_containers.TimeSeriesInstance; import tsml.data_containers.TimeSeriesInstances; import tsml.data_containers.utilities.TimeSeriesSummaryStatistics; import utilities.ArrayUtilities; import weka.core.Instance; import weka.core.Instances; public class ColumnNormalizer implements TrainableTransformer { public enum NormType { INTERVAL, STD_NORMAL }; double[] min; double[] max; double[] mean; double[] stdev; int classIndex; NormType norm = NormType.INTERVAL; boolean isFit; public ColumnNormalizer() { } public ColumnNormalizer(Instances data) { classIndex = data.classIndex(); // Finds all the stats, doesnt cost much more really findStats(data); } protected void findStats(Instances r) { // Find min and max // assert(classIndex==r.classIndex()); max = new double[r.numAttributes()]; min = new double[r.numAttributes()]; for (int j = 0; j < r.numAttributes(); j++) { max[j] = Double.MIN_VALUE; min[j] = Double.MAX_VALUE; for (int i = 0; i < r.numInstances(); i++) { double x = r.instance(i).value(j); if (x > max[j]) max[j] = x; if (x < min[j]) min[j] = x; } } // Find mean and stdev mean = new double[r.numAttributes()]; stdev = new double[r.numAttributes()]; double sum, sumSq, x, y; for (int j = 0; j < r.numAttributes(); j++) { sum = 0; sumSq = 0; for (int i = 0; i < r.numInstances(); i++) { x = r.instance(i).value(j); sum += x; sumSq += x * x; } stdev[j] = (sumSq - (sum * sum) / r.numInstances()) / (r.numInstances()-1); mean[j] = sum / r.numInstances(); stdev[j] = Math.sqrt(stdev[j]); } } protected void findStats(TimeSeriesInstances r) { max = new double[r.getMaxLength()]; min = new double[r.getMaxLength()]; mean = new double[r.getMaxLength()]; stdev = new double[r.getMaxLength()]; for (int j = 0; j < r.getMaxLength(); j++) { double[] slice = r.getVSliceArray(j); max[j] = 
TimeSeriesSummaryStatistics.max(slice); min[j] = TimeSeriesSummaryStatistics.min(slice); mean[j] = TimeSeriesSummaryStatistics.mean(slice); stdev[j] = Math.sqrt(TimeSeriesSummaryStatistics.variance(slice, mean[j])); } } public double[] getRanges() { double[] r = new double[max.length]; for (int i = 0; i < r.length; i++) r[i] = max[i] - min[i]; return r; } @Override public Instance transform(Instance inst) { throw new NotImplementedException("Column wise normalisation doesn't make sense for single instances"); } @Override public TimeSeriesInstance transform(TimeSeriesInstance inst) { throw new NotImplementedException("Column wise normalisation doesn't make sense for single instances"); } // This should probably be connected to trainData? public Instances determineOutputFormat(Instances inputFormat) { return new Instances(inputFormat, 0); } public void setTrainData(Instances data) { // Same as the constructor classIndex = data.classIndex(); // Finds all the stats, doesnt cost much more really findStats(data); } public void setNormMethod(NormType n) { norm = n; } @Override public TimeSeriesInstances transform(TimeSeriesInstances inst) { double[][][] out = null; switch (norm) { case INTERVAL: out = intervalNorm(inst); break; case STD_NORMAL: out = standardNorm(inst); break; } return new TimeSeriesInstances(out, inst.getClassIndexes(), inst.getClassLabels()); } /* Wont normalise the class value */ public double[][][] intervalNorm(TimeSeriesInstances r) { double[][][] out = new double[r.numInstances()][][]; int i =0; for (TimeSeriesInstance inst : r) { out[i++] = ArrayUtilities.transposeMatrix(intervalNorm(inst)); } return out; } public double[][] intervalNorm(TimeSeriesInstance r) { double[][] out = new double[r.getMaxLength()][]; for (int j = 0; j < r.getMaxLength(); j++) { out[j] = TimeSeriesSummaryStatistics.intervalNorm(r.getVSliceArray(j), min[j], max[j]); } return out; } public double[][][] standardNorm(TimeSeriesInstances r) { double[][][] out = new 
double[r.numInstances()][][]; int index=0; for(int i=0; i<r.numInstances(); i++){ out[index] = new double[r.getMaxLength()][]; for (int j = 0; j < r.getMaxLength(); j++) { out[index][j] = TimeSeriesSummaryStatistics.standardNorm(r.get(i).getVSliceArray(j), mean[j], stdev[j]); } out[index++] = ArrayUtilities.transposeMatrix(out[index]); } return out; } public Instances transform(Instances inst) { Instances result = new Instances(inst); switch (norm) { case INTERVAL: intervalNorm(result); break; case STD_NORMAL: standardNorm(result); break; } return result; } /* Wont normalise the class value */ public void intervalNorm(Instances r) { for (int i = 0; i < r.numInstances(); i++) { intervalNorm(r.instance(i)); } } public void intervalNorm(Instance r) { for (int j = 0; j < r.numAttributes(); j++) { if (j != classIndex) { double x = r.value(j); r.setValue(j, (x - min[j]) / (max[j] - min[j])); } } } public void standardNorm(Instances r) { for (int j = 0; j < r.numAttributes(); j++) { if (j != classIndex) { for (int i = 0; i < r.numInstances(); i++) { double x = r.instance(i).value(j); r.instance(i).setValue(j, (x - mean[j]) / (stdev[j])); } } } } @Override public boolean isFit() { return isFit; } @Override public void fit(Instances data) { classIndex = data.classIndex(); // Finds all the stats, doesnt cost much more really findStats(data); isFit = true; } @Override public void fit(TimeSeriesInstances data) { findStats(data); isFit = true; } }
7,754
26.597865
108
java
tsml-java
tsml-java-master/src/main/java/tsml/transformers/CombineTransformer.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package tsml.transformers;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import experiments.data.DatasetLoading;
import tsml.data_containers.TimeSeriesInstance;
import tsml.data_containers.TimeSeriesInstances;
import tsml.data_containers.ts_fileIO.TSReader;
import utilities.InstanceTools;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;

/**
 * Applies several transformers to each instance and concatenates their outputs
 * (class value preserved at the end) into a single combined instance.
 */
public class CombineTransformer implements Transformer {

    Transformer[] transforms;

    /**
     * @param transformers the transformers whose outputs are concatenated, in order.
     */
    public CombineTransformer(List<Transformer> transformers) {
        // BUG FIX: List.toArray() returns Object[]; casting that to Transformer[]
        // throws ClassCastException at runtime. Use the typed overload instead.
        this(transformers.toArray(new Transformer[0]));
    }

    public CombineTransformer(Transformer[] transformers) {
        transforms = transformers; // TODO: could deep copy here
    }

    @Override
    public Instance transform(Instance inst) {
        List<Double> data = new ArrayList<>();
        for (Transformer trans : transforms) {
            Instance out = trans.transform(inst);
            // TODO: Change: we assume the class value is at the end.
            for (double d : InstanceTools.ConvertInstanceToArrayRemovingClassValue(out, out.numAttributes() - 1))
                data.add(d);
        }
        // Put the class value on the end.
        data.add(inst.classValue());
        // Unpack List<Double> into double[]; no direct cast exists.
        Instance out = new DenseInstance(1.0, data.stream().mapToDouble(Double::doubleValue).toArray());
        return out;
    }

    @Override
    public TimeSeriesInstance transform(TimeSeriesInstance inst) {
        // Initialise one (growable) row per dimension.
        List<List<Double>> data = new ArrayList<>();
        for (int i = 0; i < inst.getNumDimensions(); i++) {
            data.add(new ArrayList<>());
        }
        for (Transformer trans : transforms) {
            TimeSeriesInstance ts = trans.transform(inst);
            // Append each transformer's output to the end of every dimension.
            double[][] out = ts.toValueArray();
            for (int i = 0; i < out.length; ++i)
                for (double d : out[i])
                    data.get(i).add(d);
        }
        return new TimeSeriesInstance(data, inst.getLabelIndex());
    }

    @Override
    public Instances determineOutputFormat(Instances inputFormat) throws IllegalArgumentException {
        ArrayList<Attribute> atts = new ArrayList<>();
        // Build the relation name from all sub-transform names; StringBuilder avoids
        // repeated String concatenation in the loop.
        StringBuilder transformNames = new StringBuilder("Concat");
        for (Transformer trans : transforms) {
            Instances out = trans.determineOutputFormat(inputFormat);
            transformNames.append(" | ").append(out.relationName());
            for (int i = 0; i < out.numAttributes(); i++) {
                if (out.classIndex() == i)
                    continue; // skip class index.
                atts.add(new Attribute("Concat_" + out.attribute(i).name()));
            }
        }
        System.out.println(transformNames.toString());
        if (inputFormat.classIndex() >= 0) { // Classification set, set class.
            // Copy the class attribute's values across.
            Attribute target = inputFormat.attribute(inputFormat.classIndex());
            ArrayList<String> vals = new ArrayList<>();
            for (int i = 0; i < target.numValues(); i++)
                vals.add(target.value(i));
            atts.add(new Attribute(inputFormat.attribute(inputFormat.classIndex()).name(), vals));
        }
        Instances result = new Instances(transformNames.toString(), atts, inputFormat.numInstances());
        if (inputFormat.classIndex() >= 0) {
            result.setClassIndex(result.numAttributes() - 1);
        }
        return result;
    }

    public static void main(String[] args) throws FileNotFoundException, IOException {
        String local_path = "D:\\Work\\Data\\Univariate_ts\\"; // Aarons local path for testing.
        String dataset_name = "ChinaTown";
        File f1 = new File(local_path + dataset_name + File.separator + dataset_name + "_TRAIN.ts");
        TSReader ts_reader_multi = new TSReader(new FileReader(f1));
        TimeSeriesInstances train = ts_reader_multi.GetInstances();
        CombineTransformer combined = new CombineTransformer(new Transformer[] { new Cosine(), new Sine() });
        System.out.println(train);
        System.out.println(combined.transform(train));
    }
}
5,321
35.958333
113
java
tsml-java
tsml-java-master/src/main/java/tsml/transformers/ConstantAttributeRemover.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package tsml.transformers;

import java.util.ArrayList;
import java.util.List;

import tsml.data_containers.TimeSeries;
import tsml.data_containers.TimeSeriesInstance;
import tsml.data_containers.TimeSeriesInstances;
import utilities.InstanceTools;
import utilities.NumUtils;
import weka.core.Instance;
import weka.core.Instances;

/**
 * Removes attributes whose value is (nearly) constant across the fitted data.
 * Indices to drop are learned in fit(...) and applied by transform(...).
 */
public class ConstantAttributeRemover implements TrainableTransformer {

    // Indices to delete, stored in DESCENDING order so in-place deletion
    // does not shift indices that have yet to be removed.
    ArrayList<Integer> indexesToRemove;
    boolean isFit;
    // For unequal-length series: only discard an attribute if it was actually
    // compared against at least this many values.
    int minChecksToDiscard = 3;

    /** True when every instance's value at attToCheck (nearly) equals the first instance's. */
    private boolean isAttributeConstantWeka(final Instances data, final int attToCheck) {
        final double firstVal = data.firstInstance().value(attToCheck);
        for (int i = 1; i < data.numInstances(); i++) {
            if (!NumUtils.isNearlyEqual(firstVal, data.get(i).value(attToCheck)))
                return false;
        }
        return true;
    }

    /** Indices of all constant attributes, in descending order. */
    private ArrayList<Integer> findConstantAtts(final Instances data) {
        ArrayList<Integer> out = new ArrayList<>();
        // Loop through all attributes from the end so deletion order is safe.
        for (int i = data.numAttributes() - 1; i >= 0; --i) {
            if (isAttributeConstantWeka(data, i)) {
                out.add(i);
            }
        }
        return out;
    }

    private boolean isAttributeConstant(final TimeSeriesInstances data, final int attToCheck) {
        // In the first series, in the first dimension, get the att to check.
        final double firstVal = data.get(0).get(0).getValue(attToCheck);
        int count = 0;
        for (TimeSeriesInstance inst : data) {
            for (TimeSeries ts : inst) {
                // BUG FIX: the guard was inverted — `if (hasValidValueAt) continue;`
                // skipped every VALID value and compared only invalid ones. The intent
                // (per the original comment on unequal-length series) is to skip series
                // that have no value at this index.
                if (!ts.hasValidValueAt(attToCheck))
                    continue;
                // A single differing value, across any dimension, means not constant.
                if (!NumUtils.isNearlyEqual(firstVal, ts.getValue(attToCheck)))
                    return false;
                count++;
            }
        }
        // With unequal-length arrays, make sure we compared against at least a few values.
        return count >= minChecksToDiscard;
    }

    private ArrayList<Integer> findConstantAtts(final TimeSeriesInstances data) {
        ArrayList<Integer> out = new ArrayList<>();
        // Loop through all attributes from the end so deletion order is safe.
        for (int i = data.getMaxLength() - 1; i >= 0; --i) {
            if (isAttributeConstant(data, i)) {
                out.add(i);
            }
        }
        return out;
    }

    @Override
    public void fit(final Instances data) {
        indexesToRemove = findConstantAtts(data);
        isFit = true;
    }

    @Override
    public TimeSeriesInstance transform(TimeSeriesInstance inst) {
        List<List<Double>> out = new ArrayList<>();
        for (TimeSeries ts : inst) {
            out.add(ts.getVSliceComplementList(indexesToRemove));
        }
        return new TimeSeriesInstance(out);
    }

    @Override
    public void fit(TimeSeriesInstances data) {
        indexesToRemove = findConstantAtts(data);
        isFit = true;
    }

    @Override
    public boolean isFit() {
        return isFit;
    }

    /** NOTE: mutates and returns the input Instances (no copy is made). */
    @Override
    public Instances transform(final Instances data) {
        for (final int att : indexesToRemove)
            data.deleteAttributeAt(att);
        return data;
    }

    /** NOTE: mutates and returns the input Instance (no copy is made). */
    @Override
    public Instance transform(final Instance inst) {
        for (final int att : indexesToRemove)
            inst.deleteAttributeAt(att);
        return inst;
    }

    public static void main(final String[] args) {
        final double[][] t1 = { { 1, 0, 1, 0.00000000000000004 }, { 2, 0, 2, 0 }, { 3, 0, 3, 0 },
                { 2, 0, 2, 0.000000000000000000001 } };
        final double[][] t2 = { { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, { 3, 3, 3, 3 }, { 4, 4, 4, 4 } };
        final Instances train = InstanceTools.toWekaInstances(t1);
        final Instances test = InstanceTools.toWekaInstances(t2);
        final ConstantAttributeRemover rr = new ConstantAttributeRemover();
        final Instances out_train = rr.fitTransform(train);
        final Instances out_test = rr.transform(test);
        System.out.println(out_train);
        System.out.println(out_test);
    }

    @Override
    public Instances determineOutputFormat(Instances data) throws IllegalArgumentException {
        // TODO Auto-generated method stub
        return null;
    }
}
5,317
30.654762
117
java
tsml-java
tsml-java-master/src/main/java/tsml/transformers/Convolution.java
/*
 * This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
 *
 * The UEA TSML toolbox is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The UEA TSML toolbox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
 */

package tsml.transformers;

import java.util.Arrays;

import tsml.data_containers.TimeSeries;
import tsml.data_containers.TimeSeriesInstance;
import tsml.data_containers.utilities.Converter;
import weka.core.Instance;
import weka.core.Instances;

/**
 * Discrete 1D/2D convolution with a (flipped) kernel. Out-of-bounds samples are
 * handled either by a constant pad value (FILL) or by reflecting the index back
 * into the data (SYMM).
 *
 * Exactly one of kernel1D/kernel2D is set; transform applies the 1D kernel to
 * each dimension independently, or the 2D kernel across the whole instance.
 */
public class Convolution implements Transformer {

    public enum ConvolutionType {
        FILL, SYMM
    };

    double[] kernel1D = null;
    double[][] kernel2D = null;
    ConvolutionType convType = ConvolutionType.FILL;
    double padValue = 0;

    /** Constant 1D kernel of the given size, every weight = constantValue. */
    public Convolution(int kernelSize1D, double constantValue) {
        kernel1D = new double[kernelSize1D];
        Arrays.fill(kernel1D, constantValue);
    }

    /** Constant 2D kernel of the given size, every weight = constantValue. */
    public Convolution(int kernelSize2DX, int kernelSize2DY, double constantValue) {
        kernel2D = new double[kernelSize2DX][kernelSize2DY];
        for (int i = 0; i < kernel2D.length; i++)
            Arrays.fill(kernel2D[i], constantValue);
    }

    public Convolution(double[][] kernel) {
        kernel2D = kernel;
    }

    public Convolution(double[] kernel) {
        kernel1D = kernel;
    }

    /** Use constant padding with the given value for out-of-bounds samples. */
    public void setPad(double pad) {
        convType = ConvolutionType.FILL;
        padValue = pad;
    }

    /** Use symmetric reflection for out-of-bounds samples. */
    public void setSymm() {
        convType = ConvolutionType.SYMM;
    }

    // TODO: WEKA Version. Bleh
    @Override
    public Instances determineOutputFormat(Instances data) throws IllegalArgumentException {
        // TODO Auto-generated method stub
        return null;
    }

    @Override
    public TimeSeriesInstance transform(TimeSeriesInstance inst) {
        double[][] out;
        if (kernel1D != null) {
            // 1D kernel: convolve each dimension independently.
            out = new double[inst.getNumDimensions()][];
            int i = 0;
            for (TimeSeries ts : inst) {
                out[i++] = convolution1D(ts.toValueArray());
            }
        } else {
            out = convolution2D(inst.toValueArray());
        }
        return new TimeSeriesInstance(out, inst.getLabelIndex());
    }

    public double[] convolution1D(double[] data) {
        return convolution1D(data, this.kernel1D, convType, padValue);
    }

    /**
     * 1D convolution of data with a flipped kernel.
     *
     * @param data     input samples
     * @param kernel1D kernel weights
     * @param convType FILL (constant pad) or SYMM (reflected index)
     * @param padValue pad value used when convType == FILL
     * @return convolved output, same length as data
     */
    public static double[] convolution1D(double[] data, double[] kernel1D, ConvolutionType convType,
            double padValue) {
        int kCenter = kernel1D.length / 2;
        double[] out = new double[data.length];
        for (int i = 0; i < data.length; i++) {
            for (int j = 0; j < kernel1D.length; j++) {
                int mm = kernel1D.length - 1 - j; // index of flipped kernel
                int ii = i + (kCenter - mm);      // corresponding data index
                boolean iPad = ii >= 0 && ii < data.length;
                if (iPad) {
                    out[i] += data[ii] * kernel1D[mm];
                } else if (convType == ConvolutionType.SYMM) {
                    // Symmetrically flip the out-of-bounds index back into range.
                    ii = i - (kCenter - mm);
                    out[i] += data[ii] * kernel1D[mm];
                } else {
                    // Constant pad.
                    out[i] += padValue * kernel1D[mm];
                }
            }
        }
        return out;
    }

    public double[][] convolution2D(double[][] data) {
        return convolution2D(data, this.kernel2D, convType, padValue);
    }

    /**
     * 2D convolution of data with a flipped kernel; boundary handling as in
     * {@link #convolution1D(double[], double[], ConvolutionType, double)}.
     */
    public static double[][] convolution2D(double[][] data, double[][] kernel2D, ConvolutionType convType,
            double padValue) {
        double[][] out = new double[data.length][];
        // Centre of the kernel (half of kernel size).
        // BUG FIX: the original computed kCenterX from the kernel's ROW count and
        // kCenterY from its COLUMN count, then used them swapped (row offset from the
        // column centre and vice versa). Correct only for square kernels; rectangular
        // kernels produced wrong offsets / out-of-bounds access. Behaviour for square
        // kernels is unchanged.
        int kCenterRow = kernel2D.length / 2;
        int kCenterCol = kernel2D[0].length / 2;
        for (int i = 0; i < data.length; ++i) {
            out[i] = new double[data[i].length];
            for (int j = 0; j < data[i].length; ++j) {
                for (int m = 0; m < kernel2D.length; ++m) {
                    int mm = kernel2D.length - 1 - m; // row index of flipped kernel
                    for (int n = 0; n < kernel2D[mm].length; ++n) {
                        int nn = kernel2D[mm].length - 1 - n; // column index of flipped kernel
                        int ii = i + (kCenterRow - mm);
                        int jj = j + (kCenterCol - nn);
                        boolean iPad = ii >= 0 && ii < data.length;
                        boolean jPad = jj >= 0 && jj < data[i].length;
                        if (iPad && jPad) {
                            out[i][j] += data[ii][jj] * kernel2D[mm][nn];
                        } else if (convType == ConvolutionType.SYMM) {
                            // Symmetrically flip whichever index is out of bounds.
                            if (!iPad)
                                ii = i - (kCenterRow - mm);
                            if (!jPad)
                                jj = j - (kCenterCol - nn);
                            out[i][j] += data[ii][jj] * kernel2D[mm][nn];
                        } else {
                            // Constant pad.
                            out[i][j] += padValue * kernel2D[mm][nn];
                        }
                    }
                }
            }
        }
        return out;
    }

    public static void main(String[] args) {
        // 3x3 averaging kernel.
        double[][] kernel = { { 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0 }, { 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0 },
                { 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0 } };
        // 10x10 data.
        double[][] data = { { 0.0, 1.0, 2.0, 4.0, 5.0, 5.0, 4.0, 3.0, 2.0, 1.0 },
                { 0.0, 1.0, 2.0, 4.0, 5.0, 5.0, 4.0, 3.0, 2.0, 1.0 },
                { 0.0, 1.0, 2.0, 4.0, 5.0, 5.0, 4.0, 3.0, 2.0, 1.0 },
                { 0.0, 1.0, 2.0, 4.0, 5.0, 5.0, 4.0, 3.0, 2.0, 1.0 },
                { 0.0, 1.0, 2.0, 4.0, 5.0, 5.0, 4.0, 3.0, 2.0, 1.0 },
                { 0.0, 1.0, 2.0, 4.0, 5.0, 5.0, 4.0, 3.0, 2.0, 1.0 },
                { 0.0, 1.0, 2.0, 4.0, 5.0, 5.0, 4.0, 3.0, 2.0, 1.0 },
                { 0.0, 1.0, 2.0, 4.0, 5.0, 5.0, 4.0, 3.0, 2.0, 1.0 },
                { 0.0, 1.0, 2.0, 4.0, 5.0, 5.0, 4.0, 3.0, 2.0, 1.0 },
                { 0.0, 1.0, 2.0, 4.0, 5.0, 5.0, 4.0, 3.0, 2.0, 1.0 } };
        double[][] out = convolution2D(data, kernel, ConvolutionType.FILL, 1);
        System.out.println(Arrays.deepToString(out));
        TimeSeriesInstance ts = new TimeSeriesInstance(data);
        Convolution conv = new Convolution(3, 3, 1.0 / 9.0);
        conv.setPad(1);
        TimeSeriesInstance out_ts = conv.transform(ts);
        System.out.println(out_ts);
        double[][] data1 = { { 0.0, 1.0, 2.0, 4.0, 5.0, 5.0, 4.0, 3.0, 2.0, 1.0 } };
        ts = new TimeSeriesInstance(data1);
        conv = new Convolution(3, 1.0 / 3.0);
        // conv.setPad(1);
        conv.setSymm();
        out_ts = conv.transform(ts);
        System.out.println(out_ts);
    }
}
7,899
33.955752
119
java
tsml-java
tsml-java-master/src/main/java/tsml/transformers/Cosine.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.transformers; import java.io.File; import java.io.IOException; import experiments.data.DatasetLoading; import tsml.data_containers.TimeSeries; import tsml.data_containers.TimeSeriesInstance; import weka.core.*; /* * copyright: Anthony Bagnall * @author Aaron Bostrom * * */ public class Cosine implements Transformer { @Override public TimeSeriesInstance transform(TimeSeriesInstance inst) { //multidimensional cosine. 
Cosine applied series wise for each dimension double[][] out = new double[inst.getNumDimensions()][]; int index = 0; for(TimeSeries ts : inst){ double[] data = new double[ts.getSeriesLength()]; double n = data.length; for (int k = 0; k < n; k++) { double fk = 0; for (int i = 0; i < n; i++) { double c = k * (i + 0.5) * (Math.PI / n); fk += ts.getValue(i) * Math.cos(c); } data[k] = fk; } out[index++] = data; } return new TimeSeriesInstance(out, inst.getLabelIndex()); } @Override public Instance transform(Instance inst) { int n = inst.numAttributes() - 1; Instance newInst = new DenseInstance(inst.numAttributes()); for (int k = 0; k < n; k++) { double fk = 0; for (int i = 0; i < n; i++) { double c = k * (i + 0.5) * (Math.PI / n); fk += inst.value(i) * Math.cos(c); } newInst.setValue(k, fk); } // overrided cosine class value, with original. if (inst.classIndex() >= 0) newInst.setValue(inst.classIndex(), inst.classValue()); return newInst; } public Instances determineOutputFormat(Instances inputFormat) { FastVector<Attribute> atts = new FastVector<>(); for (int i = 0; i < inputFormat.numAttributes() - 1; i++) { // Add to attribute list String name = "Cosine_" + i; atts.addElement(new Attribute(name)); } // Get the class values as a fast vector Attribute target = inputFormat.attribute(inputFormat.classIndex()); FastVector<String> vals = new FastVector<>(target.numValues()); for (int i = 0; i < target.numValues(); i++) vals.addElement(target.value(i)); atts.addElement(new Attribute(inputFormat.attribute(inputFormat.classIndex()).name(), vals)); Instances result = new Instances("COSINE" + inputFormat.relationName(), atts, inputFormat.numInstances()); if (inputFormat.classIndex() >= 0) { result.setClassIndex(result.numAttributes() - 1); } System.out.println(result); return result; } public static void main(String[] args) throws IOException { String localPath = "src/main/java/experiments/data/tsc/"; // path for testing. 
String datasetName = "ChinaTown"; Instances train = DatasetLoading .loadData(localPath + datasetName + File.separator + datasetName + "_TRAIN.ts"); Instances test = DatasetLoading .loadData(localPath + datasetName + File.separator + datasetName + "_TEST.ts"); Cosine cosTransform = new Cosine(); Instances out_train = cosTransform.transform(train); Instances out_test = cosTransform.transform(test); System.out.println(out_train.toString()); System.out.println(out_test.toString()); } }
4,328
34.195122
114
java
tsml-java
tsml-java-master/src/main/java/tsml/transformers/DWT.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.transformers; import org.apache.commons.lang3.ArrayUtils; import tsml.data_containers.TimeSeries; import tsml.data_containers.TimeSeriesInstance; import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; import java.util.ArrayList; import java.util.Arrays; /** * This class performs a Haar Wavelet transformation on a given time series. The * result is the approximate coefficients of the highest level and the wavelet * coefficients of every level from numLevels down to one are all concatenated * together. 
* * @author Vincent Nicholson * */ public class DWT implements Transformer { private int numLevels; public DWT() { this.numLevels = 3; } public DWT(int numLevels) { this.numLevels = numLevels; } public int getNumLevels() { return this.numLevels; } public void setNumLevels(int numLevels) { this.numLevels = numLevels; } @Override public Instance transform(Instance inst) { checkParameters(); double[] data = inst.toDoubleArray(); // remove class attribute if needed double[] temp; int c = inst.classIndex(); if (c >= 0) { temp = new double[data.length - 1]; System.arraycopy(data, 0, temp, 0, c); // assumes class attribute is in last index data = temp; } double[] waveletCoeffs = getDWTCoefficients(data); // Now in DWT form, extract out the terms and set the attributes of new instance Instance newInstance; int numAtts = waveletCoeffs.length; if (inst.classIndex() >= 0) newInstance = new DenseInstance(numAtts + 1); else newInstance = new DenseInstance(numAtts); // Copy over the values into the Instance for (int j = 0; j < numAtts; j++) newInstance.setValue(j, waveletCoeffs[j]); // Set the class value if (inst.classIndex() >= 0) newInstance.setValue(newInstance.numAttributes() - 1, inst.classValue()); return newInstance; } /** * Private function for calculating the wavelet coefficients of a given time * series. * * @param inst - the time series to be transformed. * @return the transformed inst. */ private double[] getDWTCoefficients(double[] inst) { // For temporary storage of each array double[][] vectors = new double[this.numLevels + 1][]; if (numLevels == 0) { return inst; } else { // Extract the coefficients on each level double[] current = inst; for (int i = 0; i < numLevels; i++) { double[] approxCoeffs = getApproxCoefficients(current); double[] waveletCoeffs = getWaveletCoefficients(current); vectors[i] = waveletCoeffs; current = approxCoeffs; } vectors[numLevels] = current; } // Combine the double array into one. 
return concatenateVectors(vectors); } /** * Private method for combining the 2d array of vectors into the correct order. * * @param vectors * @return */ private double[] concatenateVectors(double[][] vectors) { double[] out = new double[] {}; for (int i = vectors.length - 1; i > -1; i--) { out = ArrayUtils.addAll(out, vectors[i]); } return out; } /** * Private method to calculate the approximate coefficients of a time series t. * * @param t - the time series. * @return */ public double[] getApproxCoefficients(double[] t) { if (t.length == 1) { return t; } int total = (int) Math.floor(t.length / 2); double[] coeffs = new double[total]; for (int i = 0; i < total; i++) { coeffs[i] = ((t[2 * i] + t[2 * i + 1]) / Math.sqrt(2)); } return coeffs; } /** * Private method to calculate the wavelet coefficients of a time series t. * * @param t - the time series. * @return */ public double[] getWaveletCoefficients(double[] t) { if (t.length == 1) { return t; } int total = (int) Math.floor(t.length / 2); double[] coeffs = new double[total]; for (int i = 0; i < total; i++) { coeffs[i] = ((t[2 * i] - t[2 * i + 1]) / Math.sqrt(2)); } return coeffs; } @Override public TimeSeriesInstance transform(TimeSeriesInstance inst) { //could do this across all dimensions. double[][] out = new double[inst.getNumDimensions()][]; int i = 0; for(TimeSeries ts : inst){ out[i++] = getDWTCoefficients(ts.toValueArray()); } //create a new output instance with the ACF data. return new TimeSeriesInstance(out, inst.getLabelIndex()); } @Override public Instances determineOutputFormat(Instances inputFormat) throws IllegalArgumentException { // If the class index exists. 
if (inputFormat.classIndex() >= 0) { if (inputFormat.classIndex() != inputFormat.numAttributes() - 1) { throw new IllegalArgumentException("cannot handle class values not at end"); } } int numAttributes = calculateNumAttributes(inputFormat.numAttributes()); ArrayList<Attribute> attributes = new ArrayList<>(); // Create a list of attributes for (int i = 0; i < numAttributes; i++) { attributes.add(new Attribute("DWTCoefficient_" + i)); } // Add the class attribute (if it exists) if (inputFormat.classIndex() >= 0) { attributes.add(inputFormat.classAttribute()); } Instances result = new Instances("DWT" + inputFormat.relationName(), attributes, inputFormat.numInstances()); // Set the class attribute (if it exists) if (inputFormat.classIndex() >= 0) { result.setClassIndex(result.numAttributes() - 1); } return result; } /** * Private method to calculate the number of attributes produced by DWT. * * @return - int, the number of attributes that DWT produces. */ private int calculateNumAttributes(int timeSeriesLength) { int numLevels = this.numLevels; if (numLevels == 0) { return timeSeriesLength; } else { int counter = 0; // Record the length of the time series at the current level int timeSeriesLengthAtCurLevel = timeSeriesLength; for (int i = 0; i < numLevels; i++) { if (timeSeriesLengthAtCurLevel != 1) { timeSeriesLengthAtCurLevel = (int) Math.floor(timeSeriesLengthAtCurLevel / 2); } counter += timeSeriesLengthAtCurLevel; // If at the last level if (i == numLevels - 1) { counter += timeSeriesLengthAtCurLevel; } } return counter; } } private void checkParameters() { if (this.numLevels < 0) { throw new IllegalArgumentException("numLevels cannot be negative."); } } public static void main(String[] args) throws Exception { Instances data = createData(new double[] { 1, 2, 3, 4, 5 }); // test bad num_levels // num_levels cannot be negative int[] badNumLevels = new int[] { -1, -5, -999 }; for (int badNumLevel : badNumLevels) { try { DWT d = new DWT(badNumLevel); 
d.transform(data); System.out.println("Test failed."); } catch (IllegalArgumentException e) { System.out.println("Test passed."); } } // test good num_levels int[] goodNumLevels = new int[] { 0, 1, 999 }; for (int goodNumLevel : goodNumLevels) { try { DWT d = new DWT(goodNumLevel); d.transform(data); System.out.println("Test passed."); } catch (IllegalArgumentException e) { System.out.println("Test failed."); } } // check output data = createData(new double[] { 4, 6, 10, 12, 8, 6, 5, 5 }); DWT d = new DWT(2); double[] resArr = d.transform(data).get(0).toDoubleArray(); // This is equivalent but rounding errors prevent from checking exactly System.out.println( Arrays.equals(resArr, new double[] { 16, 12, -6, 2, -Math.sqrt(2), -Math.sqrt(2), Math.sqrt(2), 0 })); data = createData(new double[] { -5, 2.5, 1, 3, 10, -1.5, 6, 12, -3 }); resArr = d.transform(data).get(0).toDoubleArray(); // Same issue here System.out.println( Arrays.equals(resArr, new double[] { 0.75, 13.25, -3.25, -4.75, -5.303, -1.414, 8.132, -4.243 })); // check that num levels being zero does no change data = createData(new double[] { 1, 2, 3, 4, 5, 6, 7, 8 }); d = new DWT(0); resArr = d.transform(data).get(0).toDoubleArray(); System.out.println(Arrays.equals(data.get(0).toDoubleArray(), resArr)); } /** * Function to create data for testing purposes. * * @return */ private static Instances createData(double[] data) { // Create the attributes ArrayList<Attribute> atts = new ArrayList<>(); for (int i = 0; i < data.length; i++) { atts.add(new Attribute("test_" + i)); } Instances newInsts = new Instances("Test_dataset", atts, 1); // create the test data createInst(data, newInsts); return newInsts; } /** * private function for creating an instance from a double array. Used for * testing purposes. 
* * @param arr * @return */ private static void createInst(double[] arr, Instances dataset) { Instance inst = new DenseInstance(arr.length); for (int i = 0; i < arr.length; i++) { inst.setValue(i, arr[i]); } inst.setDataset(dataset); dataset.add(inst); } }
11,129
33.890282
118
java
tsml-java
tsml-java-master/src/main/java/tsml/transformers/Derivative.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.transformers; import java.io.Serializable; import tsml.data_containers.TimeSeries; import tsml.data_containers.TimeSeriesInstance; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; /** * Purpose: class to take the derivative of a time series. * <p> * Contributors: goastler, Jason Lines */ public class Derivative implements Transformer, Serializable { private static Derivative INSTANCE; // Global derivative function which is cached, i.e. 
if you ask it to convert the // same instance twice it will // instead fetch from the cache the second time private static CachedTransformer GLOBAL_CACHE; public static Derivative getGlobalInstance() { if (INSTANCE == null) { INSTANCE = new Derivative(); } return INSTANCE; } public static CachedTransformer getGlobalCachedTransformer() { if (GLOBAL_CACHE == null) { GLOBAL_CACHE = new CachedTransformer(getGlobalInstance()); } return GLOBAL_CACHE; } @Override public boolean equals(final Object o) { return super.equals(o) && o instanceof Derivative; // no internal state, so always equal } @Override public int hashCode() { return 0; // no internal state, so all derivative objects are equal and have same hash } public static double[] getDerivative(double[] input, boolean classValOn) { int classPenalty = 0; if (classValOn) { classPenalty = 1; } double[] derivative = new double[input.length]; for (int i = 1; i < input.length - 1 - classPenalty; i++) { // avoids class Val if present derivative[i] = ((input[i] - input[i - 1]) + ((input[i + 1] - input[i - 1]) / 2)) / 2; } derivative[0] = derivative[1]; derivative[derivative.length - 1 - classPenalty] = derivative[derivative.length - 2 - classPenalty]; if (classValOn) { derivative[derivative.length - 1] = input[input.length - 1]; } return derivative; } @Override public Instances determineOutputFormat(Instances inputFormat) { //If the class index exists. if(inputFormat.classIndex() >= 0) { if (inputFormat.classIndex() != inputFormat.numAttributes() - 1) { throw new IllegalArgumentException("cannot handle class values not at end"); } } return new Instances(inputFormat, inputFormat.size()); } @Override public Instance transform(Instance inst) { final double[] derivative = getDerivative(inst.toDoubleArray(), true); final Instance copy = new DenseInstance(inst.weight(), derivative); copy.setDataset(inst.dataset()); return copy; // careful! 
} @Override public TimeSeriesInstance transform(TimeSeriesInstance inst) { double[][] out = new double[inst.getNumDimensions()][]; int i = 0; for (TimeSeries ts : inst) { out[i++] = getDerivative(ts.toValueArray(), false); } return new TimeSeriesInstance(out, inst.getLabelIndex()); } }
4,003
33.517241
108
java
tsml-java
tsml-java-master/src/main/java/tsml/transformers/Differences.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.transformers; import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; import java.util.ArrayList; import tsml.data_containers.TimeSeries; import tsml.data_containers.TimeSeriesInstance; /* simple Filter that just creates a new series of differences order k. * The new series has k fewer attributes than the original * */ public class Differences implements Transformer { private int order = 1; private boolean subtractFormerValue = false; String attName = ""; public void setOrder(int m) { order = m; } public void setSubtractFormerValue(boolean b) { subtractFormerValue = b; } private static final long serialVersionUID = 1L; public void setAttName(String s) { attName = s; } public Instances determineOutputFormat(Instances inputFormat) { // Set up instances size and format. int classIndexMod = (inputFormat.classIndex() >= 0 ? 
1 : 0); ArrayList<Attribute> atts = new ArrayList<>(); String name; for (int i = 0; i < inputFormat.numAttributes() - order - classIndexMod; i++) { name = attName + "Difference" + order + "_" + (i + 1); atts.add(new Attribute(name)); } if (inputFormat.classIndex() >= 0) { // Classification set, set class // Get the class values as a fast vector Attribute target = inputFormat.attribute(inputFormat.classIndex()); ArrayList<String> vals = new ArrayList<>(); for (int i = 0; i < target.numValues(); i++) vals.add(target.value(i)); atts.add(new Attribute(inputFormat.attribute(inputFormat.classIndex()).name(), vals)); } Instances result = new Instances("Difference" + order + inputFormat.relationName(), atts, inputFormat.numInstances()); if (inputFormat.classIndex() >= 0) { result.setClassIndex(result.numAttributes() - 1); } return result; } @Override public Instance transform(Instance inst) { // 1. Get series: double[] d = inst.toDoubleArray(); // 2. Remove target class double[] temp; int c = inst.classIndex(); if (c >= 0) { temp = new double[d.length - 1]; System.arraycopy(d, 0, temp, 0, c); d = temp; } // 3. Create Difference series int classIndexMod = (c >= 0 ? 1 : 0); int numAtts = inst.numAttributes() - order - classIndexMod; //if have a classindex then make it one shorter. 
double[] diffs; if (subtractFormerValue) diffs = calculateDifferences2(d, numAtts); else diffs = calculateDifferences(d, numAtts); // Extract out the terms and set the attributes Instance newInst = new DenseInstance(diffs.length + classIndexMod); for (int j = 0; j < diffs.length; j++) { newInst.setValue(j, diffs[j]); } if (c >= 0) newInst.setValue(diffs.length, inst.classValue()); return newInst; } private double[] calculateDifferences(double[] d, int numAtts) { double[] diffs = new double[numAtts]; for (int j = 0; j < diffs.length; j++) diffs[j] = d[j] - d[j + order]; return diffs; } private double[] calculateDifferences2(double[] d, int numAtts) { double[] diffs = new double[numAtts]; for (int j = 0; j < diffs.length; j++) diffs[j] = d[j + order] - d[j]; return diffs; } public static void main(String[] args) { /** * Debug code to test SummaryStats generation: * * * try{ Instances test=ClassifierTools.loadData("C:\\Users\\ajb\\Dropbox\\TSC * Problems\\Beef\\Beef_TRAIN"); // Instances filter=new * SummaryStats().process(test); SummaryStats m=new SummaryStats(); * m.setInputFormat(test); Instances filter=Filter.useFilter(test,m); * System.out.println(filter); } catch(Exception e){ * System.out.println("Exception thrown ="+e); e.printStackTrace(); * * } * */ } @Override public TimeSeriesInstance transform(TimeSeriesInstance inst) { double[][] out = new double[inst.getNumDimensions()][]; int i = 0; for (TimeSeries ts : inst) { if (subtractFormerValue) out[i++] = calculateDifferences2(ts.toValueArray(), ts.getSeriesLength() - order); else out[i++] = calculateDifferences(ts.toValueArray(), ts.getSeriesLength() - order); } return new TimeSeriesInstance(out, inst.getLabelIndex()); } }
4,964
31.032258
110
java
tsml-java
tsml-java-master/src/main/java/tsml/transformers/DimensionIndependentTransformer.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.transformers; import weka.core.*; import java.util.ArrayList; import tsml.data_containers.TimeSeriesInstance; /* * This class uses a univariate transformer on a multivariate dataset by executing * the transformer along each dimension of a time series. * * @author Vincent Nicholson * * */ public class DimensionIndependentTransformer implements Transformer { private Transformer transformer; // Need this to set the dataset on a newly created Instance. private Instances dataHeader; /** * DimensionIndependentTransformer - this class applies a given transformer * along each dimension given a multivariate time series. * * @param t - the transformer to be applied along each dimension. 
*/ public DimensionIndependentTransformer(Transformer t) { if (t instanceof TrainableTransformer) { throw new IllegalArgumentException("t cannot be of type TrainableTransformer."); } this.transformer = t; } public DimensionIndependentTransformer() { this.transformer = new PAA(); } @Override public Instance transform(Instance inst) { Instances dimensions = inst.relationalValue(0); Instances transformedInsts = transformer.transform(dimensions); Instance res = new DenseInstance(2); res.setDataset(dataHeader); int index = res.attribute(0).addRelation(transformedInsts); res.setValue(0, index); if (inst.classIndex() >= 0) { res.setClassValue(inst.classValue()); res.setValue(1, inst.classValue()); } return res; } @Override public Instances determineOutputFormat(Instances data) throws IllegalArgumentException { // Create the relation from the transformer Instances outputFormat = transformer.determineOutputFormat(data.attribute(0).relation()); // Just 2 attributes, the relational attribute and the class value (if it has // one). ArrayList<Attribute> attributes = new ArrayList<>(); attributes.add(new Attribute("relationalAtt", outputFormat)); if (data.classIndex() >= 0) { attributes.add(data.classAttribute()); } // Create the header to store the data in Instances result = new Instances("MultiDimensional_" + outputFormat.relationName(), attributes, data.numInstances()); if (data.classIndex() >= 0) { result.setClassIndex(1); } this.dataHeader = result; return result; } @Override public TimeSeriesInstance transform(TimeSeriesInstance inst) { // TODO Auto-generated method stub //not implementing this as for TSInstances we already support multi out of the box. return null; } }
3,579
34.8
103
java
tsml-java
tsml-java-master/src/main/java/tsml/transformers/FFT.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.transformers; import tsml.data_containers.TimeSeries; import tsml.data_containers.TimeSeriesInstance; /* Performs a FFT of the data set. NOTE: * 1. If algorithm type is set to DFT, then this will only perform a FFT if the series is length power of 2. * otherwise it will perform the order m^2 DFT. * 2. If algorithm type is set to FFT, then, if the length is not a powerr of 2, it either truncates or pads * (determined by the variable pad) with the mean the each series (i.e. each Instance) * so that the new length is power of 2 by flag pad (default true) * 2. By default, stoAlgorithmTyperes the complex terms in order, so att 1 is real coeff of Fourier term 1, attribute 2 the imag etc * 3. 
Only stores the first half of the Fourier terms (which are duplicates of the second half) * * Note that the series does store the first fourier term (series mean) and the * imaginary part will always be zero */ import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import java.util.Arrays; public class FFT implements Transformer { /** * */ public enum AlgorithmType { DFT, FFT } // If set to DFT, this will only perform a FFT if the series is length power of // 2, otherwise resorts to DFT AlgorithmType algo = AlgorithmType.DFT; // If set to FFT, this will pad (or truncate) series to the nearest power of // 2 private static final long serialVersionUID = 1L; private boolean pad = true; private static final double TWOPI = (Math.PI * 2); private int fullLength = -1; public void padSeries(boolean b) { pad = b; } public void useDFT() { algo = AlgorithmType.DFT; } public void useFFT() { algo = AlgorithmType.FFT; } @Override public Instances determineOutputFormat(Instances inputFormat) { /** * This method determines whether padding is required. The If the DFT is being * calculated, the length will be 2*m, where m= (numAttributes -1) If the FFT is * being used if pad ==true find x=first ^2 greater than m length=x else find * x=first ^2 greater than m , y last ^2 less than m length= min(x-m,m-y) **/ int length = findLength(inputFormat); // Set up instances size and format. 
FastVector atts = new FastVector(); String name; for (int i = 0; i < length; i++) { if (i % 2 == 0) name = "FFT_" + (i / 2) + "_Real"; else name = "FFT_" + (i / 2) + "_Imag"; atts.addElement(new Attribute(name)); } if (inputFormat.classIndex() >= 0) { // Classification set, set class // Get the class values as a fast vector Attribute target = inputFormat.attribute(inputFormat.classIndex()); FastVector vals = new FastVector(target.numValues()); for (int i = 0; i < target.numValues(); i++) vals.addElement(target.value(i)); atts.addElement(new Attribute(inputFormat.attribute(inputFormat.classIndex()).name(), vals)); } Instances result = new Instances("FFT_" + inputFormat.relationName(), atts, inputFormat.numInstances()); if (inputFormat.classIndex() >= 0) result.setClassIndex(result.numAttributes() - 1); return result; } protected int findLength(Instances inputFormat) { if (algo == AlgorithmType.FFT) return findPowerOfTwoLength(inputFormat); else if (algo == AlgorithmType.DFT) { if (inputFormat.classIndex() >= 0) { // Classification set, dont transform the target class! return (inputFormat.numAttributes() - 1); } else return inputFormat.numAttributes(); } throw new RuntimeException("Algorithm Type+ " + algo + " has not been implemented for FFT Class"); } protected int findLength(Instance inputFormat) { if (algo == AlgorithmType.FFT) return findPowerOfTwoLength(inputFormat); else if (algo == AlgorithmType.DFT) { if (inputFormat.classIndex() >= 0) { // Classification set, dont transform the target class! return (inputFormat.numAttributes() - 1); } else return inputFormat.numAttributes(); } throw new RuntimeException("Algorithm Type+ " + algo + " has not been implemented for FFT Class"); } // Length of the series NOT COUNTING THE CLASS ATTRIBUTE protected int findPowerOfTwoLength(Instance inputFormat) { int oldLength = 0; int length = 0; if (inputFormat.classIndex() >= 0) // Classification set, dont transform the target class! 
oldLength = inputFormat.numAttributes() - 1; else oldLength = inputFormat.numAttributes(); // Check if a power of 2, if not either pad or truncate if (!MathsPower2.isPow2(oldLength)) { length = (int) MathsPower2.roundPow2((float) oldLength); if (pad) { if (length < oldLength) length *= 2; } else { if (length > oldLength) length /= 2; } } else length = oldLength; return length; } // Length of the series NOT COUNTING THE CLASS ATTRIBUTE protected int findPowerOfTwoLength(Instances inputFormat) { int oldLength = 0; int length = 0; if (inputFormat.classIndex() >= 0) // Classification set, dont transform the target class! oldLength = inputFormat.numAttributes() - 1; else oldLength = inputFormat.numAttributes(); // Check if a power of 2, if not either pad or truncate if (!MathsPower2.isPow2(oldLength)) { length = (int) MathsPower2.roundPow2((float) oldLength); if (pad) { if (length < oldLength) length *= 2; } else { if (length > oldLength) length /= 2; } } else length = oldLength; return length; } /** * * @param instances * @return Fourier transforms, each consecutive two terms are the real/imaginary * @throws Exception This process only stores half the Fourier terms, since the * second half are just a duplicate of the first half with a * different sign for the imaginary term If the DFT algorithm * is used, it returns exactly m terms (where m is the * original series length If FFT is used it returns x/2, where * x is either the smallest power of 2 greater than m * (padding), or the largest power of 2 less than m * (truncating). If the variable pad is true, it ALWAYS pads, * if pad==false it will go to the closest power of 2 above or * below. */ public Instances transform(Instances data) { // if we're attached to a dataset then do it normally, and if we haven't been // calculated before. 
if (fullLength <= 0) fullLength = findLength(data); Instances output = determineOutputFormat(data); for (Instance inst : data) { output.add(transform(inst)); } return output; } @Override public Instance transform(Instance inst) { int originalLength = inst.numAttributes(); if (inst.classIndex() >= 0) { originalLength--; } // if we haven't been calculated before. if we're attached to a dataset then do // it normally else do it from the single series. if (fullLength <= 0) fullLength = inst.dataset() != null ? findLength(inst.dataset()) : findLength(inst); // 1. Get original series stored in a complex array. This may be padded or // truncated // depending on the original length. If DFT is being used, it is neither. Complex[] c = new Complex[fullLength]; int count = 0; double seriesTotal = 0; for (int j = 0; j < originalLength && count < c.length; j++) { // May cut off the trailing values if (inst.classIndex() != j) { c[count] = new Complex(inst.value(j), 0.0); seriesTotal += inst.value(j); count++; } } // Add any Padding required double mean = seriesTotal / count; while (count < c.length) c[count++] = new Complex(mean, 0); // 2. Find FFT/DFT of series. if (algo == AlgorithmType.FFT) fft(c, c.length); else c = dft(c); // Extract out the terms and set the attributes. Instance out = new DenseInstance(c.length + 1); for (int j = 0; j < c.length / 2; j++) { out.setValue(2 * j, c[j].real); out.setValue(2 * j + 1, c[j].imag); } // Set class value. // Set class value. 
if (inst.classIndex() >= 0) out.setValue(c.length, inst.classValue()); return out; } @Override public TimeSeriesInstance transform(TimeSeriesInstance inst) { double[][] out = new double[inst.getNumDimensions()][]; if(fullLength <=0){ if (algo == AlgorithmType.FFT){ int oldLength = inst.getMinLength(); int length = 0; if (!MathsPower2.isPow2(oldLength)) { length = (int) MathsPower2.roundPow2((float) oldLength); if (pad) { if (length < oldLength) length *= 2; } else { if (length > oldLength) length /= 2; } } else length = oldLength; fullLength = length; } else fullLength = inst.getMinLength(); } int i = 0; for (TimeSeries ts : inst) { Complex[] c = new Complex[fullLength]; int count = 0; double seriesTotal = 0; for (int j = 0; j < ts.getSeriesLength() && count < c.length; j++) { // May cut off the trailing values c[count] = new Complex(ts.getValue(j), 0.0); seriesTotal += ts.getValue(j); count++; } // Add any Padding required double mean = seriesTotal / count; while (count < c.length) c[count++] = new Complex(mean, 0); // 2. Find FFT/DFT of series. if (algo == AlgorithmType.FFT) fft(c, c.length); else c = dft(c); // Extract out the terms and set the attributes. //construct the sequence of real/imaginary alternating values. 
out[i] = new double[c.length]; for (int j = 0; j < c.length / 2; j++) { out[i][2 * j] = c[j].real; out[i][2 * j + 1] = c[j].imag; } i++; } return new TimeSeriesInstance(out, inst.getLabelIndex()); } /** * Perform a discrete fourier transform, O(n^2) * */ public Complex[] dft(double[] series) { int n = series.length; Complex[] dft = new Complex[n]; for (int k = 0; k < n; k++) { // For each output element float sumreal = 0; float sumimag = 0; for (int t = 0; t < series.length; t++) { // For each input element sumreal += series[t] * Math.cos(2 * Math.PI * t * k / n); sumimag += -series[t] * Math.sin(2 * Math.PI * t * k / n); } dft[k] = new Complex(sumreal, sumimag); } return dft; } public Complex[] dft(Complex[] complex) { int n = complex.length; Complex[] dft = new Complex[n]; for (int k = 0; k < n; k++) { // For each output element float sumreal = 0; float sumimag = 0; for (int t = 0; t < complex.length; t++) { // For each input element sumreal += complex[t].real * Math.cos(2 * Math.PI * t * k / n) + complex[t].imag * Math.sin(2 * Math.PI * t * k / n); sumimag += -complex[t].real * Math.sin(2 * Math.PI * t * k / n) + complex[t].imag * Math.cos(2 * Math.PI * t * k / n); } dft[k] = new Complex(sumreal, sumimag); } return dft; } /** * Perform an in-place mixed-radix inverse Fast Fourier Transform on the first * <code>n</code> elements of the given set of <code>Complex</code> numbers. If * <code>n</code> is not a power of two then the inverse FFT is performed on the * first N numbers where N is largest power of two less than <code>n</code> */ public void fft(Complex[] complex, int n) { fft(1, complex, n); } /** * Sort a set of <code>Complex</code> numbers into a bit-reversed order - only * sort the first <code>n</code> elements. 
This method performs the sort * in-place */ public static void bitReverse(Complex[] complex, int n) { int halfN = n / 2; int i, j, m; Complex temp; for (i = j = 0; i < n; ++i) { if (j >= i) { temp = complex[j]; complex[j] = complex[i]; complex[i] = temp; } m = halfN; while (m >= 1 && j >= m) { j -= m; m /= 2; } j += m; } temp = null; } /** * Perform an in-place mixed-radix inverse Fast Fourier Transform on the first * <code>n</code> elements of the given set of <code>Complex</code> numbers. If * <code>n</code> is not a power of two then the inverse FFT is performed on the * first N numbers where N is largest power of two less than <code>n</code> */ public void inverseFFT(Complex[] complex, int n) { fft(-1, complex, n); } // Perform an in-place mixed-radix FFT (if sign is 1) or inverse // FFT (if sign is -1) on the first n elements of the given set of // Complex numbers. Round n to the nearest power of two. // // This method performs the FFT in-place on the given set. private void fft(int sign, Complex[] complex, int n) { // n is number of data elements upon which FFT will be // performed. Round number of data elements to nearest power // of 2 n = (int) MathsPower2.roundPow2(n); // Sort the first n elements into bit-reversed order bitReverse(complex, n); if (n == 2) { // Perform a radix-2 FFT radix2FFT(sign, complex, n, 0); } else if (((float) Math.log(n) % (float) Math.log(4)) == 0) { // Perform a radix-4 FFT radix4FFT(sign, complex, n, 0); } else { // n is a multiple or two or four [8, 32, 128, ...] // Perform a mixed-radix FFT int halfN = n / 2; // Do a radix-4 transform on elements 0..halfN - 1 which // contains even-indexed elements from the original // unsorted set of numbers by definition of the bit // reversal operation radix4FFT(sign, complex, halfN, 0); // Do a radix-4 transform on elements halfN - 1 .. 
n - 1 // which contains odd-indexed elements from the original // unsorted set of numbers by definition of the bit // reversal operation radix4FFT(sign, complex, halfN, halfN); // Pair off even and odd elements and do final radix-2 // transforms, multiplying by twiddle factors as required // Loop variables used to point to pairs of even and odd // elements int g, h; // Array of two complex numbers for performing radix-2 // FFTs on pairs of elements Complex[] radix2x2 = new Complex[2]; // Twiddle factor Complex twiddle = new Complex(); // Values defining twiddle factor double delta = -sign * TWOPI / n; double w = 0; for (g = 0, h = halfN; g < halfN; g++, h++) { // Twiddle factors... twiddle.setRealImag((float) Math.cos(w), (float) Math.sin(w)); complex[h].multiply(twiddle); radix2x2[0] = complex[g]; radix2x2[1] = complex[h]; // Perform the radix-2 FFT radix2FFT(sign, radix2x2, 2, 0); complex[g] = radix2x2[0]; complex[h] = radix2x2[1]; w += delta; } radix2x2 = null; twiddle = null; } if (sign == -1) { // Divide all values by n for (int g = 0; g < n; g++) { complex[g].divide(n); } } } // Perform an in-place radix-4 FFT (if sign is 1) or inverse // FFT (if sign is -1). FFT is performed in the n elements // starting at index lower // // Assumes that n is a power of 2 and that lower + n is less than // or equal to the number of complex numbers given // // This method performs the FFT in-place on the given set. 
private static void radix4FFT(int sign, Complex[] complex, int n, int lower) { // Index of last element in array which will take part in the // FFT int upper = n + lower; // Variables used to hold the indicies of the elements forming // the four inputs to a butterfly int i, j, k, l; // Variables holding the results of the four main operations // performed when processing a butterfly Complex ijAdd = new Complex(); Complex klAdd = new Complex(); Complex ijSub = new Complex(); Complex klSub = new Complex(); // Twiddle factor Complex twiddle = new Complex(); // Values defining twiddle factor double delta, w, w2, w3; double deltaLower = -sign * TWOPI; // intraGap is number of array elements between the // two inputs to a butterfly (equivalent to the number of // butterflies in a cluster) int intraGap; // interGap is the number of array elements between the first // input of the ith butterfly in two adjacent clusters int interGap; for (intraGap = 1, interGap = 4 * intraGap; intraGap < n; intraGap = interGap, interGap = 4 * intraGap) { delta = deltaLower / (float) interGap; // For each butterfly in a cluster w = w2 = w3 = 0; for (int but = 0; but < intraGap; ++but) { // Process the intraGap-th butterfly in each cluster // i is the top input to a butterfly and j the second, // k third and l fourth for (i = (but + lower), j = i + intraGap, k = j + intraGap, l = k + intraGap; i < upper; i += interGap, j += interGap, k += interGap, l += interGap) { // Calculate and apply twiddle factors // cos(0) = 1 and sin(0) = 0 twiddle.setRealImag(1, 0); complex[i].multiply(twiddle); twiddle.setRealImag((float) Math.cos(w2), (float) Math.sin(w2)); complex[j].multiply(twiddle); twiddle.setRealImag((float) Math.cos(w), (float) Math.sin(w)); complex[k].multiply(twiddle); twiddle.setRealImag((float) Math.cos(w3), (float) Math.sin(w3)); complex[l].multiply(twiddle); // Compute the butterfly Complex.add(complex[i], complex[j], ijAdd); Complex.subtract(complex[i], complex[j], ijSub); 
Complex.add(complex[k], complex[l], klAdd); Complex.subtract(complex[k], complex[l], klSub); // Assign values Complex.add(ijAdd, klAdd, complex[i]); klSub.multiply(sign); complex[j].setRealImag(ijSub.getReal() + klSub.getImag(), ijSub.getImag() - klSub.getReal()); Complex.subtract(ijAdd, klAdd, complex[k]); complex[l].setRealImag(ijSub.getReal() - klSub.getImag(), ijSub.getImag() + klSub.getReal()); } w += delta; w2 = w + w; w3 = w2 + w; } intraGap = interGap; } ijAdd = klAdd = ijSub = klSub = twiddle = null; } // Perform an in-place radix-2 FFT (if sign is 1) or inverse // FFT (if sign is -1). FFT is performed in the n elements // starting at index lower // // Assumes that n is a power of 2 and that lower + n is less than // or equal to the number of complex numbers given... // // This method performs the FFT in-place on the given set. private static void radix2FFT(int sign, Complex[] complex, int n, int lower) { // Index of last element in array which will take part in the // FFT int upper = n + lower; // Variables used to hold the indicies of the elements forming // the two inputs to a butterfly int i, j; // intraGap is number of array elements between the // two inputs to a butterfly (equivalent to the number of // butterflies in a cluster) int intraGap; // interGap is the number of array elements between the first // input of the ith butterfly in two adjacent clusters int interGap; // The twiddle factor Complex twiddle = new Complex(); // Values defining twiddle factor float deltaLower = -(float) (sign * Math.PI); float w, delta; // Variable used to hold result of multiplying butterfly input // by a twiddle factor Complex twiddledInput = new Complex(); for (intraGap = 1, interGap = intraGap + intraGap; intraGap < n; intraGap = interGap, interGap = intraGap + intraGap) { delta = deltaLower / (float) intraGap; // For each butterfly in a cluster w = 0; for (int butterfly = 0; butterfly < intraGap; ++butterfly) { // Calculate the twiddle factor 
twiddle.setRealImag((float) Math.cos(w), (float) Math.sin(w)); // i is the top input to a butterfly and j the // bottom for (i = (butterfly + lower), j = i + intraGap; i < upper; i += interGap, j += interGap) { // Calculate the butterfly-th butterfly in // each cluster // Apply the twiddle factor Complex.multiply(complex[j], twiddle, twiddledInput); // Subtraction part of butterfly Complex.subtract(complex[i], twiddledInput, complex[j]); // Addition part of butterfly complex[i].add(twiddledInput); } w += delta; } intraGap = interGap; } twiddle = twiddledInput = null; } public String getRevision() { return null; } public static class MathsPower2 { /** Return 2 to the power of <code>power</code> */ public static int pow2(int power) { return (1 << power); } /** Is <code>value</code> a power of 2? */ public static boolean isPow2(int value) { return (value == (int) roundPow2(value)); } /** Round <code>value</code> to nearest power of 2 */ public static float roundPow2(float value) { float power = (float) (Math.log(value) / Math.log(2)); int intPower = Math.round(power); return (float) (pow2(intPower)); } /** * Return the log to base 2 of <code>value</code> rounded to the nearest integer */ public static int integerLog2(float value) { int intValue; if (value < 2) { intValue = 0; } else if (value < 4) { intValue = 1; } else if (value < 8) { intValue = 2; } else if (value < 16) { intValue = 3; } else if (value < 32) { intValue = 4; } else if (value < 64) { intValue = 5; } else if (value < 128) { intValue = 6; } else if (value < 256) { intValue = 7; } else if (value < 512) { intValue = 8; } else if (value < 1024) { intValue = 9; } else if (value < 2048) { intValue = 10; } else if (value < 4098) { intValue = 11; } else if (value < 8192) { intValue = 12; } else { intValue = Math.round(roundPow2(value)); } return intValue; } } /** * Remove all attributes unless the target class I'm not sure if the indexing * changes * * @param n */ public void truncate(Instances d, int n) { 
int att = n; if (att < d.numAttributes() - 1) {// Remove the first two terms first d.deleteAttributeAt(0); d.deleteAttributeAt(0); } while (att < d.numAttributes()) { if (att == d.classIndex()) att++; else d.deleteAttributeAt(att); } } public static void computeDft(double[] inreal, double[] inimag, double[] outreal, double[] outimag) { int n = inreal.length; for (int k = 0; k < n; k++) { // For each output element double sumreal = 0; double sumimag = 0; for (int t = 0; t < n; t++) { // For each input element sumreal += inreal[t] * Math.cos(2 * Math.PI * t * k / n) + inimag[t] * Math.sin(2 * Math.PI * t * k / n); sumimag += -inreal[t] * Math.sin(2 * Math.PI * t * k / n) + inimag[t] * Math.cos(2 * Math.PI * t * k / n); } outreal[k] = sumreal; outimag[k] = sumimag; } } /** Author Mike Jackson - University of Edinburgh - 1999-2001 */ /** * The <code>Complex</code> class generates objects that represent complex * numbers in terms of real and imaginary components and supports addition, * subtraction, multiplication, scalar multiplication and division or these * numbers. The calculation of complex conjugates, magnitude, phase and power * (in decibels) of the <code>Complex</code> numbers are also supported. 
*/ public static class Complex implements Cloneable { /** Constant required to calculate power values in dBs: log 10 */ protected static final float LOG10 = (float) Math.log(10); /** * Constant required to calculate power values in dBs: 20 / log 10 */ protected static final float DBLOG = 20 / LOG10; /** Real component */ protected float real; /** Imaginary component */ protected float imag; /** Create a new <code>Complex</code> number 0 + j0 */ public Complex() { real = imag = 0f; } /** * Create a new <code>Complex</code> number <code>real</code> + * j(<code>imag</code>) */ public Complex(float real, float imag) { this.real = real; this.imag = imag; } public String toString() { return real + "+" + imag + "*i"; } public Complex(double real, double imag) { this.real = (float) real; this.imag = (float) imag; } /** * Set the <code>Complex</code> number to be <code>real</code> + * j(<code>imag</code>) */ public void setRealImag(float real, float imag) { this.real = real; this.imag = imag; } /** Get real component */ public float getReal() { return real; } /** Set real component */ public void setReal(float real) { this.real = real; } /** Get imaginary component */ public float getImag() { return imag; } /** Set imaginary component */ public void setImag(float imag) { this.imag = imag; } /** * Add the given <code>Complex</code> number to this <code>Complex</code> number */ public void add(Complex complex) { real += complex.real; imag += complex.imag; } /** * Subtract the given <code>Complex</code> number from this <code>Complex</code> * number */ public void subtract(Complex complex) { real -= complex.real; imag -= complex.imag; } /** * Multiply this <code>Complex</code> number by the given factor */ public void multiply(float factor) { real *= factor; imag *= factor; } /** Divide this <code>Complex</code> number by the given factor */ public void divide(float factor) { real /= factor; imag /= factor; } /** * Multiply this <code>Complex</code> number by the given 
<code>Complex</code> * number */ public void multiply(Complex complex) { float nuReal = real * complex.real - imag * complex.imag; float nuImag = real * complex.imag + imag * complex.real; real = nuReal; imag = nuImag; } /** * Set this <code>Complex</code> number to be its complex conjugate */ public void conjugate() { imag = (-imag); } /** * Return result of adding the complex conjugate of this <code>Complex</code> * number to this <code>Complex</code> number */ public float addConjugate() { return real + real; } /** * Return result of subtracting the complex conjugate of this * <code>Complex</code> number from this <code>Complex</code> number */ public float subtractConjugate() { return imag + imag; } /** Return the magnitude of the <code>Complex</code> number */ public float getMagnitude() { return magnitude(real, imag); } /** Return the phase of the <code>Complex</code> number */ public float getPhase() { return phase(real, imag); } /** Return the power of this <code>Complex</code> number in dBs */ public float getPower() { return power(real, imag); } /** Add two <code>Complex</code> numbers: c = a + b */ public static void add(Complex a, Complex b, Complex c) { c.real = a.real + b.real; c.imag = a.imag + b.imag; } /** Subtract two <code>Complex</code> numbers: c = a - b */ public static void subtract(Complex a, Complex b, Complex c) { c.real = a.real - b.real; c.imag = a.imag - b.imag; } /** * Multiply a <code>Complex</code> number by a factor: b = a * factor */ public static void multiply(Complex a, float factor, Complex b) { b.real = a.real * factor; b.imag = a.imag * factor; } /** * Divide a <code>Complex</code> number by a factor: b = a / factor */ public static void divide(Complex a, float factor, Complex b) { b.real = a.real / factor; b.imag = a.imag / factor; } /** Multiply two <code>Complex</code> numbers: c = a * b */ public static void multiply(Complex a, Complex b, Complex c) { c.real = a.real * b.real - a.imag * b.imag; c.imag = a.real * b.imag + 
a.imag * b.real; } /** Place the <code>Complex</code> conjugate of a into b */ public static void conjugate(Complex a, Complex b) { b.real = a.real; b.imag = -a.imag; } /** * Return the magnitude of a <code>Complex</code> number <code>real</code> + * (<code>imag</code>)j */ public static float magnitude(float real, float imag) { return (float) Math.sqrt(real * real + imag * imag); } /** * Return the phase of a <code>Complex</code> number <code>real</code> + * (<code>imag</code>)j */ public static float phase(float real, float imag) { return (float) Math.atan2(imag, real); } /** * Return the power of a <code>Complex</code> number <code>real</code> + * (<code>imag</code>)j */ public static float power(float real, float imag) { return DBLOG * (float) Math.log(magnitude(real, imag)); } /** * Place the real components of the first <code>n</code> elements of the array * <code>complex</code> of <code>Complex</code> numbers into the given * <code>reals</code> array */ public static void reals(int n, Complex[] complex, float[] reals) { for (int i = 0; i < n; ++i) { reals[i] = complex[i].real; } } /** * Place the imaginary components of the first <code>n</code> elements of the * array <code>complex</code> of <code>Complex</code> numbers into the given * <code>imags</code> array */ public static void imaginaries(int n, Complex[] complex, float[] imags) { for (int i = 0; i < n; ++i) { imags[i] = complex[i].imag; } } /** * Place the magnitudes of the first <code>n</code> elements of the array * <code>complex</code> of <code>Complex</code> numbers into the given * <code>mags</code> array */ public static void magnitudes(int n, Complex[] complex, float[] mags) { for (int i = 0; i < n; ++i) { mags[i] = complex[i].getMagnitude(); } } /** * Place the powers (in dBs) of the first <code>n</code> elements of the array * <code>complex</code> of <code>Complex</code> numbers into the given * <code>powers</code> array */ public static void powers(int n, Complex[] complex, float[] powers) { 
for (int i = 0; i < n; ++i) { powers[i] = complex[i].getPower(); } } /** * Place the phases (in radians) of the first <code>n</code> elements of the * array <code>complex</code> of <code>Complex</code> numbers into the given * <code>phases</code> array */ public static void phase(int n, Complex[] complex, float[] phases) { for (int i = 0; i < n; ++i) { phases[i] = complex[i].getPhase(); } } /** Return a clone of the <code>Complex</code> object */ public Object clone() { return new Complex(real, imag); } } // Primitives version, assumes zero mean global, passes max run length public int[] processSingleSeries(double[] d, int mrl) { return null; } public static void basicTest() { // Test FFT // Series 30,-1,2,3,3,2,-1,-4 /* * FFT Desired 34 19.9289321881345-5.82842712474618i 32-2i * 34.0710678118655+0.171572875253798i 34 34.0710678118655-0.171572875253815i * 32+2i 19.9289321881345+5.8284271247462i FFT Achieved 34 0 19.928932 * -5.8284273 32 -2 34.071068 0.17157269 34 0 34.071068 -0.17157269 32 2 * 19.928932 5.8284273 * * */ // Test FFT with truncation System.out.println("Basic test of FFT"); System.out.println("Series: 30,-1,2,3,3,2,-1,-4"); System.out.println(" /*FFT Desired" + " 34\n" + "19.9289321881345-5.82842712474618i\n" + "32-2i\n" + "34.0710678118655+0.171572875253798i\n" + "34\n" + "34.0710678118655-0.171572875253815i\n" + "32+2i\n" + "19.9289321881345+5.8284271247462i\n" + "FFT Achieved\n" + "34 0\n" + "19.928932 -5.8284273\n" + "32 -2\n" + "34.071068 0.17157269\n" + "34 0\n" + "34.071068 -0.17157269\n" + "32 2\n" + "19.928932 5.8284273"); double[] d = { 30, -1, 2, 3, 3, 2, -1, -4 }; int n = 8; Complex[] x = new Complex[n]; for (int i = 0; i < n; i++) x[i] = new Complex(d[i], 0.0); for (int i = 0; i < n; i++) System.out.println(x[i].getReal() + "," + x[i].getImag()); System.out.println("Transformed"); FFT fft = new FFT(); fft.fft(x, x.length); for (int i = 0; i < n; i++) System.out.println(x[i].getReal() + "," + x[i].getImag()); fft.fft(x, x.length); } 
public static void paddingTest() { /* * Test to check it works correctly with padded series //Series * 30,-1,2,3,3,2,-1,-4,3 //Padded series 30,-1,2,3,3,2,-1,-4,3,0,0,0,0, */ } public static void main(String[] args) { // basicTest(); FFT fft = new FFT(); int size = 8; double[] testSeries = new double[size]; for (int i = 0; i < size; i++) { testSeries[i] = Math.random(); } Complex[] dft = fft.dft(testSeries); Complex[] dft2 = new Complex[size]; for (int i = 0; i < size; i++) dft2[i] = new Complex(testSeries[i], 0); Complex[] dft3 = fft.dft(dft2); for (int i = 0; i < size; i++) System.out.println(dft[i] + " ::: " + dft3[i]); System.exit(0); matlabComparison(); } /* * Comparison to running the Matlab script FFT_Testing * */ public static void matlabComparison() { // MATLAB Output generated by // Power of 2: use FFT // Create set of instances with 16 attributes, with values // Case 1: All Zeros // Case 2: 1,2,...16 // Case 3: -8,-7, -6,...,0,1,...7 // Case 4: 0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1 // Instances test1=ClassifierTools.loadData("C:\\Users\\ajb\\Dropbox\\TSC // Problems\\TestData\\FFT_test1"); /* * Instances test2=ClassifierTools. * loadData("C:\\Users\\ajb\\Dropbox\\TSC Problems\\TestData\\FFT_test2"); * Instances t2; try{ // t2=fft.process(test1); // * System.out.println(" FFT ="+t2); fft.padSeries(true); t2=fft.process(test2); * System.out.println(" FFT with padding="+t2); fft=new FFT(); * fft.padSeries(false); t2=fft.process(test2); * System.out.println(" FFT with truncation="+t2); fft=new FFT(); fft.useDFT(); * t2=fft.process(test2); System.out.println(" DFT ="+t2); * * * }catch(Exception e){ System.out.println(" Errrrrrr = "+e); * e.printStackTrace(); System.exit(0); } */ // Not a power of 2: use padding // Not a power of 2: use truncate // Not a power of 2: use DFT } }
34,191
29.019315
132
java
tsml-java
tsml-java-master/src/main/java/tsml/transformers/Fast_FFT.java
/* * This file is part of the UEA Time Series Machine Learning (TSML) toolbox. * * The UEA TSML toolbox is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The UEA TSML toolbox is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>. */ package tsml.transformers; import experiments.data.DatasetLists; import tsml.data_containers.TimeSeries; import tsml.data_containers.TimeSeriesInstance; import tsml.data_containers.utilities.TimeSeriesSummaryStatistics; import utilities.InstanceTools; import org.apache.commons.math3.complex.Complex; import org.apache.commons.math3.transform.DftNormalization; import org.apache.commons.math3.transform.FastFourierTransformer; import org.apache.commons.math3.transform.TransformType; import weka.core.*; import static experiments.data.DatasetLoading.loadDataNullable; public class Fast_FFT implements Transformer { final String className = "sandbox.transforms.FFT"; int nfft = 512; /** * Parses a given list of options. 
* <p/> * * <!-- options-start --> Valid options are: * <p/> * * <pre> * -L &lt;num&gt; * max lag for the ACF function * </pre> * * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { String nfftString = Utils.getOption('L', options); if (nfftString.length() != 0) nfft = Integer.parseInt(nfftString); else nfft = 512; } public void setNFFT(int nfft) { this.nfft = nfft; } @Override public Instances determineOutputFormat(Instances inputFormat) { Instances instances = null; if (inputFormat.attribute("samplerate") != null) { nfft = (int) inputFormat.get(0).value(inputFormat.attribute("samplerate")); inputFormat.deleteAttributeAt(inputFormat.attribute("samplerate").index()); } else { // nfft = inputFormat.numAttributes() - 1; } // nearestPowerOF2(nfft); FastVector attributes = new FastVector(nfft / 2); for (int i = 0; i < (nfft / 2); i++) { attributes.addElement(new Attribute("FFT_att" + String.valueOf(i + 1))); } FastVector classValues = new FastVector(inputFormat.classAttribute().numValues()); for (int i = 0; i < inputFormat.classAttribute().numValues(); i++) classValues.addElement(inputFormat.classAttribute().value(i)); attributes.addElement(new Attribute(inputFormat.attribute(inputFormat.classIndex()).name(), classValues)); instances = new Instances("", attributes, 0); instances.setClassIndex(instances.numAttributes() - 1); return instances; } public void nearestPowerOF2(int x) { float power = (float) (Math.log(x) / Math.log(2)); int m = (int) Math.ceil(power); nfft = (int) Math.pow(2.0, (double) m); } @Override public Instance transform(Instance inst) { Complex[] complexData = new Complex[nfft]; double[] data = InstanceTools.ConvertInstanceToArrayRemovingClassValue(inst); for (int j = 0; j < complexData.length; j++) { complexData[j] = new Complex(0.0, 0.0); } double mean = 0; if (data.length < nfft) { for (int j = 
0; j < data.length; j++) { mean += data[j]; } mean /= data.length; } // int limit = nfft < data[i].length ? nfft : data[i].length; for (int j = 0; j < nfft; j++) { if (j < data.length) complexData[j] = new Complex(data[j], 0); else complexData[j] = new Complex(mean, 0); } FastFourierTransformer fft = new FastFourierTransformer(DftNormalization.STANDARD); complexData = fft.transform(complexData, TransformType.FORWARD); double[] FFTData = new double[(nfft / 2) + (inst.classIndex() >= 0 ? 1 : 0)]; for (int j = 0; j < (nfft / 2); j++) { FFTData[j] = complexData[j].abs(); } if (inst.classIndex() >= 0) FFTData[FFTData.length - 1] = inst.classValue(); return new DenseInstance(1, FFTData); } @Override public TimeSeriesInstance transform(TimeSeriesInstance inst) { double[][] out = new double[inst.getNumDimensions()][]; int i = 0; for (TimeSeries ts : inst) { //TODO: make this NaN Safe. Mean is NaN safe but toArray isnt. out[i++] = calculate_FFT(ts.toValueArray(), TimeSeriesSummaryStatistics.mean(ts)); } return new TimeSeriesInstance(out, inst.getLabelIndex()); } private double[] calculate_FFT(double[] data, double mean) { Complex[] complexData = new Complex[nfft]; for (int j = 0; j < complexData.length; j++) { complexData[j] = new Complex(0.0, 0.0); } // int limit = nfft < data[i].length ? 
nfft : data[i].length; for (int j = 0; j < nfft; j++) { if (j < data.length) complexData[j] = new Complex(data[j], 0); else complexData[j] = new Complex(mean, 0); } FastFourierTransformer fft = new FastFourierTransformer(DftNormalization.STANDARD); complexData = fft.transform(complexData, TransformType.FORWARD); double[] FFTData = new double[(nfft / 2)]; for (int j = 0; j < (nfft / 2); j++) { FFTData[j] = complexData[j].abs(); } return FFTData; } public static void main(String[] args) { Fast_FFT fast_fft = new Fast_FFT(); Instances[] data = new Instances[2]; data[0] = loadDataNullable("Z:/ArchiveData/Univariate_arff/" + DatasetLists.tscProblems85[28] + "/" + DatasetLists.tscProblems85[28]); data[1] = fast_fft.transform(data[0]); // Before transform. System.out.println(data[0].get(0).toString()); // After transform. System.out.println(data[1].get(0).toString()); } }
6,734
33.896373
114
java