repo stringlengths 1 191 ⌀ | file stringlengths 23 351 | code stringlengths 0 5.32M | file_length int64 0 5.32M | avg_line_length float64 0 2.9k | max_line_length int64 0 288k | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
tsml-java | tsml-java-master/src/main/java/statistics/simulators/IntervalModel.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
/*
Interval model for simulators.
Intervals and shapes are fixed for all series from a given class
Shapes differ between models. Shapes are randomised at construction, and so
the calling class must make sure they are different between classes with calls to
randomiseShape.
Intervals are set externally by this class by calls to generateRandomIntervals
to generate the intervals for one model and setIntervals for models of other classes
*/
package statistics.simulators;
import fileIO.OutFile;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import statistics.simulators.DictionaryModel.Shape;
import statistics.simulators.DictionaryModel.ShapeType;
import static statistics.simulators.Model.rand;
/**
*
* @author ajb
*/
public class IntervalModel extends Model{
    /** Number of intervals (shape occurrences) inserted per series. */
    private int nosIntervals=3; //
    /** Series length; the interval length is derived from it. */
    private int seriesLength=300; // Need to set intervals, maybe allow different lengths?
    /** Noise-to-signal ratio: larger values give shorter intervals. */
    private int noiseToSignal=4;
    /** Length of every interval; recomputed whenever its inputs change. */
    private int intervalLength=seriesLength/(nosIntervals*noiseToSignal);
    private int base=-1;
    private int amplitude=2;
    /** Shape inserted at every interval; fixed for all series of this model. */
    private Shape baseShape;
    /** Sorted start points of the intervals. */
    ArrayList<Integer> locations;
    public IntervalModel(){
        baseShape=new Shape(intervalLength);
        locations=new ArrayList<>();
    }
    /**
     * Build a model with n intervals and generate their locations immediately.
     * NOTE(review): this uses a divisor of nosIntervals*2 where every setter
     * uses nosIntervals*noiseToSignal — confirm whether the discrepancy is
     * intentional before unifying.
     * @param n number of intervals
     */
    public IntervalModel(int n){
        this();
        nosIntervals=n;
        intervalLength=seriesLength/(nosIntervals*2);
        baseShape.setLength(intervalLength);
        createIntervals();
    }
    /** Set the noise-to-signal ratio and resize the interval/shape length. */
    public void setNoiseToSignal(int n){
        noiseToSignal=n;
        intervalLength=seriesLength/(nosIntervals*noiseToSignal);
        baseShape.setLength(intervalLength);
    }
    /** Set the number of intervals and resize the interval/shape length. */
    public void setNosIntervals(int n){
        nosIntervals=n;
        intervalLength=seriesLength/(nosIntervals*noiseToSignal);
        baseShape.setLength(intervalLength);
    }
    /** Set the series length and resize the interval/shape length. */
    public void setSeriesLength(int n){
        seriesLength=n;
        intervalLength=seriesLength/(nosIntervals*noiseToSignal);
        baseShape.setLength(intervalLength);
    }
    /** Choose the base shape type; the current length is re-applied. */
    public void setBaseShapeType(ShapeType st){
        baseShape.setType(st);
        baseShape.setLength(intervalLength);
    }
    /** (Re)generate non-overlapping interval locations for this model. */
    public final void createIntervals(){
        locations=new ArrayList<>(nosIntervals);
        setNonOverlappingIntervals();
        baseShape.setLength(intervalLength);
    }
    /**
     * Adopt interval locations from another model so that two classes share
     * placement while differing in shape.
     * @param l start points to copy
     * @param length interval length to adopt
     */
    public void setIntervals(ArrayList<Integer> l, int length){
        locations=new ArrayList<>(l);
        intervalLength=length;
        baseShape.setLength(intervalLength);
    }
    public ArrayList<Integer> getIntervals(){return locations;}
    public int getIntervalLength(){ return intervalLength;}
    /**
     * Rejection sampling of interval placement: draw random mid points until
     * each is non-overlapping with all previously accepted ones, then convert
     * mid points to start points and sort. Assumes the series is long enough
     * to hold all nosIntervals intervals, otherwise the draw loops forever.
     * @return always true
     */
    public boolean setNonOverlappingIntervals(){
        for(int i=0;i<nosIntervals;i++){
            boolean ok=false;
            int l=intervalLength/2;
            while(!ok){
                ok=true;
                //Search mid points to level the distribution up somewhat
                l=rand.nextInt(seriesLength-intervalLength)+intervalLength/2;
                for(int in:locations){
                    //Reject l if the two intervals would overlap either way round
                    if((l>=in-intervalLength && l<in+intervalLength)
                        ||(l<in-intervalLength && l+intervalLength>in) ){
                        ok=false;
                        break;
                    }
                }
            }
            locations.add(l);
        }
        //Revert mid points to start points
        for(int i=0;i<locations.size();i++){
            int val=locations.get(i);
            locations.set(i, val-intervalLength/2);
        }
        Collections.sort(locations);
        return true;
    }
    /**
     * Generate the observation at the current time step t: Gaussian noise plus
     * the base shape value when t falls inside an interval. Advances t.
     */
    @Override
    public double generate(){
        //Noise term
        double value=error.simulate();
        //Find the first interval whose end is at or beyond t
        int insertionPoint=0;
        while(insertionPoint<locations.size() && locations.get(insertionPoint)+intervalLength<t)
            insertionPoint++;
        if(insertionPoint>=locations.size()){ //Past all start points, use the last
            insertionPoint=locations.size()-1;
        }
        int point=locations.get(insertionPoint);
        if(point<=t && point+intervalLength>t){ //t lies within this interval
            value+=baseShape.generateWithinShapelet((int)(t-point));
        }
        t++;
        return value;
    }
    /**
     * p[0]=number of intervals, p[1]=interval length.
     */
    @Override
    public void setParameters(double[] p) {
        nosIntervals=(int)p[0];
        intervalLength=(int)p[1];
    }
    /**
     * Randomise this model's shape; if m is non-null, keep redrawing until it
     * differs from m's shape, so two classes never share the same shape.
     */
    public void randomiseShape(IntervalModel m){
        baseShape.randomiseShape();
        if(m!=null){
            while(baseShape.equals(m.baseShape))
                baseShape.randomiseShape();
        }
    }
    /** Example: two models sharing intervals but with different shapes. */
    public static void main(String[] args){
        int length=500;
        Model.setGlobalRandomSeed(10);
        Model.setDefaultSigma(0);
        IntervalModel m1=new IntervalModel();
        m1.setBaseShapeType(ShapeType.SINE);
        m1.setNosIntervals(3);
        m1.setSeriesLength(length);
        m1.createIntervals();
        IntervalModel m2=new IntervalModel();
        m2.setBaseShapeType(ShapeType.SPIKE);
        m2.setNosIntervals(3);
        m2.setIntervals(m1.getIntervals(), m1.getIntervalLength());
        double[] d1=m1.generateSeries(length);
        double[] d2=m2.generateSeries(length);
        OutFile of=new OutFile("C:\\temp\\intervalEx.csv");
        for(int i=0;i<length;i++)
            of.writeLine(d1[i]+","+d2[i]);
    }
    /**
     * Parameter summary. Fix: iterates the stored locations directly; the old
     * loop ran to nosIntervals and threw IndexOutOfBoundsException whenever
     * fewer locations had been generated (e.g. before createIntervals).
     */
    @Override
    public String toString(){
        String s="NosIntervals,"+nosIntervals;
        s+="\nIntervalLength,"+intervalLength;
        s+="\nNoiseToSignal,"+noiseToSignal;
        s+="\nShape,"+baseShape.toString()+" ,Locations,";
        for(int loc:locations)
            s+=loc+" ";
        return s;
    }
}
| 7,104 | 35.06599 | 96 | java |
tsml-java | tsml-java-master/src/main/java/statistics/simulators/MatrixProfileModelVersion1.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
/*
Bad ones
Model 1 = 93,119,
Model 2 = 67,98,
Classifier MP_ED acc =0.45555555555555555
Model 1 = 84,118,
Model 2 = 66,109,
*/
package statistics.simulators;
import fileIO.OutFile;
import java.util.*;
import java.io.*;
import statistics.distributions.NormalDistribution;
import statistics.simulators.DictionaryModel.ShapeType;
import static statistics.simulators.Model.rand;
import statistics.simulators.ShapeletModel.Shape;
public class MatrixProfileModelVersion1 extends Model {
    /** Number of shape occurrences per series (1 when modelling a discord). */
    private int nosLocations=2; //
    /** Length of the embedded shape. */
    private int shapeLength=29;
    public static double MINBASE=-2;
    public static double MINAMP=2;
    public static double MAXBASE=2;
    public static double MAXAMP=4;
    DictionaryModel.Shape shape;//Will change for each series
    private static int GLOBALSERIESLENGTH=500;
    private int seriesLength; // Need to set intervals, maybe allow different lengths?
    private int base=-1;
    private int amplitude=2;
    /** Rotates through the ShapeType values, one per generated base shape. */
    private int shapeCount=0;
    /** Alternates between calls: every second series is sign-inverted. */
    private boolean invert=false;
    boolean discord=false;
    /** Raw values inserted at each occurrence; redrawn per series. */
    double[] shapeVals;
    /** Sorted start points of the shape occurrences. */
    ArrayList<Integer> locations;
    public static int getGlobalLength(){ return GLOBALSERIESLENGTH;}
    public MatrixProfileModelVersion1(){
        shapeCount=0;
        seriesLength=GLOBALSERIESLENGTH;
        locations=new ArrayList<>();
        setNonOverlappingIntervals();
        shapeVals=new double[shapeLength];
        generateRandomShapeVals();
    }
    /** @param d true to model a discord (a single occurrence). */
    public MatrixProfileModelVersion1(boolean d){
        shapeCount=0;
        discord=d;
        if(discord)
            nosLocations=1;
        seriesLength=GLOBALSERIESLENGTH;
        locations=new ArrayList<>();
        setNonOverlappingIntervals();
        shapeVals=new double[shapeLength];
        generateRandomShapeVals();
    }
    /** Draw shapeLength uniform values in [MINBASE,MAXBASE). */
    private void generateRandomShapeVals(){
        for(int i=0;i<shapeLength;i++)
            shapeVals[i]=MINBASE+(MAXBASE-MINBASE)*Model.rand.nextDouble();
    }
    public void setSeriesLength(int n){
        seriesLength=n;
    }
    public static void setGlobalSeriesLength(int n){
        GLOBALSERIESLENGTH=n;
    }
    /**
     * Place nosLocations start points so occurrences cannot overlap: keep a
     * pool of candidate start points and, after each draw, remove the
     * candidates inside the window surrounding the chosen point.
     */
    public void setNonOverlappingIntervals(){
        ArrayList<Integer> startPoints=new ArrayList<>();
        for(int i=shapeLength+1;i<seriesLength-shapeLength;i++)
            startPoints.add(i);
        for(int i=0;i<nosLocations;i++){
            int pos=rand.nextInt(startPoints.size());
            int l=startPoints.get(pos);
            locations.add(l);
            //Remove candidates from shapeLength before the pick, 3*shapeLength wide
            if(pos<shapeLength)
                pos=0;
            else
                pos=pos-shapeLength;
            for(int j=0;startPoints.size()>pos && j<(3*shapeLength);j++)
                startPoints.remove(pos);
        }
        Collections.sort(locations);
    }
    @Override
    public void setParameters(double[] p) {
        throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
    }
    /** Adopt locations (and shape length) from another model. */
    public void setLocations(ArrayList<Integer> l, int length){
        locations=new ArrayList<>(l);
        shapeLength=length;
    }
    public ArrayList<Integer> getIntervals(){return locations;}
    public int getShapeLength(){ return shapeLength;}
    /** Create a shape of the next type in rotation with random base/amplitude. */
    public void generateBaseShape(){
        //Randomise BASE and AMPLITUDE
        double b=MINBASE+(MAXBASE-MINBASE)*Model.rand.nextDouble();
        double a=MINAMP+(MAXAMP-MINAMP)*Model.rand.nextDouble();
        ShapeType[] all=ShapeType.values();
        ShapeType st=all[(shapeCount++)%all.length];
        shape=new DictionaryModel.Shape(st,shapeLength,b,a);
    }
    /**
     * Generate one series of length n. Shape values are redrawn every call;
     * alternate calls return the sign-inverted series, and a new base shape
     * is generated on the non-inverted calls.
     */
    @Override
    public double[] generateSeries(int n)
    {
        t=0;
        double[] d;
        generateRandomShapeVals();
        if(invert){
            d= new double[n];
            for(int i=0;i<n;i++)
                d[i]=-generate();
            invert=false;
        }
        else{
            generateBaseShape();
            d = new double[n];
            for(int i=0;i<n;i++)
                d[i]=generate();
            invert=true;
        }
        return d;
    }
    /**
     * Config 1: raw shapeVals inside an occurrence, pure noise outside.
     * Advances t.
     */
    private double generateConfig1(){
        double value=0;
        //Find the first occurrence whose end is at or beyond t
        int insertionPoint=0;
        while(insertionPoint<locations.size() && locations.get(insertionPoint)+shapeLength<t)
            insertionPoint++;
        //Past all the start points, use the last
        if(insertionPoint>=locations.size()){
            insertionPoint=locations.size()-1;
        }
        int point=locations.get(insertionPoint);
        if(point<=t && point+shapeLength>t) //t lies inside this occurrence
            value=shapeVals[(int)(t-point)];
        else
            value= error.simulate();
        t++;
        return value;
    }
    /**
     * Config 2: noise everywhere with the base shape added on top; base and
     * amplitude are re-randomised at the start of each later occurrence.
     * Advances t.
     */
    private double generateConfig2(){
        double value=error.simulate();
        //Find the first occurrence whose end is at or beyond t
        int insertionPoint=0;
        while(insertionPoint<locations.size() && locations.get(insertionPoint)+shapeLength<t)
            insertionPoint++;
        //Past all the start points, use the last
        if(insertionPoint>=locations.size()){
            insertionPoint=locations.size()-1;
        }
        int point=locations.get(insertionPoint);
        if(insertionPoint>0 && point==t){//New occurrence begins: randomise scale
            double b=MINBASE+(MAXBASE-MINBASE)*Model.rand.nextDouble();
            double a=MINAMP+(MAXAMP-MINAMP)*Model.rand.nextDouble();
            shape.setAmp(a);
            shape.setBase(b);
        }
        if(point<=t && point+shapeLength>t){ //t lies inside this occurrence
            value+=shape.generateWithinShapelet((int)(t-point));
        }
        t++;
        return value;
    }
    //Generate point t
    @Override
    public double generate(){
        return generateConfig1();
    }
    /**
     * Write 20 example series (10 per model) column-wise to a CSV.
     * Fixes: the second model was never used (both loops drew from m1), and
     * only the first 10 of the 20 series were written out.
     */
    public static void generateExampleData(){
        int length=500;
        GLOBALSERIESLENGTH=length;
        Model.setGlobalRandomSeed(3);
        Model.setDefaultSigma(0);
        MatrixProfileModelVersion1 m1=new MatrixProfileModelVersion1();
        MatrixProfileModelVersion1 m2=new MatrixProfileModelVersion1();
        double[][] d=new double[20][];
        for(int i=0;i<10;i++){
            d[i]=m1.generateSeries(length);
        }
        for(int i=10;i<20;i++){
            d[i]=m2.generateSeries(length); //was m1: the second model was unused
        }
        OutFile of=new OutFile("C:\\temp\\MP_ExampleSeries.csv");
        for(int i=0;i<length;i++){
            for(int j=0;j<d.length;j++) //was j<10: half the series were dropped
                of.writeString(d[j][i]+",");
            of.writeString("\n");
        }
    }
    /** Comma-separated list of occurrence start points. */
    @Override
    public String toString(){
        String str="";
        for(Integer i:locations)
            str+=i+",";
        return str;
    }
    public static void main(String[] args){
        generateExampleData();
        System.exit(0);
        //Set up two models with same intervals but different shapes
        int length=500;
        Model.setGlobalRandomSeed(10);
        Model.setDefaultSigma(0.1);
        MatrixProfileModelVersion1 m1=new MatrixProfileModelVersion1();
        MatrixProfileModelVersion1 m2=new MatrixProfileModelVersion1();
        double[] d1=m1.generateSeries(length);
        double[] d2=m2.generateSeries(length);
        OutFile of=new OutFile("C:\\temp\\MP_Ex.csv");
        for(int i=0;i<length;i++)
            of.writeLine(d1[i]+","+d2[i]);
    }
}
| 10,540 | 34.136667 | 135 | java |
tsml-java | tsml-java-master/src/main/java/statistics/simulators/MatrixProfileModelVersion2.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
/**
* This is the discord model.
*
 * All data has a big spike, class 1 has a small recurrence.
*/
package statistics.simulators;
import fileIO.OutFile;
import java.util.*;
import java.io.*;
import statistics.distributions.NormalDistribution;
import statistics.simulators.DictionaryModel.ShapeType;
import static statistics.simulators.Model.rand;
import statistics.simulators.ShapeletModel.Shape;
public class MatrixProfileModelVersion2 extends Model {
    /** Number of shape occurrences per series (1 when modelling a discord). */
    private int nosLocations=2; //
    /** Length of the embedded shape. */
    private int shapeLength=29;
    public static double MINBASE=-2;
    public static double MINAMP=2;
    public static double MAXBASE=2;
    public static double MAXAMP=4;
    DictionaryModel.Shape shape;//Will change for each series
    private static int GLOBALSERIESLENGTH=500;
    private int seriesLength; // Need to set intervals, maybe allow different lengths?
    private int base=-1;
    private int amplitude=2;
    /** Rotates through the ShapeType values, one per generated base shape. */
    private int shapeCount=0;
    /** Alternates between calls: every second series is sign-inverted. */
    private boolean invert=false;
    boolean discord=false;
    /** Raw values inserted at each occurrence; redrawn per series. */
    double[] shapeVals;
    /** Sorted start points of the shape occurrences. */
    ArrayList<Integer> locations;
    private static double[] spike1;
    private static double[] spike2;
    /**
     * Builds the fixed spike templates (spike2 is spike1 scaled by 1/5).
     * NOTE(review): never called from this class; indices 16..28 of spike1
     * other than [28] are left at 0, and the ascending ramp adds (max-min)/5
     * fourteen times so it overshoots max — confirm the intended shape
     * before using these arrays.
     */
    private static void makeSpikes(){
        spike1=new double[29];
        spike2=new double[29];
        double max=3;
        double min=-2;
        spike1[0]=min;
        spike1[28]=min;
        spike1[15]=max;
        for(int i=1;i<=14;i++)
            spike1[i]=spike1[i-1]+(max-min)/5;
        for(int i=0;i<29;i++)
            spike2[i]=spike1[i]/5;
    }
    public static int getGlobalLength(){ return GLOBALSERIESLENGTH;}
    public MatrixProfileModelVersion2(){
        shapeCount=0;
        seriesLength=GLOBALSERIESLENGTH;
        locations=new ArrayList<>();
        setNonOverlappingIntervals();
        shapeVals=new double[shapeLength];
        generateRandomShapeVals();
    }
    /** @param d true to model a discord (a single occurrence). */
    public MatrixProfileModelVersion2(boolean d){
        shapeCount=0;
        discord=d;
        if(discord)
            nosLocations=1;
        seriesLength=GLOBALSERIESLENGTH;
        locations=new ArrayList<>();
        setNonOverlappingIntervals();
        shapeVals=new double[shapeLength];
        generateRandomShapeVals();
    }
    /** Draw shapeLength uniform values in [MINBASE,MAXBASE). */
    private void generateRandomShapeVals(){
        for(int i=0;i<shapeLength;i++)
            shapeVals[i]=MINBASE+(MAXBASE-MINBASE)*Model.rand.nextDouble();
    }
    public void setSeriesLength(int n){
        seriesLength=n;
    }
    public static void setGlobalSeriesLength(int n){
        GLOBALSERIESLENGTH=n;
    }
    /**
     * Place nosLocations start points so occurrences cannot overlap: keep a
     * pool of candidate start points and, after each draw, remove the
     * candidates inside the window surrounding the chosen point.
     */
    public void setNonOverlappingIntervals(){
        ArrayList<Integer> startPoints=new ArrayList<>();
        for(int i=shapeLength+1;i<seriesLength-shapeLength;i++)
            startPoints.add(i);
        for(int i=0;i<nosLocations;i++){
            int pos=rand.nextInt(startPoints.size());
            int l=startPoints.get(pos);
            locations.add(l);
            //Remove candidates from shapeLength/2 before the pick, 2*shapeLength wide
            if(pos<shapeLength/2)
                pos=0;
            else
                pos=pos-shapeLength/2;
            for(int j=0;startPoints.size()>pos && j<(2*shapeLength);j++)
                startPoints.remove(pos);
        }
        Collections.sort(locations);
    }
    @Override
    public void setParameters(double[] p) {
        throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
    }
    /** Adopt locations (and shape length) from another model. */
    public void setLocations(ArrayList<Integer> l, int length){
        locations=new ArrayList<>(l);
        shapeLength=length;
    }
    public ArrayList<Integer> getIntervals(){return locations;}
    public int getShapeLength(){ return shapeLength;}
    /** Create a shape of the next type in rotation with random base/amplitude. */
    public void generateBaseShape(){
        //Randomise BASE and AMPLITUDE
        double b=MINBASE+(MAXBASE-MINBASE)*Model.rand.nextDouble();
        double a=MINAMP+(MAXAMP-MINAMP)*Model.rand.nextDouble();
        ShapeType[] all=ShapeType.values();
        ShapeType st=all[(shapeCount++)%all.length];
        shape=new DictionaryModel.Shape(st,shapeLength,b,a);
    }
    /**
     * Generate one series of length n. Shape values are redrawn every call;
     * alternate calls return the sign-inverted series, and a new base shape
     * is generated on the non-inverted calls.
     */
    @Override
    public double[] generateSeries(int n)
    {
        t=0;
        double[] d;
        generateRandomShapeVals();
        if(invert){
            d= new double[n];
            for(int i=0;i<n;i++)
                d[i]=-generate();
            invert=false;
        }
        else{
            generateBaseShape();
            d = new double[n];
            for(int i=0;i<n;i++)
                d[i]=generate();
            invert=true;
        }
        return d;
    }
    /**
     * Config 1: raw shapeVals inside an occurrence, pure noise outside.
     * Advances t.
     */
    private double generateConfig1(){
        double value=0;
        //Find the first occurrence whose end is at or beyond t
        int insertionPoint=0;
        while(insertionPoint<locations.size() && locations.get(insertionPoint)+shapeLength<t)
            insertionPoint++;
        //Past all the start points, use the last
        if(insertionPoint>=locations.size()){
            insertionPoint=locations.size()-1;
        }
        int point=locations.get(insertionPoint);
        if(point<=t && point+shapeLength>t) //t lies inside this occurrence
            value=shapeVals[(int)(t-point)];
        else
            value= error.simulate();
        t++;
        return value;
    }
    /**
     * Config 2: noise everywhere with the base shape added on top; base and
     * amplitude are re-randomised at the start of each later occurrence.
     * Advances t.
     */
    private double generateConfig2(){
        double value=error.simulate();
        //Find the first occurrence whose end is at or beyond t
        int insertionPoint=0;
        while(insertionPoint<locations.size() && locations.get(insertionPoint)+shapeLength<t)
            insertionPoint++;
        //Past all the start points, use the last
        if(insertionPoint>=locations.size()){
            insertionPoint=locations.size()-1;
        }
        int point=locations.get(insertionPoint);
        if(insertionPoint>0 && point==t){//New occurrence begins: randomise scale
            double b=MINBASE+(MAXBASE-MINBASE)*Model.rand.nextDouble();
            double a=MINAMP+(MAXAMP-MINAMP)*Model.rand.nextDouble();
            shape.setAmp(a);
            shape.setBase(b);
        }
        if(point<=t && point+shapeLength>t){ //t lies inside this occurrence
            value+=shape.generateWithinShapelet((int)(t-point));
        }
        t++;
        return value;
    }
    //Generate point t
    @Override
    public double generate(){
        return generateConfig1();
    }
    /**
     * Write 20 example series (10 per model) column-wise to a CSV.
     * Fixes: the second model was never used (both loops drew from m1), and
     * only the first 10 of the 20 series were written out.
     */
    public static void generateExampleData(){
        int length=500;
        GLOBALSERIESLENGTH=length;
        Model.setGlobalRandomSeed(3);
        Model.setDefaultSigma(0);
        MatrixProfileModelVersion2 m1=new MatrixProfileModelVersion2();
        MatrixProfileModelVersion2 m2=new MatrixProfileModelVersion2();
        double[][] d=new double[20][];
        for(int i=0;i<10;i++){
            d[i]=m1.generateSeries(length);
        }
        for(int i=10;i<20;i++){
            d[i]=m2.generateSeries(length); //was m1: the second model was unused
        }
        OutFile of=new OutFile("C:\\temp\\MP_ExampleSeries.csv");
        for(int i=0;i<length;i++){
            for(int j=0;j<d.length;j++) //was j<10: half the series were dropped
                of.writeString(d[j][i]+",");
            of.writeString("\n");
        }
    }
    /** Comma-separated list of occurrence start points. */
    @Override
    public String toString(){
        String str="";
        for(Integer i:locations)
            str+=i+",";
        return str;
    }
    public static void main(String[] args){
        generateExampleData();
        System.exit(0);
        //Set up two models with same intervals but different shapes
        int length=500;
        Model.setGlobalRandomSeed(10);
        Model.setDefaultSigma(0.1);
        MatrixProfileModelVersion2 m1=new MatrixProfileModelVersion2();
        MatrixProfileModelVersion2 m2=new MatrixProfileModelVersion2();
        double[] d1=m1.generateSeries(length);
        double[] d2=m2.generateSeries(length);
        OutFile of=new OutFile("C:\\temp\\MP_Ex.csv");
        for(int i=0;i<length;i++)
            of.writeLine(d1[i]+","+d2[i]);
    }
}
| 9,532 | 33.415162 | 135 | java |
tsml-java | tsml-java-master/src/main/java/statistics/simulators/Model.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package statistics.simulators;
import java.util.Random;
import statistics.distributions.*;
/***
* @author ajb
*
* Base class for Data model to generate simulated data.
*
* In order to be able to recreate data, all random numbers should be generated
* with calls to error.RNG.nextDouble() etc.
*
*/
abstract public class Model {
    // Current time step; advanced by generate() in the subclasses.
    protected double t = 0;
    // Noise distribution added to (or used as) every observation.
    Distribution error;
    // Standard deviation applied to the error term of newly built models.
    public static double defaultSigma = 1;
    /** Set the sigma used by all subsequently constructed models. */
    public static void setDefaultSigma(double x) {
        defaultSigma = x;
    }
    // Global seed; also drives the per-model error seeds below.
    static int seed = 1;
    // Running count of constructed models, used to derive distinct error seeds.
    static int count = 1;
    double variance;
    // Shared RNG: all model randomisation should draw from this so runs can be reproduced.
    public static Random rand = new Random(seed);
    // public static Random rand=new MersenneTwister();
    /**
     * Builds the Normal error distribution with the current defaultSigma.
     * Each constructed model receives a distinct error seed derived from the
     * global seed and the running count (a deliberate singleton-style hack).
     */
    public Model() {
        variance = defaultSigma;
        error = new NormalDistribution(0, variance);
        //Need different seeds for each model so using a bit of a hack singleton
        if (seed >= 0) {
            error.setRandomSeed(count * (seed + 1));
            count++;
        }
    }
    /** Reset the global seed, the shared RNG and the model count. */
    public static void setGlobalRandomSeed(int s) {
        seed = s;
        rand = new Random(seed);
        count = 1;
    }
    public static int getRandomSeed() {
        return seed;
    }
    //Bit of a hack: always resets the error to a Normal — what if a non normal error was set?
    public void setVariance(double x) {
        variance = x;
        error = new NormalDistribution(0, variance);
    }
    public double getVariance() {
        return variance;
    }
    /*Generate a single data point at time x.
    //Assumes a model independent of previous observations. As
    //such will not be relevant for ARMA or HMM models, which just return -1.
     * Should probably remove.
    */
    double generate(double x) {
        return error.simulate();
    }
    //This will generate the next observation after the currently stored t value
    double generate() {
        return error.simulate();
    }
    /** Restart the series: sets the internal clock t back to zero. */
    public void reset() {
        t = 0;
    }
    public void setError(Distribution d) {
        error = d;
    }
    //Generates a series of length n by repeated calls to generate()
    public double[] generateSeries(int n) {
        double[] d = new double[n];
        for (int i = 0; i < n; i++)
            d[i] = generate();
        return d;
    }
    public String getModelType() {
        return "RandomNoise";
    }
    public String getAttributeName() {
        return "t";
    }
    // NOTE(review): no "\n" after the first "%" line, so both header items end
    // up on a single line — confirm whether that is the intended format.
    public String getHeader() {
        String header = "%" + getModelType();
        header += "%" + "Error=" + error.getClass() + " Sigma = " + error.getVariance() + "\n";
        return header;
    }
    /**
     * Subclasses must implement this, how they take them out of the array is their business.
     *
     * @param p model parameters, subclass-specific layout
     */
    abstract public void setParameters(double[] p);
} | 3,482 | 25.18797 | 95 | java |
tsml-java | tsml-java-master/src/main/java/statistics/simulators/PolynomialModel.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package statistics.simulators;
import statistics.distributions.*;
public class PolynomialModel extends Model{
double[] coefficients;
double[] powers;
// Default min max range for coefficients and powers
static double defaultMin=-4;
static double defaultMax=4;
//DO NOT USE UNTIL DEBUGGED. Then make public
private PolynomialModel(){super();}
private PolynomialModel(double[] c, double[] p){
super();
coefficients=c;
powers=p;
t=0;
}
private PolynomialModel(double[] c, double[] p, Distribution e){
super();
coefficients=c;
powers=p;
error=e;
t=0;
}
@Override
public void setParameters(double[] p) {
if(p.length%2!=0)
throw new UnsupportedOperationException("Error, the Polynomial model input must be of even length, first half the powers, second half coefficients,");
coefficients=new double[p.length/2];
powers=new double[p.length/2];
System.arraycopy(p,0,powers,0, p.length/2);
System.arraycopy(p, p.length/2,coefficients, 0, p.length/2);
error=new NormalDistribution(0,1);
t=0;
}
@Override
public double generate(double x)
{
double p;
p=generateDeterministic(x);
p+=error.simulate();
t=x;
return p;
}
@Override
public double generate()
{
double p=generateDeterministic(t);
p+=error.simulate();
t++;
return p;
}
public double generateDeterministic(double x)
{
double p=0;
if(x==0) //Slight hack, what about constants?
return 0;
for(int i=0;i<powers.length;i++)
p+=coefficients[i]*Math.pow(x,powers[i]);
return p;
}
public void setTime(int t){this.t=t;}
public double[] getCoefficients(){return coefficients;}
public double[] getPowers(){return powers;}
@Override
public String toString()
{
String str="";
for(int i=0;i<powers.length;i++)
str+=coefficients[i]+"*x^"+powers[i]+"\t+\t";
str+=error;
return str;
}
public static PolynomialModel generateRandomModel(int r)
{
return generateRandomModel(r,defaultMin,defaultMax,defaultMin,defaultMax);
}
public static PolynomialModel generateRandomModel(int r, double min, double max)
{
return generateRandomModel(r,min,max,min,max);
}
public static PolynomialModel generateRandomModel(int r, double minC, double maxC,double minP, double maxP)
{
double[] coeffs,powers;
coeffs=new double[r];
powers=new double[r];
for(int i=0;i<r;i++)
{
//HERE CHANGE STRUCTURE
// coeffs[i]=minC+(maxC-minC)*Distribution.RNG.nextDouble();
// powers[i]=minP+(maxP-minP)*Distribution.RNG.nextDouble();
}
return new PolynomialModel(coeffs,powers, new NormalDistribution(0,1));
}
public static void main(String[] args){
System.out.println(" Test Harness not implemented yet");
}
} | 3,574 | 27.6 | 166 | java |
tsml-java | tsml-java-master/src/main/java/statistics/simulators/ShapeletModel.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
/*
Written by Jon Hills
Model to simulate data where shapelet approach should be optimal.
*/
package statistics.simulators;
import fileIO.OutFile;
import java.util.*;
import java.io.*;
import statistics.distributions.NormalDistribution;
public class ShapeletModel extends Model {
public enum ShapeType {TRIANGLE,HEADSHOULDERS,SINE, STEP, SPIKE};
protected ArrayList<Shape> shapes;
private static int DEFAULTNUMSHAPELETS=1;
private static int DEFAULTSERIESLENGTH=500;
private static int DEFAULTSHAPELETLENGTH=29;
private static int DEFAULTBASE=-1;
private static int DEFAULTAMP=4;
protected int numShapelets;
protected int seriesLength;
protected int shapeletLength;
    //Default Constructor, max start should be at least 29 less than the length
    // of the series if using the default shapelet length of 29
    /** Default model: default series length, shapelet count and shapelet length. */
    public ShapeletModel()
    {
        this(new double[]{DEFAULTSERIESLENGTH,DEFAULTNUMSHAPELETS,DEFAULTSHAPELETLENGTH});
    }
    /** Reset series length, shapelet count and shapelet length to the defaults. */
    public final void setDefaults(){
        seriesLength=DEFAULTSERIESLENGTH;
        numShapelets=DEFAULTNUMSHAPELETS;
        shapeletLength=DEFAULTSHAPELETLENGTH;
    }
    /**
     * Construct from an optional parameter array:
     * param[0]=series length, param[1]=number of shapelets,
     * param[2]=shapelet length; missing entries keep their defaults.
     */
    public ShapeletModel(double[] param)
    {
        super();
        setDefaults();
        //PARAMETER LIST: seriesLength, numShapelets, shapeletLength, maxStart
        //Deliberate switch fall-through: each case also applies the cases below it
        if(param!=null){
            switch(param.length){
                default:
                case 3: shapeletLength=(int)param[2];
                case 2: numShapelets=(int)param[1];
                case 1: seriesLength=(int)param[0];
            }
        }
        shapes=new ArrayList<>();
        // Shapes are randomised for type and location; the other characteristics, such as length
        // must be changed in the inner class.
        for(int i=0;i<numShapelets;i++)
        {
            Shape sh = new Shape(shapeletLength);
            sh.randomiseShape();
            shapes.add(sh);
        }
        // error=new NormalDistribution(0,1);
    }
    //This constructor is used for data of a given length
    /** @param s series length */
    public ShapeletModel(int s)
    {
        this(new double[]{(double)s});
    }
    //This constructor is used for data of a given length in a two class problem
    //where the shape distinguishing the first class is known
    /**
     * NOTE(review): both parameters are currently ignored — the seriesLength
     * parameter shadows the field (which setDefaults then overwrites) and the
     * supplied shape is never stored; the shapes list is filled with random
     * shapes instead. Confirm the intent before relying on this constructor.
     */
    public ShapeletModel(int seriesLength,Shape shape)
    {
        setDefaults();
        shapes=new ArrayList<Shape>();
        for(int i=0;i<numShapelets;i++)
        {
            Shape sh = new Shape(shapeletLength);
            sh.randomiseShape();
            shapes.add(sh);
        }
    }
    // This constructor accepts an ArrayList of shapes for the shapelet model,
    // rather than determining the shapes randomly.
    // NOTE(review): setDefaults() is not called here, so seriesLength,
    // numShapelets and shapeletLength stay at 0 — confirm callers set them.
    public ShapeletModel(ArrayList<Shape> s)
    {
        shapes=new ArrayList<Shape>(s);
    }
@Override
public double[] generateSeries(int n)
{
t=0;
//Randomize the starting locations each time this is called
for(Shape s:shapes)
s.randomiseLocation();
double[] d = new double[n];
for(int i=0;i<n;i++)
d[i]=generate();
return d;
}
//Fix all shapelets to a single type
/*Generate a single data
//Assumes a model independent of previous observations. As
//such will not be relevant for ARMA or HMM models, which just return -1.
* Should probably remove.
*/
@Override
public double generate(double x)
{
double value=error.simulate();
//Slightly inefficient for non overlapping shapes, but worth it for clarity and generality
for(Shape s:shapes)
value+=s.generate((int)x);
return value;
}
//This will generate the next sequence after currently stored t value
@Override
public double generate()
{
// System.out.println("t ="+t);
double value=generate(t);
t++;
return value;
}
    /**
     * Subclasses must implement this, how they take them out of the array is their business.
     * Intentionally a no-op here: all parameters are set via the constructors.
     * @param p model parameters (ignored)
     */
    @Override
    public void setParameters(double[] p)
    {
    }
    // The implementation of the reset sets t back to zero, so generation
    // restarts from the beginning of the series.
    public void reset(){
        t=0;
    }
public ShapeType getShapeType(){
return shapes.get(0).type;
}
public void setShapeType(ShapeType st){
for(Shape s:shapes){
s.setType(st);
}
}
// The toString() method has not been changed.
@Override
public String toString(){
String str= "nos shapes = "+shapes.size()+"\n";
for(Shape s:shapes)
str+=s.toString()+"\n";
return str;
}
@Override
public String getModelType(){ return "ShapeletSimulator";}
@Override
public String getAttributeName(){return "Shape";}
@Override
public String getHeader(){
String header=super.getHeader();
header+="% Shapelet Length ="+shapeletLength;
header+="% Series Length ="+seriesLength;
header+="% Number of Shapelets ="+numShapelets;
for(int i=0;i<shapes.size();i++)
header+="%\t Shape "+i+" "+shapes.get(i).type+"\n";
return header;
}
    // Inner class determining the shape inserted into the shapelet model.
    // The piecewise arithmetic in generate() is sensitive to off-by-one
    // choices, so it is documented here rather than restructured.
    public class Shape{
        // Type: head and shoulders, spike, step, triangle, or sine wave.
        private ShapeType type;
        //Length of shape
        private int length;
        //Position of shape on axis determined by base (lowest point) and amp(litude).
        private double base;
        private double amp;
        //The position in the series at which the shape begins.
        private int location;
        //Default constructor, call randomise shape to get a random instance
        // The default length is 29, the shape extends from -2 to +2, is of
        // type head and shoulders, and is located at index 0.
        private Shape()
        {
            this(ShapeType.HEADSHOULDERS,DEFAULTSHAPELETLENGTH,DEFAULTBASE,DEFAULTAMP);
        }
        //Set length only, default for the others
        private Shape(int length){
            this(ShapeType.HEADSHOULDERS,length,DEFAULTBASE,DEFAULTAMP);
        }
        // This constructor produces a completely specified shape
        private Shape(ShapeType t,int l, double b, double a){
            type=t;
            length=l;
            base=b;
            amp=a;
        }
        //Checks the location against the value t, and outputs part of the shape
        // if appropriate. Returns 0 outside [location, location+length-1];
        // inside, the value is a piecewise function of offset=t-location,
        // chosen by the shape type.
        private double generate(int t){
            if(t<location || t>location+length-1)
                return 0;
            int offset=t-location;
            double value=0;
            int lower=0,mid=0,upper=0;
            switch(type){
                // Linear ramp up to amp+base at the midpoint, then back down.
                // The odd/even length split keeps the descent symmetric.
                case TRIANGLE:
                    mid=length/2;
                    if(offset<=mid) {
                        if(offset==0)
                            value=base;
                        else
                            value=((offset/(double)(mid))*(amp))+base;
                    }
                    else
                    {
                        if(offset>=length)
                            value=base;
                        else if(length%2==1)
                            value=((length-offset-1)/(double)(mid)*(amp))+base;
                        else
                            value=((length-offset)/(double)(mid)*(amp))+base;
                    }
                    break;
                // Three half-sine humps: small (amp/2), large (amp), small.
                case HEADSHOULDERS:
                    //Need to properly set the boundaries. Should NOT do this here!
                    lower=length/3;
                    upper=2*lower;
                    // Do something about uneven split.
                    if(length%3==2) //Add two the middle hump, or one each to the sides?
                        upper+=2;
//                  if(offset<length/3)//First small hump.
                    if(offset<lower)//First small hump.
                        value = ((amp/2)*Math.sin(((2*Math.PI)/((length/3-1)*2))*offset))+base;
                    else if(offset>=upper){//last small hump
                        value = ((amp/2)*Math.sin(((2*Math.PI)/((length/3-1)*2))*(offset-upper)))+base;
//                    if(offset+1>=(2*length)/3){ //last small hump
//                        if(length%3>0 && offset>=(length/3)*3)//wtf?!?
//                           value = base;
//                        else    //This is causing the problem when length%3==0
//                            value = ((amp/2)*Math.sin(((2*Math.PI)/((length/3-1)*2))*(offset+1-(2*length)/3)))+base;
//                    }
                    }
                    else // middle big hump. Need to rescale for middle interval
                        value = ((amp)*Math.sin(((2*Math.PI)/(((upper-lower)-1)*2))*(offset-length/3)))+base;
                    // Clamp below: the humps must never dip under the base line.
                    if(value< base)
                        value=base;
                    break;
                // One full sine period centred on 0 (base is not used here).
                case SINE:
                    value=amp*Math.sin(((2*Math.PI)/(length-1))*offset)/2;
                    break;
                // Low for the first half, high (base+amp) for the second.
                case STEP:
                    if(offset<length/2)
                        value=base;
                    else
                        value=base+amp;
                    break;
                // Ramp down to -amp/2 over the first quarter, up to +amp/2
                // over the middle half, back to 0 over the last quarter.
                case SPIKE:
                    lower=length/4;
                    upper=3*lower;
                    if(offset<=lower) //From 0 to base
                    {
                        if(offset==0)
                            value=0;
                        else
                            value=(-amp/2)*(offset/(double)(lower));
                    }
                    else if(offset>lower && offset<upper)
                    {
                        value=-amp/2+(amp)*((offset-lower)/(double)(upper-lower-1));
//                        if(offset>length/2&&offset<=length/4*3)
//                            value=(offset-length/2)/(double)(length/4)*(amp/2);
                    }
                    else{
                        value=amp/2-amp/2*((offset-upper+1)/(double)(length-upper));
                    }
                    break;
            }
            return value;
        }
        // Sets the index in the series at which this shape begins.
        private void setLocation(int newLoc){
            this.location=newLoc;
        }
        // Sets the type and recentres the base so the shape straddles zero
        // (head-and-shoulders uses -amp/4 since its big hump reaches amp).
        private void setType(ShapeType newType){
            this.type=newType;
            if(newType==ShapeType.HEADSHOULDERS)
                base=-amp/4;
            else
                base=-amp/2;
        }
        private void setLength(int newLength){
            this.length=newLength;
        }
        // Randomises the starting location of a shape so the whole shape fits
        // within the series. Returns false when there is insufficient space
        // to fit all shapes within the value of maxStart. This is resolved in
        // the constructor.
        private boolean randomiseLocation(){
            int start =0;
            if(seriesLength>shapeletLength)
                start= Model.rand.nextInt(seriesLength-shapeletLength);
            setLocation(start);
            return true;
        }
        @Override
        public String toString(){
            return ""+this.type+" start = "+location+" length ="+length;
        }
        //gives a shape a random type, drawn uniformly from all ShapeType values
        private boolean randomiseShape(){
            ShapeType [] types = ShapeType.values();
            int ranType = Model.rand.nextInt(types.length);
            setType(types[ranType]);
            return true;
        }
    }
public void setShapeletLength(int l){
shapeletLength=l;
for(Shape s:shapes)
s.setLength(l);
}
    //Test harness
    // Writes one noise-free example series per shape type and per shapelet
    // length (8..30) to C:\temp as CSV files, using a fixed random seed so
    // the output is reproducible.
    public static void main (String[] args) throws IOException
    {
        Model.setDefaultSigma(0);     // no noise: the pure shape is written out
        Model.setGlobalRandomSeed(0); // fixed seed for reproducibility
        for(int shapeletLength=8;shapeletLength<=30;shapeletLength+=1){
            for(ShapeType st: ShapeType.values()){
                ShapeletModel model=new ShapeletModel();
//                ShapeType st=ShapeType.SPIKE;
                model.setShapeType(st);
                model.setShapeletLength(shapeletLength);
                OutFile out=new OutFile("C:\\temp\\"+st.toString()+shapeletLength+".csv");
                // Series length equals shapelet length so the file contains
                // exactly one complete shape.
                model.seriesLength=shapeletLength;
  //              System.out.println(" nos shapelets ="+model.shapes.size());
                out.writeString(st.name()+"\n");
                double[] series=model.generateSeries(model.seriesLength);
                for(int i=0;i<model.seriesLength;i++){
    //                System.out.println(series[i]+" ");
                    out.writeLine(series[i]+",");
                }
                out.writeString("\n");
     //           System.out.print("\n");
            }
        }
    }
}
| 13,755 | 33.825316 | 118 | java |
tsml-java | tsml-java-master/src/main/java/statistics/simulators/SimulateDictionaryData.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
/*
Simulate repeated patterns in the data
*/
package statistics.simulators;
import fileIO.OutFile;
import utilities.InstanceTools;
import weka.core.Instances;
/**
*
* @author ajb
*/
public class SimulateDictionaryData extends DataSimulator{
static int[] shapeletsPerClass={5,20};//Also defines the num classes by length
static int shapeLength=11;
//Store a global copy purely to be able to recover the latest metadata
//Probably need to generalise this1?
static DataSimulator sim;
public static void setShapeletsPerClass(int[] c){
shapeletsPerClass=new int[c.length];
for(int i=0;i<c.length;i++)
shapeletsPerClass[i]=c[i];
}
public static void setShapeletLength(int s){
shapeLength=s;
}
/**
* This method creates and returns a set of Instances representing a
* simulated two-class time-series problem.
*
* @param seriesLength The length of the series. All time series in the
* dataset are the same length.
* @param casesPerClass An array of two integers indicating the number of
* instances of class 0 and class 1.
* @return Instances representing the time-series dataset. The Instances
* returned will be empty if the casesPerClass parameter does not contain
* exactly two values.
*/
public static Instances generateDictionaryData(int seriesLength, int []casesPerClass, int[] shapesPerClass)
{
if( casesPerClass.length != 2)
{
System.err.println("Incorrect parameters, dataset will not be co"
+ "rrect.");
int[] tmp = {10,10};
casesPerClass = tmp;
}
DictionaryModel[] shapeMod = new DictionaryModel[casesPerClass.length];
System.out.println("Populating shape array ...");
populateRepeatedShapeletArray(shapeMod, seriesLength,shapesPerClass);
System.out.println("Creating simulator ...");
sim = new DataSimulator(shapeMod);
sim.setSeriesLength(seriesLength);
sim.setCasesPerClass(casesPerClass);
System.out.println("Generating Data ... ");
Instances d=sim.generateDataSet();
return d;
}
public static Instances generateDictionaryData(int seriesLength, int []casesPerClass){
return generateDictionaryData(seriesLength, casesPerClass,shapeletsPerClass);
}
/**
* This is a support method for generateShapeletData
*
* @param seriesLength The length of the series.
*/
private static void populateRepeatedShapeletArray(DictionaryModel [] s, int seriesLength)
{
if(s.length!=shapeletsPerClass.length){//ERROR
throw new RuntimeException("Error, mismatch in number of classes: "+s.length+" VS "+shapeletsPerClass.length);
}
for(int i=0;i<s.length;i++){
double[] p1={seriesLength,shapeletsPerClass[(0+i)],shapeletsPerClass[(1+i)%2],shapeLength};
s[i]=new DictionaryModel(p1);
}
//Fix all the shape types to be the same as first
DictionaryModel.ShapeType st = s[0].getShape1();
for(int i=1;i<s.length;i++)
s[i].setShape1Type(st);
st = s[0].getShape2();
for(int i=1;i<s.length;i++)
s[i].setShape2Type(st);
}
private static void populateRepeatedShapeletArray(DictionaryModel [] s, int seriesLength, int[] shapeletsPerClass)
{
if(s.length!=shapeletsPerClass.length){//ERROR
throw new RuntimeException("Error, mismatch in number of classes: "+s.length+" VS "+shapeletsPerClass.length);
}
for(int i=0;i<s.length;i++){
double[] p1={seriesLength,shapeletsPerClass[(0+i)],shapeletsPerClass[(1+i)%2],shapeLength};
s[i]=new DictionaryModel(p1);
}
//Fix all the shape types to be the same as first
DictionaryModel.ShapeType st = s[0].getShape1();
for(int i=1;i<s.length;i++)
s[i].setShape1Type(st);
st = s[0].getShape2();
for(int i=1;i<s.length;i++)
s[i].setShape2Type(st);
}
public static void generateExampleData(){
int seriesLength=1000;
Model.setDefaultSigma(0.1);
Instances noNoise=generateDictionaryData(seriesLength,new int[]{20,20});
Model.setDefaultSigma(1);
Instances noise=generateDictionaryData(seriesLength,new int[]{20,20});
OutFile of = new OutFile("C:\\temp\\DictionaryNoNoise.csv");
of.writeString(noNoise.toString());
OutFile of2 = new OutFile("C:\\temp\\DictionaryNoise.csv");
of2.writeString(noise.toString());
}
public static void checkGlobalSeedForIntervals(){
Model.setDefaultSigma(0);
Model.setGlobalRandomSeed(0);
Instances d=generateDictionaryData(100,new int[]{2,2});
OutFile of = new OutFile("C:\\Temp\\randZeroNoiseSeed0.csv");
of.writeLine(d.toString());
Model.setDefaultSigma(0.1);
Model.setGlobalRandomSeed(1);
d=generateDictionaryData(100,new int[]{2,2});
of = new OutFile("C:\\Temp\\randUnitNoiseSeed1.csv");
of.writeLine(d.toString());
Model.setDefaultSigma(0);
Model.setGlobalRandomSeed(0);
d=generateDictionaryData(100,new int[]{2,2});
of = new OutFile("C:\\Temp\\randZeroNoiseSeed0REP.csv");
of.writeLine(d.toString());
Model.setDefaultSigma(0.1);
Model.setGlobalRandomSeed(1);
d=generateDictionaryData(100,new int[]{2,2});
of = new OutFile("C:\\Temp\\randUnitNoiseSeed1REP.csv");
of.writeLine(d.toString());
}
public static void main(String[] args) {
checkGlobalSeedForIntervals();
System.exit(0);
// generateExampleData();
Model.setDefaultSigma(0);
Model.setGlobalRandomSeed(0);
//seriesLength=1000;
// casesPerClass=new int[]{20,20};
Instances d=generateDictionaryData(59,new int[]{2,2});
Instances[] split=InstanceTools.resampleInstances(d, 0,0.5);
System.out.println(" DATA "+d);
OutFile of = new OutFile("C:\\Temp\\dictionarySimulationTest.csv");
// of.writeLine(""+sim.generateHeader());
of.writeString(split[0].toString());
of = new OutFile("C:\\Temp\\dictionarySimulationTrain.csv");
of.writeString(split[1].toString());
}
}
| 7,177 | 38.877778 | 122 | java |
tsml-java | tsml-java-master/src/main/java/statistics/simulators/SimulateElasticData.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package statistics.simulators;
import weka.core.Instances;
/**
*
* @author ajb
*/
public class SimulateElasticData {
    static DataSimulator sim;        // latest simulator, kept to recover metadata
    static double warpPercent=0.1;   // NOTE(review): declared but not read in this class — confirm use elsewhere
    /**
     * Generates a two-class dataset for elastic (warping-based) problems.
     * @param seriesLength length of every series
     * @param casesPerClass number of cases for class 0 and class 1
     * @return the simulated dataset
     * @throws IllegalArgumentException if casesPerClass does not have exactly two entries
     */
    public static Instances generateElasticData(int seriesLength, int []casesPerClass)
    {
        ElasticModel[] elastic = new ElasticModel[casesPerClass.length];
        populateElasticModels(elastic,seriesLength);
        sim = new DataSimulator(elastic);
        sim.setSeriesLength(seriesLength);
        sim.setCasesPerClass(casesPerClass);
        Instances d=sim.generateDataSet();
        return d;
    }
    /**
     * Creates two models with different randomly chosen shapes (sine is
     * excluded as too easy). Fails fast with an exception when the class
     * count is wrong: previously only a warning was printed, after which the
     * method either crashed with an index error or left extra models null.
     */
    private static void populateElasticModels(ElasticModel[] m, int seriesLength){
        if(m.length!=2)
            throw new IllegalArgumentException("SimulateElasticData is only implemented for two classes, got "+m.length);
        //Create two models with the same interval but different shape.
        ElasticModel m1=new ElasticModel();
        m1.setSeriesLength(seriesLength);
        ElasticModel m2=new ElasticModel();
        m2.setSeriesLength(seriesLength);
        //Dont use sine, its too easy
        DictionaryModel.ShapeType[] vals={
            DictionaryModel.ShapeType.TRIANGLE,
            DictionaryModel.ShapeType.STEP,DictionaryModel.ShapeType.HEADSHOULDERS
        };
        DictionaryModel.ShapeType shape=vals[Model.rand.nextInt(vals.length)];
        m1.setShape(shape);
        //Choose a shape not equal to m1's
        shape=vals[Model.rand.nextInt(vals.length)];
        while(shape==m1.getShape())
            shape=vals[Model.rand.nextInt(vals.length)];
        m2.setShape(shape);
        m[0]=m1;
        m[1]=m2;
    }
}
| 2,445 | 33.450704 | 86 | java |
tsml-java | tsml-java-master/src/main/java/statistics/simulators/SimulateIntervalData.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
/*
*/
package statistics.simulators;
import fileIO.OutFile;
import utilities.InstanceTools;
import weka.core.Instances;
/**
*
* @author ajb
*/
public class SimulateIntervalData {
    static DataSimulator sim;        // latest simulator, kept to recover metadata
    static int intervalLength;       // derived in generateIntervalData
    static int nosIntervals=3;       // number of signal intervals per series
    static int noiseToSignal=4;      // ratio of noise region to signal region
    public static void setNosIntervals(int n){ nosIntervals=n;}
    public static void setNoiseToSignal(int n){ noiseToSignal=n;}
    static double base=-1;
    static double amp=2;
    /** Sets the shape amplitude; the base is recentred so shapes span [-amp/2, amp/2]. */
    public static void setAmp(double a){
        amp=a;
        base=-amp/2;
    }
    /**
     * This method creates and returns a set of Instances representing a
     * simulated two-class time-series problem. Both classes share the same
     * interval locations but place different shapes within them.
     *
     * @param seriesLength The length of the series. All time series in the
     * dataset are the same length.
     * @param casesPerClass An array of two integers indicating the number of
     * instances of class 0 and class 1.
     * @return Instances representing the time-series dataset.
     */
    public static Instances generateIntervalData(int seriesLength, int []casesPerClass)
    {
        intervalLength=seriesLength/(nosIntervals*noiseToSignal);
        DictionaryModel.Shape.DEFAULTBASE=base;
        DictionaryModel.Shape.DEFAULTAMP=amp;
        IntervalModel[] intervalMod = new IntervalModel[casesPerClass.length];
        populateIntervalModels(intervalMod,seriesLength);
        sim = new DataSimulator(intervalMod);
        sim.setSeriesLength(seriesLength);
        sim.setCasesPerClass(casesPerClass);
        Instances d=sim.generateDataSet();
        return d;
    }
    /**
     * Creates two models sharing the same intervals but with different shapes.
     * Bug fixed: setNoiseToSignal was previously called twice on the first
     * model and never on the second, leaving m2 at its default ratio.
     */
    private static void populateIntervalModels(IntervalModel[] m, int seriesLength){
        if(m.length!=2)
            System.out.println("ONLY IMPLEMENTED FOR TWO CLASSES");
        IntervalModel m1=new IntervalModel();
        m1.setSeriesLength(seriesLength);
        m1.setNoiseToSignal(noiseToSignal);
        m1.setNosIntervals(nosIntervals);
        m1.createIntervals();
        IntervalModel m2=new IntervalModel();
        m2.setSeriesLength(seriesLength);
        m2.setNoiseToSignal(noiseToSignal);   // was m1: copy-paste bug
        m2.setNosIntervals(nosIntervals);
        m2.setIntervals(m1.getIntervals(), m1.getIntervalLength());
        //Same shape on every interval within a model, different shape per model.
        //This may give an advantage to the spectral classifiers.
        m1.randomiseShape(null);
        m2.randomiseShape(m1);
        m[0]=m1;
        m[1]=m2;
    }
    // Test harness: writes a deterministic train/test split to C:\Temp.
    public static void main(String[] args) {
        Model.setDefaultSigma(0);
        Model.setGlobalRandomSeed(0);
        int seriesLength=500;
        int[] casesPerClass=new int[]{2,2};
        SimulateIntervalData.setNosIntervals(4);
        SimulateIntervalData.setNoiseToSignal(4);
        Instances d=generateIntervalData(seriesLength,casesPerClass);
        Instances[] split=InstanceTools.resampleInstances(d, 0,0.5);
        System.out.println(" DATA "+d);
        OutFile of = new OutFile("C:\\Temp\\intervalSimulationTest.csv");
        of.writeString(split[0].toString());
        of = new OutFile("C:\\Temp\\intervalSimulationTrain.csv");
        of.writeString(split[1].toString());
    }
}
| 4,974 | 39.120968 | 143 | java |
tsml-java | tsml-java-master/src/main/java/statistics/simulators/SimulateMatrixProfileData.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package statistics.simulators;
import fileIO.OutFile;
import java.text.DecimalFormat;
import tsml.transformers.MatrixProfile;
import utilities.ClassifierTools;
import utilities.InstanceTools;
import machine_learning.classifiers.kNN;
import weka.core.Instances;
import tsml.transformers.RowNormalizer;
/**
*
* @author ajb
*/
public class SimulateMatrixProfileData {
    static DataSimulator sim;     // latest simulator, kept to recover metadata
    static boolean debug=true;    // when true, prints the generated models
    /**
     * Generates a two-class dataset for matrix-profile style problems.
     * @param seriesLength length of every series
     * @param casesPerClass number of cases for class 0 and class 1
     * @return the simulated dataset
     */
    public static Instances generateMatrixProfileData(int seriesLength, int []casesPerClass)
    {
        MatrixProfileModelVersion1.setGlobalSeriesLength(seriesLength);
        MatrixProfileModelVersion1[] MP_Mod = new MatrixProfileModelVersion1[casesPerClass.length];
        populateMatrixProfileModels(MP_Mod);
        sim = new DataSimulator(MP_Mod);
        sim.setSeriesLength(seriesLength);
        sim.setCasesPerClass(casesPerClass);
        Instances d=sim.generateDataSet();
        return d;
    }
    //Creates one default model per class. Only two classes are supported.
    private static void populateMatrixProfileModels(MatrixProfileModelVersion1[] m){
        if(m.length!=2)
            System.out.println("ONLY IMPLEMENTED FOR TWO CLASSES");
        m[0]=new MatrixProfileModelVersion1();
        m[1]=new MatrixProfileModelVersion1();
        if(debug){
            System.out.println(" Model 1 = "+m[0]);
            System.out.println(" Model 2 = "+m[1]);
        }
    }
    /**
     * Experiment: compares 1NN accuracy on raw, normalised and
     * matrix-profile transformed data over 100 resamples, for noise levels
     * 0, 0.5 and 1.
     */
    private static void test1NNClassifiers() throws Exception{
        for(double sig=0;sig<=1;sig+=0.5){
            Model.setDefaultSigma(sig);
            double meanAcc=0;
            double meanAcc2=0;
            double meanAcc3=0;
            int r=100;
            DecimalFormat df= new DecimalFormat("###.#####");
            for(int i=0;i<r;i++){
                Model.setGlobalRandomSeed(i);
                int seriesLength=150;
                int[] casesPerClass=new int[]{50,50};
                Instances d=generateMatrixProfileData(seriesLength,casesPerClass);
                if(i==1){
                    OutFile out=new OutFile("C:\\temp\\mpRand"+sig+".csv");
                    out.writeString(d.toString());
                }
                Instances[] split=InstanceTools.resampleInstances(d,i,0.1);
                kNN knn= new kNN();
                knn.setKNN(1);
                double acc=ClassifierTools.singleTrainTestSplitAccuracy(knn, split[0], split[1]);
                RowNormalizer nc=new RowNormalizer();
                split[0]=nc.transform(split[0]);
                split[1]=nc.transform(split[1]);
                double acc2=ClassifierTools.singleTrainTestSplitAccuracy(knn, split[0], split[1]);
                MatrixProfile mp=new MatrixProfile(29);
                Instances[] mpSplit=new Instances[2];
                mpSplit[0]=mp.transform(split[0]);
                mpSplit[1]=mp.transform(split[1]);
                double acc3=ClassifierTools.singleTrainTestSplitAccuracy(knn, mpSplit[0], mpSplit[1]);
                meanAcc+=acc;
                meanAcc2+=acc2;
                meanAcc3+=acc3;
                System.out.println("Train Size ="+split[0].numInstances()+" 1NN acc = "+df.format(acc)+" 1NN acc normed="+df.format(acc2)+" 1NN MP acc ="+df.format(acc3));
            }
            System.out.println(" Sig ="+sig+" Mean 1NN Acc ="+df.format(meanAcc/r)+" Mean 1NN Norm Acc ="+df.format(meanAcc2/r)+" Mean 1NN MP Acc = "+df.format(meanAcc3/r));
        }
    }
    /** Writes one raw and one matrix-profile-transformed example dataset to C:\Temp. */
    public static void createExampleData() throws Exception{
        OutFile raw=new OutFile("C:\\Temp\\raw.csv");
        OutFile mpFile=new OutFile("C:\\Temp\\mp.csv");
        Model.setDefaultSigma(1.0);
        Model.setGlobalRandomSeed(1);
        int seriesLength=150;
        int[] casesPerClass=new int[]{50,50};
        Instances d=generateMatrixProfileData(seriesLength,casesPerClass);
        MatrixProfile mp=new MatrixProfile(29);
        Instances md=mp.transform(d);
        raw.writeLine(d.toString());
        mpFile.writeLine(md.toString());
    }
    public static void main(String[] args) throws Exception {
        createExampleData();
        System.exit(0);
        // Unreachable experimental code kept below for reference.
        test1NNClassifiers();
        Model.setDefaultSigma(1);
        Model.setGlobalRandomSeed(0);
        int seriesLength=500;
        int[] casesPerClass=new int[]{100,100};
        Instances d=generateMatrixProfileData(seriesLength,casesPerClass);
        Instances[] split=InstanceTools.resampleInstances(d, 0,0.1);
        OutFile of = new OutFile("C:\\Temp\\train.arff");
        of.writeString(split[0].toString()+"");
        of = new OutFile("C:\\Temp\\test.arff");
        of.writeString(split[1].toString()+"");
        MatrixProfile mp=new MatrixProfile(29);
        Instances m1=mp.transform(split[0]);
        of = new OutFile("C:\\Temp\\MPTrain.arff");
        of.writeString(m1.toString());   // was split[0]: the transform was computed but never written
        Instances m2=mp.transform(split[1]);
        of = new OutFile("C:\\Temp\\MPTest.arff");
        of.writeString(split[1].toString()+"\n\n");
        of.writeString(m2.toString());
    }
}
| 6,165 | 38.780645 | 173 | java |
tsml-java | tsml-java-master/src/main/java/statistics/simulators/SimulateShapeletData.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
/*
* This class contains a static method for generating parameterised simulated
* time-series datasets. The datasets are designed for shapelet approaches.
* This will produce a two-class problem.
*/
package statistics.simulators;
import fileIO.OutFile;
import java.util.Random;
import weka.core.Instances;
import statistics.simulators.ShapeletModel.ShapeType;
/**
*
* @author Jon Hills
* j.hills@uea.ac.uk
*/
public class SimulateShapeletData extends DataSimulator{
    /**
     * This method creates and returns a set of Instances representing a
     * simulated two-class time-series problem.
     *
     * @param seriesLength The length of the series. All time series in the
     * dataset are the same length.
     * @param casesPerClass An array of two integers indicating the number of
     * instances of class 0 and class 1.
     * @return Instances representing the time-series dataset. The Instances
     * returned will be empty if the casesPerClass parameter does not contain
     * exactly two values.
     */
    public static Instances generateShapeletData(int seriesLength, int []casesPerClass)
    {
        if( casesPerClass.length != 2)
        {
            System.err.println("ONLY WORKS WITH TWO CLASS PROBS AT THE MOMENT");
            int[] tmp = {0,0};
            casesPerClass = tmp;
        }
        ShapeletModel[] shapeMod = new ShapeletModel[casesPerClass.length];
        populateShapeletArray(shapeMod, seriesLength);
        DataSimulator sim = new DataSimulator(shapeMod);
        sim.setSeriesLength(seriesLength);
        sim.setCasesPerClass(casesPerClass);
        Instances d=sim.generateDataSet();
        return d;
    }
    /**
     * This is a support method for generateShapeletData. Builds one model per
     * class, redrawing the second until its shape type differs from the
     * first, so the classes are distinguishable by shape.
     *
     * @param s An array of two ShapeletModel models, representing the
     * simulated shapes inserted into the respective classes.
     * @param seriesLength The length of the series.
     */
    private static void populateShapeletArray(ShapeletModel [] s, int seriesLength)
    {
        double[] p1={seriesLength,1};
        double[] p2={seriesLength,1};
        //Create two ShapeleModels with different base Shapelets
        s[0]=new ShapeletModel(p1);
        ShapeletModel.ShapeType st=s[0].getShapeType();
        s[1]=new ShapeletModel(p2);
        while(st==s[1].getShapeType()){ //Force them to be different types of shape
            s[1]=new ShapeletModel(p2);
        }
    }
    // Sanity check that the global random seed makes generation repeatable:
    // the REP files should be identical to their non-REP counterparts.
    public static void checkGlobalSeedForIntervals(){
        Model.setDefaultSigma(0);
        Model.setGlobalRandomSeed(0);
        Instances d=generateShapeletData(100,new int[]{2,2});
        OutFile of = new OutFile("C:\\Temp\\randZeroNoiseSeed0.csv");
        of.writeLine(d.toString());
        Model.setDefaultSigma(0.1);
        Model.setGlobalRandomSeed(1);
        System.out.println(" NOISE 0");
        d=generateShapeletData(100,new int[]{2,2});
        of = new OutFile("C:\\Temp\\randUnitNoiseSeed1.csv");
        of.writeLine(d.toString());
        Model.setDefaultSigma(0);
        Model.setGlobalRandomSeed(0);
        System.out.println(" NO NOISE 1");
        d=generateShapeletData(100,new int[]{2,2});
        of = new OutFile("C:\\Temp\\randZeroNoiseSeed0REP.csv");
        of.writeLine(d.toString());
        Model.setDefaultSigma(0.1);
        Model.setGlobalRandomSeed(1);
        System.out.println(" NOISE 1");
        d=generateShapeletData(100,new int[]{2,2});
        of = new OutFile("C:\\Temp\\randUnitNoiseSeed1REP.csv");
        of.writeLine(d.toString());
    }
    // Test harness: exits after the seed check; the remaining code is
    // unreachable but kept for reference.
    public static void main(String[] args)
    {
        checkGlobalSeedForIntervals();
        System.exit(0);
        int[] casesPerClass = {5,5};
        int seriesLength = 100;
        Model.setDefaultSigma(0);
        Model.setGlobalRandomSeed(0);
        System.out.println("Model seed ="+Model.getRandomSeed());
        Instances data = SimulateShapeletData.generateShapeletData(seriesLength,casesPerClass);
        System.out.println("DATA "+data);
        System.out.println("Model seed AFTER ="+Model.getRandomSeed());
        OutFile out=new OutFile("C:\\temp\\ShapeletData.csv");
        out.writeString(data.toString());
    }
}
| 5,050 | 35.868613 | 95 | java |
tsml-java | tsml-java-master/src/main/java/statistics/simulators/SimulateSpectralData.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
/** Simulates AR parameters
*
*/
package statistics.simulators;
import tsml.classifiers.distance_based.DTWCV;
import tsml.transformers.RowNormalizer;
import tsml.transformers.FFT;
import tsml.transformers.ARMA;
import tsml.transformers.PACF;
import tsml.transformers.ACF;
import java.util.*;
import java.text.*;
//import utilities.OutFile;
import weka.core.*;
import fileIO.*;
import utilities.ClassifierTools;
import weka.classifiers.Classifier;
public class SimulateSpectralData extends DataSimulator{
    public static String path="C:\\Research\\Data\\RunLengthExperiments\\"; // default output directory (Windows-specific)
    public static double MIN_PARA_VALUE=-1;   // lower bound for random AR parameters
    public static double MAX_PARA_VALUE=1;    // upper bound for random AR parameters
    static int defaultMinSize=1;              // default minimum model order
    static int defaultMaxSize=4;              // default maximum model order
    double[][] parameters;                    // one AR parameter vector per class
    int minParas=defaultMinSize;              // minimum model order for this instance
    int maxParas=defaultMaxSize;              // maximum model order for this instance
    static double DIFF=0.15;                  // perturbation applied between class models
    static double THRESH=0.95;                // accuracy above which a model set is rejected as too easy
    public Random rand=new Random();
    /**
     * Default constructor: repeatedly draws random AR models (up to 10
     * attempts) and accepts the first set whose train/test accuracy under a
     * DTW classifier falls below THRESH, so the generated problem is not
     * trivially separable. The cases-per-class and series-length settings
     * used for the validation run are restored afterwards.
     */
    public SimulateSpectralData(){
        //Generate random model paras that are not trivial to classifier
        minParas=defaultMinSize;
        maxParas=defaultMaxSize;
        boolean validated=false;
        int maxRuns=0;
        // Save settings; validation temporarily overwrites them below.
        int[] temp=casesPerClass;
        int temp2=nosPerClass;
        while(maxRuns<10 && !validated){
            parameters=generateModels();
            nosClasses=parameters.length;
            models=new ArrayList<Model>(nosClasses);
            for(int i=0;i<nosClasses;i++)
                models.add(new ArmaModel(parameters[i]));
            setCasesPerClass(new int[]{20,20});
            setLength(100);
            try{
                Instances[] data=this.generateTrainTest();
                Classifier c =new DTWCV();
                double acc=ClassifierTools.singleTrainTestSplitAccuracy(c, data[0], data[1]);
                if(acc<THRESH)
                    validated=true;
                maxRuns++;
            }catch(Exception e){//Data generation failed
                maxRuns++;
                validated=false;
            }
        }
        casesPerClass=temp;
        nosPerClass=temp2;
    }
    /**
     * Seeded constructor.
     * NOTE(review): this() runs first and draws the random models with the
     * unseeded generator, so the seed set here does not influence the
     * accepted models, only subsequent draws — confirm this is intended.
     * @param seed seed for the instance's random generator
     */
    public SimulateSpectralData(int seed){
        this();
        rand=new Random(seed);
    }
    /** Sets the global bounds used when generating random AR parameters. */
    public static void setMinMaxPara(double a, double b){MIN_PARA_VALUE=a; MAX_PARA_VALUE=b;}
//Default random model with DataSimulator defaults (2 classes seriesLength)
final public double[][] generateModels(){
double[][] p=new double[nosClasses][];
p[0]=generateStationaryParameters(minParas,maxParas);
//Try fixed perturbation
for(int i=1;i<nosClasses;i++){
p[i]=new double[p[0].length];
for(int j=0;j<p[0].length;j++){
if(rand.nextDouble()<0.5)
p[i][j]=p[i-1][j]-DIFF;
else
p[i][j]=p[i-1][j]-DIFF;
if(p[i][j]<0)
p[i][j]=0;
else if(p[i][j]>1)
p[i][j]=1;
}
}
return p;
}
public void randomiseModel(){
parameters=new double[nosClasses][];
for(int i=0;i<parameters.length;i++)
parameters[i]=generateStationaryParameters(minParas,maxParas);
nosClasses=parameters.length;
models=new ArrayList<Model>(nosClasses);
for(int i=0;i<nosClasses;i++)
models.add(new ArmaModel(parameters[i]));
}
public void randomiseModel(int min,int max){
minParas=min;
maxParas=max;
randomiseModel();
}
public void randomiseModel(int min,int max, int numClasses){
nosClasses=numClasses;
minParas=min;
maxParas=max;
randomiseModel();
}
public void randomiseModel(int min,int max, int numClasses, int length){
nosClasses=numClasses;
seriesLength=length;
minParas=min;
maxParas=max;
randomiseModel();
}
    /**
     * Builds the simulator from explicit AR coefficient vectors, one per
     * class; the superclass records the parameters and the class count.
     * @param paras one AR coefficient vector per class
     */
    public SimulateSpectralData(double[][] paras){
        super(paras);
        for(int i=0;i<nosClasses;i++)
            models.add(new ArmaModel(paras[i]));
    }
public void initialise(){
for(Model a:models)
((ArmaModel)a).randomise();
}
public double[] generate(int length, int modelNos){
double[] d=new double[length];
Model a=models.get(modelNos);
for(int i=0;i<length;i++)
d[i]=a.generate();
return d;
}
/** Static test methods **/
public static void testARMA(){
double[][] paras;
int nosParas=(int)(Math.random()*11.0);
DecimalFormat dc = new DecimalFormat("###.###");
paras=new double[2][nosParas];
for(int j=0;j<nosParas;j++)
paras[0][j]=-.95+1.8*Math.random();
System.out.print("\nInput coefficients");
for(int i=0;i<paras[0].length;i++)
System.out.print(dc.format(paras[0][i])+"\t");
System.out.print("\n");
paras[0]=findCoefficients(paras[0]);
for(int j=0;j<nosParas;j++)
paras[1][j]=paras[0][j]-0.1+0.2*Math.random();
int n=200;
double[] d1 = generate(paras[0],n);
double[] d2 = generate(paras[0],n);
for(int i=0;i<d1.length;i++)
System.out.print(dc.format(d1[i])+"\t");
double[] f1= ARMA.fitAR(d1);
double[] f2= ARMA.fitAR(d2);
System.out.println("\n\nModel length ="+nosParas);
System.out.print("\nACTUAL MODEL 1=");
for(int i=0;i<paras[0].length;i++)
System.out.print(dc.format(paras[0][i])+"\t");
System.out.print("\nFITTED MODEL 1=");
for(int i=0;i<f1.length;i++)
System.out.print(dc.format(f1[i])+"\t");
System.out.println("\n\nModel length ="+nosParas);
System.out.print("\nACTUAL MODEL 2=");
for(int i=0;i<paras[0].length;i++)
System.out.print(dc.format(paras[1][i])+"\t");
System.out.print("\nFITTED MODEL 2=");
for(int i=0;i<f2.length;i++)
System.out.print(dc.format(f2[i])+"\t");
}
    /**
     * Generates series where a normalised AR (spectral) signal of length s/2
     * is embedded into white noise of total length s, at a start position
     * chosen uniformly at random (the same position for every case).
     * NOTE(review): the start position uses Math.random() rather than the
     * seeded Model RNG, so it is not reproducible from a seed - confirm.
     * @param s total series length; the embedded signal occupies s/2
     * @param casesPerClass number of cases to generate per class
     * @return the noise data set with the AR signal written over [start, start+s/2)
     */
    public static Instances generateSpectralEmbeddedData(int s,int[] casesPerClass){
        DataSimulator ds=new SimulateSpectralData();
        ds.setCasesPerClass(casesPerClass);
        int fullLength=s;
        int arLength=fullLength/2;
        ds.setLength(arLength);
        Instances data=ds.generateDataSet();
        RowNormalizer nc= new RowNormalizer();
        data=nc.transform(data);
        ArrayList<Model> noise=new ArrayList<>();
        WhiteNoiseModel wm=new WhiteNoiseModel();
        noise.add(wm);
        wm=new WhiteNoiseModel();
        noise.add(wm);
        DataSimulator ds2=new DataSimulator(noise); // By default it goes to white noise
        ds2.setCasesPerClass(casesPerClass);
        ds2.setLength(fullLength);
        Instances noiseData=ds2.generateDataSet();
        //Choose random start
        int startPos=(int)(Math.random()*(fullLength-arLength));
        // Overwrite the noise with the (normalised) AR signal, same window for all cases
        for(int j=startPos;j<startPos+arLength;j++){
            for(int k=0;k<data.numInstances();k++)
                noiseData.instance(k).setValue(j, data.instance(k).value(j-startPos));
        }
        return noiseData;
    }
public static void testFFT(String fileName){
//Debug code to test.
//Generate a model and series
// double[][] paras={{0.5},{0.7}};
int n=32;
int[] cases={1,1};
FFT ar = new FFT();
double[][] paras={{1.3532,0.4188,-1.2153,0.3091,0.1877,-0.0876,0.0075,0.0004},
{1.0524,0.9042,-1.2193,0.0312,0.263,-0.0567,-0.0019} };
Instances train=SimulateSpectralData.generateARDataSet(paras,n,cases);
//Fit and compare paramaters without AIC
OutFile of = new OutFile(fileName);
try{
DecimalFormat dc=new DecimalFormat("###.####");
Instances arTrain=ar.transform(train);
Instance in1=train.instance(0);
System.out.print("\nFitted Data Full >\t");
for(int i=0;i<in1.numAttributes()-1;i++){
of.writeString(in1.value(i)+",");
System.out.print(in1.value(i)+",");
}
of.writeString("\n");
Instance in2=arTrain.instance(0);
System.out.print("\nFitted FFT Full >\t");
for(int i=0;i<in2.numAttributes();i++){
System.out.print(dc.format(in2.value(i))+",");
of.writeString(dc.format(in2.value(i))+",");
}
ar.truncate(arTrain, n/4);
System.out.print("\nFitted FFT Truncated >\t");
of.writeString("\n");
for(int i=0;i<in2.numAttributes();i++){
System.out.print(dc.format(in2.value(i))+",");
of.writeString(dc.format(in2.value(i))+",");
}
}catch(Exception e){
System.out.println("Exception ="+e);
e.printStackTrace();
System.exit(0);
}
}
public static void testAIC(String fileName){
//Debug code to test.
//Generate a model and series
// double[][] paras={{0.5},{0.7}};
int n=100;
int[] cases={1,1};
ARMA ar = new ARMA();
ar.setUseAIC(false);
int maxLag=n/4;
ar.setMaxLag(maxLag);
double[][] paras={{1.3532,0.4188,-1.2153,0.3091,0.1877,-0.0876,0.0075,0.0004},
{1.0524,0.9042,-1.2193,0.0312,0.263,-0.0567,-0.0019} };
Instances train=SimulateSpectralData.generateARDataSet(paras,n,cases);
//Fit and compare paramaters without AIC
try{
DecimalFormat dc=new DecimalFormat("###.####");
Instances arTrain=ar.transform(train);
Instance in1=train.instance(0);
Instance in2=arTrain.instance(0);
System.out.print("Actual Model >\t\t");
for(int i=0;i<paras[0].length;i++)
System.out.print(dc.format(paras[0][i])+",");
System.out.print("\nFitted Model No AIC >\t");
for(int i=0;i<in2.numAttributes();i++)
System.out.print(dc.format(in2.value(i))+",");
ar.setUseAIC(true);
arTrain=ar.transform(train);
in2=arTrain.instance(0);
System.out.print("\nFitted Model AIC >\t");
for(int i=0;i<in2.numAttributes();i++)
System.out.print(dc.format(in2.value(i))+",");
//Debug the stages
OutFile of = new OutFile(fileName);
for(int i=0;i<in1.numAttributes()-1;i++)
of.writeString(in1.value(i)+",");
double[] d=in1.toDoubleArray();
double[] d2=new double[d.length-1];
for(int i=0;i<d2.length;i++)
d2[i]=d[i];
System.out.println("Auto Corellations >");
double[] autos=ACF.fitAutoCorrelations(d2,maxLag);
of.writeString("\n");
of.writeString("\n");
for(int i=0;i<autos.length;i++){
of.writeString(autos[i]+",");
System.out.println(autos[i]+",");
}
of.writeString("\n");
of.writeString("\n");
double[][] partials=PACF.formPartials(autos);
for(int i=0;i<partials.length;i++){
for(int j=0;j<partials[i].length;j++)
of.writeString(partials[i][j]+",");
of.writeString("\n");
}
int best=ARMA.findBestAIC(autos,partials,maxLag,d2);
System.out.println(" Best Length = "+best);
}catch(Exception e){
System.out.println("Exception ="+e);
System.exit(0);
}
}
/** Static methods to bypass object creation
*
*/
/**
* This generates a set of 2-Class AR 1 data sets with fixed size and varying differences
* between the two class models. Needs calibrating for complexity.
*
* Noise is always N(0,1).
*
* Range of values initially, model 1: 0.9
* model 2: 0.9 to -0.9 in 0.05 increments
*/
public static void generateAR1(int n, double[][] p, String fileName){
DecimalFormat df = new DecimalFormat("####.####");
int nosCases=200;
double diff=0.05;
String newline=System.getProperty("line.separator");
SimulateSpectralData ar= new SimulateSpectralData(p);
String arffHeader="@relation AR1Models\n";
for(int i=0;i<n;i++)
arffHeader+="@attribute T"+i+" real"+newline;
arffHeader+="@attribute targetClass {0,1}"+newline;
arffHeader+="@data"+newline+newline;
OutFile of =new OutFile(path+"AR1\\train"+fileName);
of.writeString(arffHeader);
for(int i=0;i<nosCases;i++){
ar.initialise();
double[] d=ar.generate(n,0);
for(int j=0;j<d.length;j++)
of.writeString(df.format(d[j])+",");
of.writeString("0"+newline);
d=ar.generate(n,1);
for(int j=0;j<d.length;j++)
of.writeString(df.format(d[j])+",");
of.writeString("1"+newline);
}
of =new OutFile(path+"AR1\\test"+fileName);
of.writeString(arffHeader);
for(int i=0;i<nosCases;i++){
ar.initialise();
double[] d=ar.generate(n,0);
for(int j=0;j<d.length;j++)
of.writeString(df.format(d[j])+",");
of.writeString("0\n");
d=ar.generate(n,1);
for(int j=0;j<d.length;j++)
of.writeString(df.format(d[j])+",");
of.writeString("1\n");
}
}
public static double[] findCoefficients(double[] c){
int n=c.length;
double[] a=new double[1];
double[] aNew=null;
a[0]=1;
// System.out.println(" n = "+n);
for(int j=1;j<=n;j++){
// System.out.println("Finding order "+j);
aNew=new double[j+1];
aNew[0]=1;
for(int i=1;i<j;i++)
aNew[i]=a[i]-a[i-1]*c[j-1];
aNew[j]=a[j-1]*-c[j-1];
a=aNew;
}
//Remove the constant term
double[] f=new double[n];
for(int i=0;i<n;i++)
f[i]=-a[i+1];
return f;
}
public static double[] generate(double[] paras,int n){
double[] d = new double[n];
ArmaModel ar=new ArmaModel(paras);
ar.randomise();
for(int i=0;i<n;i++)
d[i]=ar.generate();
return d;
}
/** generateStationaryParameters
* Generates a random AR model of degree between minParas and maxParas inclusive
*
* 1. Generate a number between minParas and maxParas
* 2. Generate random numbers between -0.9 and 0.9
* 3. find the stationary AR parameters associated with these parameters
*/
public static double[] generateStationaryParameters(int minP, int maxP){
double[] paras;
int nosParas=(int)(minP+(int)(1+maxP*Model.rand.nextDouble()));
paras=new double[nosParas];
for(int j=0;j<nosParas;j++)
paras[j]=MIN_PARA_VALUE+(MAX_PARA_VALUE-MIN_PARA_VALUE)*Model.rand.nextDouble();
paras=SimulateSpectralData.findCoefficients(paras);
return paras;
}
public static void armaTest(){
double[][] paras;
int nosParas=(int)(Model.rand.nextDouble()*11.0);
DecimalFormat dc = new DecimalFormat("###.###");
paras=new double[2][nosParas];
for(int j=0;j<nosParas;j++)
paras[0][j]=-.95+1.8*Model.rand.nextDouble();
System.out.print("\nInput coefficients");
for(int i=0;i<paras[0].length;i++)
System.out.print(dc.format(paras[0][i])+"\t");
System.out.print("\n");
paras[0]=findCoefficients(paras[0]);
for(int j=0;j<nosParas;j++)
paras[1][j]=paras[0][j]-0.1+0.2*Model.rand.nextDouble();
int n=200;
double[] d1 = generate(paras[0],n);
double[] d2 = generate(paras[0],n);
for(int i=0;i<d1.length;i++)
System.out.print(dc.format(d1[i])+"\t");
double[] f1= ARMA.fitAR(d1);
double[] f2= ARMA.fitAR(d2);
System.out.println("\n\nModel length ="+nosParas);
System.out.print("\nACTUAL MODEL 1=");
for(int i=0;i<paras[0].length;i++)
System.out.print(dc.format(paras[0][i])+"\t");
System.out.print("\nFITTED MODEL 1=");
for(int i=0;i<f1.length;i++)
System.out.print(dc.format(f1[i])+"\t");
System.out.println("\n\nModel length ="+nosParas);
System.out.print("\nACTUAL MODEL 2=");
for(int i=0;i<paras[0].length;i++)
System.out.print(dc.format(paras[1][i])+"\t");
System.out.print("\nFITTED MODEL 2=");
for(int i=0;i<f2.length;i++)
System.out.print(dc.format(f2[i])+"\t");
}
    /**
     * Generates a data set where class i is built from an AR model requested
     * at order p+i (class 0 at order p), so classes differ by model order
     * rather than by coefficient values.
     * @param p base model order for class 0
     * @param seriesLength number of observations per case
     * @param nosCases number of cases per class
     * @param normalize if true each row is normalised before returning
     * @return the generated data set
     */
    public static Instances generateOffByOneARDataSet(int p, int seriesLength, int[] nosCases, boolean normalize){
        double[][] paras=new double[nosCases.length][];
        paras[0]=generateStationaryParameters(p,p);
        for(int i=1;i<paras.length;i++){
            paras[i]=generateStationaryParameters(p+i,p+i);
        }
        Instances d=generateARDataSet(paras,seriesLength,nosCases);
        if(normalize){
            RowNormalizer norm=new RowNormalizer();
            d = norm.transform(d);
        }
        return d;
    }
public static Instances generateARDataSet(int seriesLength, int[] nosCases, boolean normalize){
return generateARDataSet(defaultMinSize,defaultMaxSize,seriesLength,nosCases,normalize);
}
public static Instances generateARDataSet(int minParas, int maxParas, int seriesLength, int[] nosCases, boolean normalize){
double[][] paras=new double[nosCases.length][];
for(int i=0;i<paras.length;i++)
paras[i]=generateStationaryParameters(minParas,maxParas);
Instances d=generateARDataSet(paras,seriesLength,nosCases);
if(normalize){
RowNormalizer norm=new RowNormalizer();
d = norm.transform(d);
}
return d;
}
public static Instances generateARDataSet(int minParas, int maxParas, int seriesLength, int[] nosCases){
return generateARDataSet(minParas,maxParas,seriesLength,nosCases,false);
}
    /**
     * Simulates an Instances data set from the given AR models. Attributes
     * are t1..tseriesLength plus a final nominal "Target" attribute with one
     * value per class, set as the class index.
     * @param p one AR coefficient vector per class
     * @param seriesLength number of observations per case
     * @param nosCases number of cases to generate for each class
     * @return the generated data set, class index set to the last attribute
     */
    public static Instances generateARDataSet(double[][] p, int seriesLength, int[] nosCases){
        SimulateSpectralData ar=new SimulateSpectralData(p);
        Instances data;
        FastVector atts=new FastVector();
        int totalCases=nosCases[0];
        for(int i=1;i<nosCases.length;i++)
            totalCases+=nosCases[i];
        for(int i=1;i<=seriesLength;i++){
            atts.addElement(new Attribute("t"+i));
        }
        FastVector fv=new FastVector();
        for(int i=0;i<ar.nosClasses;i++)
            fv.addElement(""+i);
        atts.addElement(new Attribute("Target",fv));
        data = new Instances("AR",atts,totalCases);
        double[] d;
        for(int i=0;i<ar.nosClasses;i++){
            for(int j=0;j<nosCases[i];j++){
                //Generate the series
                ar.initialise();
                d=ar.generate(seriesLength,i);
                //Add to an instance
                Instance in= new DenseInstance(data.numAttributes());
                for(int k=0;k<d.length;k++)
                    in.setValue(k,d[k]);
                //Add to all instances
                data.add(in);
                in=data.lastInstance(); // set the class on the stored copy (add may copy the instance)
                in.setValue(d.length,""+i);
            }
        }
        data.setClassIndex(seriesLength);
        return data;
    }
public static Instances generateFFTDataSet(int minParas, int maxParas, int seriesLength, int[] nosCases, boolean normalize){
double[][] paras=new double[nosCases.length][];
//Generate random parameters for the first FFT
Random rand= new Random();
SinusoidalModel[] sm=new SinusoidalModel[nosCases.length];
int modelSize=minParas+rand.nextInt(maxParas-minParas);
paras[0]=new double[3*modelSize];
for(int j=0;j<paras.length;j++)
paras[0][j]=rand.nextDouble();
for(int i=1;i<sm.length;i++){
paras[i]=new double[3*modelSize];
for(int j=0;j<paras.length;j++){
paras[i][j]=paras[0][j];
//Perturb it 10%
paras[i][j]+=-0.1+0.2*rand.nextDouble();
if(paras[i][j]<0 || paras[i][j]>1)
paras[i][j]=paras[0][j];
}
}
for(int i=0;i<sm.length;i++){
sm[i]=new SinusoidalModel(paras[i]);
sm[i].setFixedOffset(false);
}
// for(int i=0;i<paras.length;i++)
// paras[i]=generateStationaryParameters(minParas,maxParas);
DataSimulator ds = new DataSimulator(sm);
ds.setSeriesLength(seriesLength);
ds.setCasesPerClass(nosCases);
Instances d=ds.generateDataSet();
if(normalize){
RowNormalizer norm=new RowNormalizer();
d = norm.transform(d);
}
return d;
}
public static void main(String[] args){
// Instances all=SimulateSpectralData.generateARDataSet(minParas,maxParas,seriesLength,nosCasesPerClass,true);
System.out.println("Running SimulateAR test harness");
ArmaModel.setGlobalVariance(1);
int[] cases={2,2};
Instances d=generateARDataSet(1,1,200,cases,false);
ArmaModel.setGlobalVariance(10);
Instances d2=generateARDataSet(1,1,200,cases,false);
OutFile of=new OutFile("C:\\PhD\\Data\\ARMA\\Results\\AR1_1.csv");
OutFile of2=new OutFile("C:\\PhD\\Data\\ARMA\\Results\\AR1_10.csv");
of.writeString(d.toString());
of2.writeString(d2.toString());
RowNormalizer norm=new RowNormalizer();
d = norm.transform(d);
}
@Override
public String getParameters() {
throw new UnsupportedOperationException("Not supported yet.");
}
}
| 23,388 | 37.154976 | 129 | java |
tsml-java | tsml-java-master/src/main/java/statistics/simulators/SimulateWholeSeriesData.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package statistics.simulators;
import weka.core.Instances;
/**
 * Simulator producing "whole series" class differences: every class uses the
 * same sinusoid and the classes differ only in phase offset (optionally with
 * time-warping applied to each generated series).
 *
 * @author ajb
 */
public class SimulateWholeSeriesData extends DataSimulator {
    static DataSimulator sim;   // simulator built by the static generator below
    public SimulateWholeSeriesData(double[][] paras){
        super(paras);
        for(int i=0;i<nosClasses;i++)
            models.add(new SinusoidalModel(paras[i]));
    }
    /** Turns on time-warping for every class model. */
    public void setWarping(){
        for(Model m:models){
            ((SinusoidalModel)m).setWarp(true);
        }
    }
    /**
     * Builds a data set of the given length and class sizes from one
     * sinusoidal model per class, the classes differing only by offset.
     */
    public static Instances generateWholeSeriesdData(int seriesLength, int []casesPerClass)
    {
        SinusoidalModel[] sin = new SinusoidalModel[casesPerClass.length];
        populateWholeSeriesModels(sin);
        sim = new DataSimulator(sin);
        sim.setSeriesLength(seriesLength);
        sim.setCasesPerClass(casesPerClass);
        Instances d=sim.generateDataSet();
        return d;
    }
//We will use the same sine wave for every class, but just shift the offset
    private static void populateWholeSeriesModels(SinusoidalModel[] m){
        //Create two models with same interval but different shape.
        double[] paras= new double[3];
        //Offet changes per class
        paras[0]=0; //Model.rand.nextDouble();
        paras[1]=1;//Model.rand.nextDouble();
        paras[2]=1;//Model.rand.nextDouble();
        for(int i=0;i<m.length;i++){
            m[i]=new SinusoidalModel(paras);
            // NOTE(review): the offset is set to 1 for EVERY class after the
            // first, so with more than two classes all later classes share an
            // identical model - confirm whether the offset should vary with i.
            paras[0]=1; //Model.rand.nextDouble();
        }
    }
}
| 2,369 | 32.380282 | 91 | java |
tsml-java | tsml-java-master/src/main/java/statistics/simulators/SinusoidalModel.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package statistics.simulators;
import statistics.distributions.NormalDistribution;
import fileIO.*;
public class SinusoidalModel extends Model{
boolean warpSeries=false;
void setWarp(boolean w){warpSeries=w;}
static double maxAmp=3;
//RANGE 0 ..1, offset gives the starting phase of the cycle. Note this can be randomised for each series at reset() by setting
// fixedOffset to false. This shifts ALL the offsets by a fixed, random amount, to retain relative effect.
double[] offset;
boolean fixedOffset=true;
public void setFixedOffset(boolean b){fixedOffset=b;}
//RANGE: 0 ... 1, 100*frequency gives the number of cycles between 0 and pi()
double[] frequency;
//RANGE:0 .. 1, gives the max height: maxAmp
double[] amplitude;
int maxN=100;
double interval = Math.PI/maxN;
//Default min max range
static double defaultMin=0;
static double defaultMax=1;
    /**
     * Builds a model from a single concatenated parameter vector laid out as
     * (all offsets, all frequencies, all amplitudes).
     * NOTE(review): unlike the three-array constructors this does not set up
     * the error distribution itself - presumably the Model base class does;
     * confirm before relying on generate() immediately after construction.
     * @param p concatenated parameters; length must be divisible by 3
     * @throws IllegalArgumentException if p.length is not divisible by 3
     */
    public SinusoidalModel(double[] p){
        if(p.length%3!=0){
            System.out.println(" Error in Sinusoidal Model, the parameter list must be divisible by three (offset, frequency and amplitude");
            throw new IllegalArgumentException(" Error in Sinusoidal Model, the parameter list must be divisible by three (offset, frequency and amplitude");
        }
        offset=new double[p.length/3];
        frequency=new double[p.length/3];
        amplitude=new double[p.length/3];
        int count=0;
        for(int i=0;i<offset.length;i++)
            offset[i]=p[count++];
        for(int i=0;i<frequency.length;i++)
            frequency[i]=p[count++];
        for(int i=0;i<amplitude.length;i++)
            amplitude[i]=p[count++];
    }
public void setInterval(int n)
{
maxN=n;
interval = Math.PI/maxN;
}
/** from the base class. Simply split the array into three
*
* @param p
*/
@Override
public void setParameters(double[] p){
if(p.length%3!=0){
System.out.println(" Error in Sinusoidal Model, the parameter list must be divisible by three (offset, frequency and amplitude");
}
offset=new double[p.length/3];
frequency=new double[p.length/3];
amplitude=new double[p.length/3];
int count=0;
for(int i=0;i<offset.length;i++)
offset[i]=p[count++];
for(int i=0;i<frequency.length;i++)
frequency[i]=p[count++];
for(int i=0;i<amplitude.length;i++)
amplitude[i]=p[count++];
}
public SinusoidalModel(double[] offset, double[] frequency, double[] amplitude)
{
this.offset=new double[offset.length];
System.arraycopy(offset, 0, this.offset, 0, offset.length);
this.frequency=new double[frequency.length];
System.arraycopy(frequency, 0, this.frequency, 0, frequency.length);
this.amplitude=new double[amplitude.length];
System.arraycopy(amplitude, 0, this.amplitude, 0, amplitude.length);
t=0;
error = new NormalDistribution(0,variance);
}
public SinusoidalModel(double[] offset, double[] frequency, double[] amplitude, double s)
{
this.offset=new double[offset.length];
System.arraycopy(offset, 0, this.offset, 0, offset.length);
this.frequency=new double[frequency.length];
System.arraycopy(frequency, 0, this.frequency, 0, frequency.length);
this.amplitude=new double[amplitude.length];
System.arraycopy(amplitude, 0, this.amplitude, 0, amplitude.length);
t=0;
setVariance(s);
}
public void setSigma(double s){
setVariance(s);
}
@Override
public double generate(double x){
double res=0;
for(int i=0;i<offset.length;i++)
res+=((maxAmp)*amplitude[i])*(Math.sin(Math.PI*offset[i]+(2+100*frequency[i])*x));
res+= error.simulate();
return res;
}
public double generateError(){ return error.simulate();}
@Override
public double generate()
{
double res=0;
for(int i=0;i<offset.length;i++)
res+=((maxAmp)*amplitude[i])*(Math.sin(Math.PI*offset[i]+(2+100*frequency[i])*t));
this.t+=interval;
res+= error.simulate();
return res;
}
/** @Override
*
* @param l
* @return
*/
    @Override
    public double[] generateSeries(int l)
    {
        double[] data = new double[l];
        reset();
        setInterval(l); // interval = PI/l, so the series spans one [0, PI) sweep
        for(int k=0;k<l;k++)
            data[k] =generate();
        if(!warpSeries)
            return data;
        //Warping.
        // Stretch a window of ~10% of the series (offset points either side
        // of warpPoint) to twice its width, interpolating the new points.
        double offsetPercent=10;
        int offset=(int)(data.length*offsetPercent/100);
        double[] newD=new double[data.length];
        System.arraycopy(data,0,newD,0,data.length);
// Pick a random point somewhere between the first 10% and last 90%
        int warpPoint=(int)(offset+Math.random()*(data.length-offset*2));
// Warp offset points into offset*2 points
        newD[warpPoint+offset]=data[warpPoint];
 //Warp offset points to fill the gap.
        newD[warpPoint-offset]=data[warpPoint-offset];
        for(int i=1;i<=offset;i++){
            // NOTE(review): when i==offset the second line reads
            // data[warpPoint+1] and writes newD[warpPoint+offset], overwriting
            // the value assigned just above; also uses Math.random() rather
            // than the seeded Model RNG - confirm both are intended.
            newD[warpPoint-offset+2*i-1]=data[warpPoint-offset+i];
            newD[warpPoint-offset+2*i]=(data[warpPoint-offset+i]+data[warpPoint-offset+i+1])/2;
//            System.out.println(i+","+warpPoint+","+offset+","+data[warpPoint-offset+i]+","+newD[warpPoint-offset+2*i-1]+","+newD[warpPoint-offset+2*i]);
        }
 //       OutFile of = new OutFile("");
        return newD;
    }
static public double[][] generateSinusoidalData(SinusoidalModel[] models,int length,int nosPerModel)
{
double[][] data = new double[models.length*nosPerModel][];
for(int i=0;i<models.length;i++)
{
for(int j=0;j<nosPerModel;j++)
data[j+i*nosPerModel]= models[i].generateSeries(length);
}
return data;
}
/** @Override
*
*/
@Override
public void reset(){
//If th
super.reset();
if(!fixedOffset) //Shift all the offsets by a random amount
{
//Find the min and max of the whole offset, ta avoid shifting too much
double min=offset[0];
double max=offset[0];
for(int i=1;i<offset.length;i++){
if(min>offset[i])
min=offset[i];
if(max<offset[i])
max=offset[i];
}
//So now, any shift between -min to (1-max) should retain the relative offsets
double shift=-min+(1-max+min)*Math.random();
for(int i=1;i<offset.length;i++)
offset[i]+=shift;
}
}
/** A Load of static methods
*
* @param r
* @param min
* @param max
* @return
*/
public static SinusoidalModel generateRandomModel(int r, double min, double max)
{
double[] freq=new double[r];
double[] off = new double[r];
double[] amp=new double[r];
for(int i=0;i<r;i++)
{
// freq[i]=min+(max-min)*Distribution.RNG.nextDouble();
// off[i]=min+(max-min)*Distribution.RNG.nextDouble();
// amp[i]=min+(max-min)*Distribution.RNG.nextDouble();
}
SinusoidalModel a = new SinusoidalModel(off,freq,amp);
return a;
}
public static SinusoidalModel generateRandomModelAmp(SinusoidalModel m, double maxDeviation)
{
int r = m.frequency.length;
double[] freq=new double[r];
double[] off = new double[r];
double[] amp=new double[r];
double min,max;
for(int i=0;i<r;i++)
{
freq[i]=m.frequency[i];
off[i]=m.offset[i];
min=m.amplitude[i]-maxDeviation;
if(min<0) min=0;
max=m.amplitude[i]+maxDeviation;
if(max>1) max=1;
// amp[i]=min+(max-min)*Distribution.RNG.nextDouble();
}
SinusoidalModel a = new SinusoidalModel(off,freq,amp);
return a;
}
public static SinusoidalModel generateRandomModelOff(SinusoidalModel m, double maxDeviation)
{
int r = m.frequency.length;
double[] freq=new double[r];
double[] off = new double[r];
double[] amp=new double[r];
double min,max;
for(int i=0;i<r;i++)
{
freq[i]=m.frequency[i];
amp[i]=m.amplitude[i];
min=m.offset[i]-maxDeviation;
if(min<0) min=0;
max=m.offset[i]+maxDeviation;
if(max>1) max=1;
// off[i]=min+(max-min)*Distribution.RNG.nextDouble();
}
SinusoidalModel a = new SinusoidalModel(off,freq,amp);
return a;
}
public static SinusoidalModel generateRandomModelFreq(SinusoidalModel m, double maxDeviation)
{
int r = m.frequency.length;
double[] freq=new double[r];
double[] off = new double[r];
double[] amp=new double[r];
double min,max;
for(int i=0;i<r;i++)
{
amp[i]=m.amplitude[i];
off[i]=m.offset[i];
min=m.frequency[i]-maxDeviation;
if(min<0) min=0;
max=m.frequency[i]+maxDeviation;
if(max>1) max=1;
// freq[i]=min+(max-min)*Distribution.RNG.nextDouble();
}
SinusoidalModel a = new SinusoidalModel(off,freq,amp);
return a;
}
public static SinusoidalModel generateRandomModel(SinusoidalModel m, double maxDeviation)
{
int r = m.frequency.length;
double[] freq=new double[r];
double[] off = new double[r];
double[] amp=new double[r];
double min,max;
for(int i=0;i<r;i++)
{
min=m.frequency[i]-maxDeviation;
if(min<0) min=0;
max=m.frequency[i]+maxDeviation;
if(max>1) max=1;
// freq[i]=min+(max-min)*Distribution.RNG.nextDouble();
min=m.offset[i]-maxDeviation;
if(min<0) min=0;
max=m.offset[i]+maxDeviation;
if(max>1) max=1;
// off[i]=min+(max-min)*Distribution.RNG.nextDouble();
min=m.amplitude[i]-maxDeviation;
if(min<0) min=0;
max=m.amplitude[i]+maxDeviation;
// if(max>1) max=1;
// amp[i]=min+(max-min)*Distribution.RNG.nextDouble();
}
SinusoidalModel a = new SinusoidalModel(off,freq,amp);
return a;
}
public static SinusoidalModel generateRandomModel(int r,
double minO, double maxO,double minF, double maxF,double minA, double maxA)
{
double[] freq=new double[r];
double[] off = new double[r];
double[] amp=new double[r];
for(int i=0;i<r;i++)
{
// freq[i]=minF+(maxF-minF)*Distribution.RNG.nextDouble();
// off[i]=minO+(maxO-minO)*Distribution.RNG.nextDouble();
// amp[i]=0.5+minA+(maxA-minA)*Distribution.RNG.nextDouble();
// off[i]=min+(max-min)*Distribution.RNG.nextDouble();
// amp[i]=min+(max-min)*Distribution.RNG.nextDouble();
}
SinusoidalModel a = new SinusoidalModel(off,freq,amp);
return a;
}
public static SinusoidalModel generateRandomModel(int r)
{
return generateRandomModel(r,defaultMin,defaultMax);
}
public static SinusoidalModel perturbSinusoidalModel(SinusoidalModel base, int maxPercentDev)
{
int r= base.frequency.length;
double[] freq=new double[r];
double[] off = new double[r];
double[] amp=new double[r];
int percentDev;
for(int i=0;i<r;i++)
{
/* percentDev = Distribution.RNG.nextInt(maxPercentDev);
if(Distribution.RNG.nextDouble()<0.5)
freq[i]=base.frequency[i]*(100.0-percentDev)/100.0;
else
freq[i]=base.frequency[i]*(100.0+percentDev)/100.0;
percentDev = Distribution.RNG.nextInt(maxPercentDev);
if(Distribution.RNG.nextDouble()<0.5)
off[i]=base.offset[i]*(100.0-percentDev)/100.0;
else
off[i]=base.offset[i]*(100.0+percentDev)/100.0;
percentDev = Distribution.RNG.nextInt(maxPercentDev);
if(Distribution.RNG.nextDouble()<0.5)
amp[i]=base.amplitude[i]*(100.0-percentDev)/100.0;
else
amp[i]=base.amplitude[i]*(100.0+percentDev)/100.0;
*/
}
SinusoidalModel a = new SinusoidalModel(off,freq,amp);
return a;
}
public void randomiseOffset()
{
// for(int i=0;i<offset.length;i++)
// offset[i]=Distribution.RNG.nextDouble();
}
double[] getOffset(){ return offset;}
double[] getFrequency(){ return frequency;}
double[] getAmplitude(){return amplitude;}
void setAmplitude(double[] a){ amplitude=a;}
void setOffset(double[] o){ offset=o;}
public String toString(){
String str="";
for(int i=0;i<offset.length;i++)
str+=offset[i]+",";
str+="\n";
for(int i=0;i<frequency.length;i++)
str+=frequency[i]+",";
str+="\n";
for(int i=0;i<amplitude.length;i++)
str+=amplitude[i]+",";
str+="\n";
return str;
}
public static void sampleData()
{
OutFile of= new OutFile("randomFFTData_3Waves.csv");
int k=2;
int n=256;
int l=5;
SinusoidalModel[] models= new SinusoidalModel[k];
double[][] data = new double[k*l][n];
for(int i=0;i<k;i++)
{
models[i]= generateRandomModel(3);
for(int j=0;j<l;j++)
data[i*l+j]=models[i].generateSeries(n);
}
for(int i=0;i<n;i++)
{
for(int j=0;j<k*l;j++)
of.writeString(data[j][i]+",");
of.writeString("\n");
}
for(int i=0;i<k;i++)
of.writeLine(models[i].toString());
}
static public void main(String[] args){
System.out.println(" To do: test harness for Sinusoidal models");
}
}
| 14,790 | 33.884434 | 158 | java |
tsml-java | tsml-java-master/src/main/java/statistics/simulators/WhiteNoiseModel.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
/*
Basic model that just adds normal noise. This is actually the default behaviour in
model, so we can abstract model and do it here instead (at a later date)
*/
package statistics.simulators;
/**
 * Plain white-noise model: emits only the normally distributed noise provided
 * by the Model base class, with no signal component.
 *
 * @author ajb
 */
public class WhiteNoiseModel extends Model{
    /** Relies entirely on the Model base-class defaults. */
    public WhiteNoiseModel(){
        super();
    }
    /**
     * Sets the noise variance from p[0].
     * NOTE(review): the trailing comment says "mean and variance", but only
     * a variance is set and no mean parameter is read - confirm intended.
     */
    @Override
    public void setParameters(double[] p) {//Mean and variance of the noise
        setVariance(p[0]);
    }
}
| 1,229 | 27.604651 | 83 | java |
tsml-java | tsml-java-master/src/main/java/statistics/tests/DataPoint.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package statistics.tests;
public class DataPoint implements Comparable{
public double d;
public int level; //i index: sample indicator
public int position;//j index: data position indicator
public double rank;
public DataPoint(double data, int treatLevel, int pos)
{
d=data;
level=treatLevel;
position=pos;
}
public int sampleNumber(){ return level;}
public int level(){ return level;}
public int compareTo(Object other){
if(this.d<((DataPoint)other).d)
return -1;
else if (this.d>((DataPoint)other).d)
return 1;
return 0;
}
public boolean equals(Object other){
return d== ((DataPoint)other).d;
}
}
| 1,431 | 30.822222 | 76 | java |
tsml-java | tsml-java-master/src/main/java/statistics/tests/KruskalWallis.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package statistics.tests;
/*
Kruskal-Wallis non-parametric test for difference of k means
**For description used in this implementation, see Neave and Worthington
Distribution-Free Tests**
K-W test is equivalent to ANOVA f-test, except based on RANK rather than observed value.
Not analgous to the way Spearman's rank correlation coefficient is calculated.
K-W is a generalisation of the Mann-Whitney test for difference of two means
Assumes sample independence.
Assumes continuous distribution (no ties), although this is not essential (see below)
H_0: There is no difference is population means the samples were taken from
H_1: There are some differences
Test stat calculated thus
1. Find the rank of each data in terms of all samples pooled together
(For ties use the average position and adjustment described below if there are a lot of ties).
2. For each sample calculate the average ranks in the sample R_i and the overall mean
R
3. Find the weighted sum of the squared deviations from the mean (weighted by each sample size) W.
4. Calculate test stat
H=W*12/(N*(N+1))
where N is the total number of data, or more easily
H = [12/N(N+1)]* sum{R_i/n_i} - 3(N+1)
5. Large values of H are unlikely if H_0 is true. Not sure of the distribution of H under
H_0, probably ML test. Critical regions from tables
If there are a large number of ties an adjustment should be made. Suppose r values have more than
one occurence, then H should be divided by C below. Let t_i be the number of ties for a given data
value, then
C = 1 - sum{t^3 - t}/[N(N^2-1)]
H*=H/C
Implementation notes
1. Input must be csv format, c++ type comments are ignored
Data must be in ROWS, i.e. 1 row = i sample
2. First implementation assumes equal sample sizes at each level
3. First line of data should contain the number of treatment levels
4. Second line should store
4. The file Test1.csv contains the example from page 245 of Neive
The file Test2.csv contains the example from page 249 of Neive that has duplications
5. Output
*/
import fileIO.*;
import java.util.Arrays;
/**
 * Stand-alone Kruskal-Wallis one-way analysis of variance by ranks, driven
 * from a csv file (layout described at loadData). Prints the H statistic,
 * the tie-correction factor C and the corrected statistic H* = H/C.
 * Implementation follows Neave and Worthington, Distribution-Free Tests
 * (Test1.csv / Test2.csv hold the book's worked examples).
 */
public class KruskalWallis {
    static InFile f;                   //input file handle
    static int N; //Total data size
    static int k; //number of levels
    static int[] n; //number of data per level
    static String fileName = "LearningRate100Rules.csv"; //Hack, should be read in from args
    // static String fileName = "Test2.txt"; //Hack, should be read in from args
    static DataPoint[][] dataByLevel;  //data grouped by sample: [level][index]
    static DataPoint[] rankedData;     //all data pooled; sorted and ranked in main
    static boolean debug = false;

    /**
     * Loads the data, ranks the pooled sample, then computes and prints H,
     * the tie correction C and H* = H/C. Critical values must be looked up
     * in tables externally; nothing is returned.
     */
    public static void main(String args[]) {
        double H;        //Kruskal-Wallis statistic
        double C;        //tie-correction divisor
        double H_prime;  //H adjusted for ties, H* = H/C
        loadData();
        System.out.println("FILE ="+fileName+"\n Treatment levels ="+k+"\t Total data = "+N+" per level ="+n[0]);
        //Sort Data and assign ranks 1..N
        Arrays.sort(rankedData);
        for(int i=0;i<N;i++)
        {
            rankedData[i].rank=(i+1);
            if(debug)
                System.out.print(rankedData[i].d+"\t");
        }
        //Check for duplicates, counts number duplicated, and then recalculates ranks as the averages
        // System.out.print("\n\nPRIOR TO Duplicate\n");
        // for(int i=0;i<N;i++)
        // System.out.print(rankedData[i].rank+"\t");
        adjustRanksForDuplicates();
        //Find rank sums R_i for each sample
        double[] rankSums= new double[k];
        for(int i=0;i<k;i++)
        {
            rankSums[i]=0;
            for(int j=0;j<n[i];j++)
                rankSums[i]+=dataByLevel[i][j].rank;
        }
        // Find H stat: H = [12/N(N+1)] * sum{R_i^2/n_i} - 3(N+1)
        // NOTE(review): N*(N+1) is evaluated in int arithmetic; for very
        // large N this could overflow before the division -- confirm N small.
        H=0;
        for(int i=0;i<k;i++)
            H+=rankSums[i]*rankSums[i]/n[i];
        H=H*12/(N*(N+1));
        H-=3*(N+1);
        System.out.println("\n\n H stat = "+H);
        //Find C stat: C = 1 - sum{t^3 - t}/[N(N^2-1)], t = size of each tied run
        int i=0;
        int nextPos=0;
        int t=0;
        int tSum=0,t3Sum=0;
        while(i<N)
        {
            //A rank differing from its 1-based position marks the start of a
            //run of ties (those ranks were averaged). For a run ending at
            //1-based position e, averagedRank = (i+1+e)/2, so 2*rank-i = e+1;
            //nextPos = e is then the 0-based index of the first element after
            //the run, and the run length is e-i.
            if(rankedData[i].rank!=(i+1))
            {
                t=(int)(2*rankedData[i].rank)-i;
                // System.out.println("\n t = "+t+"i = "+i);
                nextPos = t-1;   //0-based index just past the tied run
                t= (t-i-1);      //run length
                tSum+=(t);
                t3Sum+=t*t*t;
                i=nextPos;       //skip the rest of the run
            }
            else
                i++;
        }
        System.out.println("\n\n t sum = "+tSum+"\t t^3 sum = "+t3Sum);
        C=1- ((double)t3Sum-tSum)/(N*(N*N-1));
        H_prime = H/C;
        System.out.println("\n\n C = "+C+"\t H* = "+H_prime);
    }

    /**
     * Reads the csv file: first the number of levels k, then the k sample
     * sizes, then the data one sample (row) at a time. Fills dataByLevel and
     * the pooled (still unsorted) rankedData array, which share DataPoints.
     */
    public static void loadData()
    {
        f = new InFile(fileName);
        k = f.readInt();
        n = new int[k];
        N=0;
        dataByLevel = new DataPoint[k][];
        for(int i=0;i<k;i++)
        {
            n[i]=f.readInt();
            N+=n[i];
            dataByLevel[i]=new DataPoint[n[i]];
        }
        double d;
        int c=0;
        rankedData = new DataPoint[N];
        for(int i=0;i<k;i++)
        {
            for(int j=0;j<n[i];j++)
            {
                d = f.readDouble();
                dataByLevel[i][j]=new DataPoint(d,i,j);
                rankedData[c]=dataByLevel[i][j];
                c++;
            }
        }
        if(debug)
        {
            for(int i=0;i<k;i++)
            {
                for(int j=0;j<n[i];j++)
                    System.out.print(dataByLevel[i][j].d+"\t");
                System.out.print("\n");
            }
        }
    }

    /**
     * Replaces the ranks of runs of tied values with the average rank of the
     * run. PRE: rankedData is sorted with ranks already set to 1..N.
     */
    public static void adjustRanksForDuplicates()
    {
        DataPoint first=rankedData[0];  //first element of the current run of equal values
        int count=0;                    //duplicates of first seen so far
        int pos=0;                      //unused
        double s,e;
        for(int i=1;i<N;i++)
        {
            if(rankedData[i].d!=first.d)
            {
                if(count>0)
                {
                    //run of count+1 ties ended at index i-1: average its ranks
                    s=first.rank;
                    e=i;
                    // System.out.print("First Rank = "+s+"\nLast Rank ="+e+"\n count = "+count+"\n");
                    for(int j=0;j<=count;j++)
                        rankedData[(int)(s-1)+j].rank=(e+s)/2;
                    count=0;
                }
                first=rankedData[i];
            }
            else
                count++;
        }
        if(count>0)
        {
            //final run reaches the end of the array
            s=first.rank;
            e=N;
            // System.out.print("First Rank = "+s+"\nLast Rank ="+e+"\n count = "+count+"\n");
            for(int j=0;j<=count;j++)
                rankedData[(int)(s-1)+j].rank=(e+s)/2;
            count=0;
        }
    }
}
| 6,109 | 26.155556 | 107 | java |
tsml-java | tsml-java-master/src/main/java/statistics/tests/ManySampleTests.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package statistics.tests;
import fileIO.*;
import java.util.Arrays;
public class ManySampleTests extends Tests{
/*
Kruskal-Wallis non-parametric test for difference of k means
**For description used in this implementation, see Neave and Worthington
Distribution-Free Tests**
K-W test is equivalent to ANOVA f-test, except based on RANK rather than observed value.
Analogous to the way Spearman's rank correlation coefficient is calculated.
K-W is a generalisation of the Mann-Whitney test for difference of two means
Assumes sample independence.
Assumes continuous distribution (no ties), although this is not essential (see below)
H_0: There is no difference in population means the samples were taken from
H_1: There are some differences
Test stat calculated thus
1. Find the rank of each data in terms of all samples pooled together
(For ties use the average position and adjustment described below if there are a lot of ties).
2. For each sample calculate the average ranks in the sample R_i and the overall mean
R
3. Find the weighted sum of the squared deviations from the mean (weighted by each sample size) W.
4. Calculate test stat
H=W*12/(N*(N+1))
where N is the total number of data, or more easily
H = [12/N(N+1)]* sum{R_i^2/n_i} - 3(N+1)
5. Large values of H are unlikely if H_0 is true. Not sure of the distribution of H under
H_0, probably ML test. Critical regions from tables
If there are a large number of ties an adjustment should be made. Suppose r values have more than
one occurrence, then H should be divided by C below. Let t_i be the number of ties for a given data
value, then
C = 1 - sum{t^3 - t}/[N(N^2-1)]
H*=H/C
The file Test1.csv contains the example from page 245 of Neave
The file Test2.csv contains the example from page 249 of Neave that has duplications
ASSUMES
1. Data loaded
2. Data ranked and sorted
3. Ranks adjusted for duplicates (2 and 3 are done in the loading methods)
*/
    /**
     * Computes the tie-corrected Kruskal-Wallis statistic H* = H/C over the
     * statically loaded data (see Tests.loadData) and stores it in T.testStat.
     * (Method name is a historical misspelling of Kruskal-Wallis; kept for
     * compatibility with existing callers.)
     * PRE: one of the Tests.loadData methods has been called, so the pooled
     * data is sorted, ranked and tie-adjusted.
     */
    public static void kruskalWallace(TestResults T)
    {
        if(!loaded)
        {
            System.out.println("ERROR: Data not loaded, cannot perform testg");
            return;
        }
        double H;        //Kruskal-Wallis statistic
        double C;        //tie-correction divisor
        double H_prime;  //H adjusted for ties, H* = H/C
        T.testName="Kruskal Wallace";
        //Find rank sums R_i for each sample
        double[] rankSums= new double[k];
        for(int i=0;i<k;i++)
        {
            rankSums[i]=0;
            for(int j=0;j<n[i];j++)
                rankSums[i]+=dataByLevel[i][j].rank;
        }
        // Find H stat: H = [12/N(N+1)] * sum{R_i^2/n_i} - 3(N+1)
        H=0;
        for(int i=0;i<k;i++)
            H+=rankSums[i]*rankSums[i]/n[i];
        H=H*12/(N*(N+1));
        H-=3*(N+1);
        System.out.println("\n\n H stat = "+H);
        //Find C stat: C = 1 - sum{t^3 - t}/[N(N^2-1)], t = size of each tied run
        int i=0;
        int nextPos=0;
        int t=0;
        int tSum=0,t3Sum=0;
        while(i<N)
        {
            //A rank differing from its 1-based position marks the start of a
            //run of averaged (tied) ranks; 2*rank-i recovers the 1-based end
            //position + 1 of the run, from which the run length follows.
            if(rankedData[i].rank!=(i+1))
            {
                t=(int)(2*rankedData[i].rank)-i;
                // System.out.println("\n t = "+t+"i = "+i);
                nextPos = t-1;   //0-based index just past the tied run
                t= (t-i-1);      //run length
                tSum+=(t);
                t3Sum+=t*t*t;
                i=nextPos;       //skip the rest of the run
            }
            else
                i++;
        }
        System.out.println("\n\n t sum = "+tSum+"\t t^3 sum = "+t3Sum);
        C=1- ((double)t3Sum-tSum)/(N*(N*N-1));
        H_prime = H/C;
        T.testStat=H_prime;
    }

    /** Demonstration entry point: loads a hard-coded file and prints H*. */
    public static void main(String[] args)
    {
        String fileName = "C:/JavaSource/Clustering/Clustering_Data/Java_Application_Release/Experiment3/Exp3NoRandomShocks.txt";
        loadData(fileName);
        TestResults t =new TestResults("Blank");
        t.h0=0;
        t.type=0;   //two-sided alternative
        kruskalWallace(t);
        System.out.println(" H prime = "+t.testStat);
    }
}
| 4,103 | 27.5 | 123 | java |
tsml-java | tsml-java-master/src/main/java/statistics/tests/OneSampleTests.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package statistics.tests;
/* To Test whether population median is zero or not
USAGE:
OneSampleTests test=new OneSampleTests();
data: double[] data;
String str=test.performTests(data);
OUTPUT: A comma separated string with testName,testStatistic,pValue (assuming alpha 0.05)
*/
import statistics.distributions.NormalDistribution;
import statistics.distributions.BinomialDistribution;
import java.util.*;
import fileIO.*;
import java.text.DecimalFormat;
import statistics.distributions.StudentDistribution;
import statistics.tests.DataPoint;
import statistics.tests.TestResults;
import statistics.tests.Tests;
/**
 * One-sample location tests: Student's t, the sign test and the Wilcoxon
 * signed rank test, all testing whether the population median (mean for the
 * t-test) is zero.
 * USAGE:
 *   OneSampleTests test=new OneSampleTests();
 *   String str=test.performTests(data);
 * OUTPUT: comma separated testName,testStatistic,pValue triples.
 */
public class OneSampleTests extends Tests{
    public static boolean beQuiet = false;
    private static DecimalFormat df = new DecimalFormat("##.########");
    public static DataPoint[] absRankedData;  //work space for the signed rank test

    /**
     * Runs all three tests against H0: median (mean) = 0 and returns
     * "T_Test,stat,p,Sign_Test,stat,p,Sign_Rank_Test,stat,p".
     * Degenerate input (all values identical) short-circuits to p=0.5.
     */
    public String performTests(double[] data){
        DataPoint[] d=new DataPoint[data.length];
        boolean allthesame=true;
        for(int i=0;i<data.length;i++){
            d[i]=new DataPoint(data[i],0,i);
            if(allthesame && data[i]!=data[0])
                allthesame=false;
        }
        if(allthesame){
            System.out.println("All the values are identical, not performing tests");
            return "T_Test,0,0.5,Sign_Test,0,0.5,Sign_Rank_Test,0,0.5";
        }
        Arrays.sort(d);   //sign test and sign rank test assume sorted data
        TestResults test=new TestResults("T_Test");
        studentTTest(test,d);
        test.findPValue();
        String str="T_Test,"+test.testStat+","+df.format(test.pValue)+",";
        test=new TestResults("SignTest");
        signTest(test,d);
        test.findPValue();
        str+="Sign_Test,"+test.testStat+","+df.format(test.pValue)+",";
        //(a stray duplicated findPValue() call here was removed: it merely
        //recomputed the already-stored sign test p-value)
        test=new TestResults("WilcoxonSignRankTest");
        wilcoxonSignRank(test,d);
        test.findPValue();
        str+="Sign_Rank_Test,"+test.testStat+","+df.format(test.pValue);
        return str;
    }

    /**
     * One-sample t-test of H0: mean = 0. Stores the statistic, the degrees
     * of freedom and a Student distribution in t; the p-value is found by
     * the caller via t.findPValue().
     */
    public static void studentTTest(TestResults t, DataPoint[] data){
        //Find mean and var. Var found the slow way for clarity
        double mean=0,var=0;
        for(int i=0;i<data.length;i++)
            mean+=data[i].d;
        mean/=data.length;
        for(int i=0;i<data.length;i++)
            var+=(mean-data[i].d)*(mean-data[i].d);
        var/=data.length-1;
        //Find test stat: t = mean / sqrt(var/n) (H0 mean is 0)
        double tStat=(mean)/Math.sqrt(var/data.length);
        t.testStat=tStat;
        t.df1=data.length-1;
        t.dist=new StudentDistribution(t.df1);
    }

    public static void signTest(TestResults T, DataPoint[] ranked)
    /* Defined by J. Arbuthnott in 1710!
    s1=nos less than \phi_0
    s2=nos greater than \phi_0
    Ties are shared equally
    PRE: ASSUMES Data pre sorted
    Note this does not require sorted data, so its a bit
    wasteful using the loadData if signTest is being performed.
    BUT given it is presorted, we can speed up
    the calculation with a binary search
    It sets the distribution of the test statistic
    */
    {
        T.testName="signTest";
        double s1=0,s2=0;
        DataPoint h0 = new DataPoint(T.h0,0,0);
        int adjN=ranked.length;   //effective n, reduced if a tie on h0 must be dropped
        int pos=Arrays.binarySearch(ranked,h0);
        /* From API
        index of the search key, if it is contained in the list;
        otherwise, (-(insertion point) - 1). The insertion point is
        defined as the point at which the key would be inserted into
        the list: the index of the first element greater than the key,
        or list.size(), if all elements in the list are less than the #
        specified key. Note that this guarantees that the return value
        will be >= 0 if and only if the key is found.
        Tested with file signTestExample.txt
        */
        if(pos>=0)
        //Value present, need to adjust for duplicates
        {
            int dupCount=1;
            int left=pos-1;    //last index strictly below the tied block
            int right=pos+1;   //first index strictly above the tied block
            while(left>=0 && ranked[pos].equals(ranked[left]))
            {
                // System.out.println("Left ="+ranked[left].d);
                left--;
                dupCount++;
            }
            while(right< ranked.length && ranked[pos].equals(ranked[right]))
            {
                right++;
                dupCount++;
            }
            if(dupCount%2==1&& adjN<50) //If using a binomial want a whole number!
            {
                adjN--;
                dupCount-=1;
            }
            // System.out.println("Duplicate count ="+dupCount);
            s1=left+1+dupCount/2.0;             //count below h0, ties split equally
            s2=(ranked.length-right)+dupCount/2.0; //count above h0
            // System.out.println("left = "+left+"\t right = "+right+"\t lower ="+s1+"\thigher = "+s2);
        }
        else
        {
            //Number smaller: insertion point = -pos-1
            s1=-pos-1;
            //Number larger. BUG FIX: was s2=adjN-s2, which (with s2 still 0)
            //always set s2 to the whole sample size instead of the count of
            //values above h0; the original carried a "THIS MAY BE A BUG"
            //warning here.
            s2=adjN-s1;
            // System.out.println("pos = "+pos+"\tlower ="+s1+"\thigher = "+s2);
        }
        if(T.type==-1)
            T.testStat=s2;
        else if(T.type==1)
            T.testStat=s1;
        else if(T.type==0)
            T.testStat=(s1<s2)?s1:s2;   //two sided: use the smaller count
        //Test distribution: exact binomial for small n, normal approximation otherwise
        if(adjN<50)
            T.dist = new BinomialDistribution(adjN,0.5);
        else
            T.dist = new NormalDistribution(adjN/2.0,Math.sqrt(adjN)/2.0);
        T.findCriticalValue();
        T.findPValue();
    }

    public static void wilcoxonSignRank(TestResults T, DataPoint[] ranked)
    /*
    1. Find the differences from hypothesised median
    2. Rank by absolute values
    3. Sum ranks for positive
    Need to rerank the data
    */
    {
        T.testName="wilcoxonSignRank";
        absRankedData=new DataPoint[ranked.length];
        double diff;
        int nonZeroDifferences=0;   //zero differences are dropped from the test
        for(int j=0;j<ranked.length;j++)
        {
            // System.out.println(" Data = "+ranked[j].d+"\t in Pos "+ranked[j].position);
            diff=(ranked[j].d>T.h0)?ranked[j].d-T.h0:T.h0-ranked[j].d;
            if(diff>0){
                //position stores the index back into ranked for sign recovery
                absRankedData[nonZeroDifferences]=new DataPoint(diff,0,j);
                nonZeroDifferences++;
            }
        }
        if(nonZeroDifferences==0){
            System.out.println(" The two series are identical, this should have been handled before this call to Wilcoxon Sign Rank test. Setting up test to accept the null");
            T.dist = new NormalDistribution(0,1);
            T.testStat=0;
            T.findCriticalValue();
            T.findPValue();
            return;
        }
        if(nonZeroDifferences<ranked.length)  //trim dropped zero differences
            absRankedData=Arrays.copyOf(absRankedData,nonZeroDifferences);
        //Rank the absolute differences, averaging ties
        Arrays.sort(absRankedData);
        for(int i=0;i<absRankedData.length;i++)
            absRankedData[i].rank=(i+1);
        adjustRanksForDuplicates(absRankedData);
        //Sum the ranks of the negative and positive differences separately
        double rankSumUnder=0,rankSumOver=0;
        for(int j=0;j<nonZeroDifferences;j++)
        {
            diff=ranked[absRankedData[j].position].d-T.h0;
            if (!beQuiet)
                System.out.println(" Rank = "+ j +" Pos ="+absRankedData[j].position+" Val ="+ranked[absRankedData[j].position].d+" diff ="+diff+" Abs Val ="+absRankedData[j].d);
            if(diff<0)
            {
                if (!beQuiet)
                    System.out.println(" Rank = "+ absRankedData[j].rank +" Value ="+ranked[absRankedData[j].position].d+"\t diff ="+diff);
                rankSumUnder+=absRankedData[j].rank;
            }
            else
                rankSumOver+=absRankedData[j].rank;
        }
        if(T.type==1)
            T.testStat=rankSumUnder;
        else if(T.type==-1)
            T.testStat=rankSumOver;
        else
            T.testStat=(rankSumOver<rankSumUnder)?rankSumUnder:rankSumOver;
        //Haven't used the exact distribution, but it is possible for
        //small N, see page 74 of Neave
        T.dist = new NormalDistribution(nonZeroDifferences*(nonZeroDifferences+1)/4.0,Math.sqrt(nonZeroDifferences*(nonZeroDifferences+1)*(2*nonZeroDifferences+1)/24.0));
        T.findCriticalValue();
        T.findPValue();
    }

    /** Demonstration: reads paired data from a hard-coded file and tests the differences. */
    public static void main(String[] args){
        InFile inf=new InFile("C:\\Users\\ajb\\Dropbox\\Results\\DebugFiles\\TwoSampleTest.csv");
        inf.readInt();            //first header value unused, but must be consumed
        int m=inf.readInt();
        double[] diff=new double[m];
        for (int i = 0; i < diff.length; i++)
            diff[i]=inf.readDouble();
        for (int i = 0; i < diff.length; i++)
            diff[i]-=inf.readDouble();
        OneSampleTests one= new OneSampleTests();
        System.out.println(one.performTests(diff));
        //Sign Test
    }
}
| 9,627 | 35.195489 | 178 | java |
tsml-java | tsml-java-master/src/main/java/statistics/tests/ResidualTests.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package statistics.tests;
import statistics.distributions.FisherDistribution;
import statistics.distributions.NormalDistribution;
import fileIO.OutFile;
import java.io.FileReader;
import java.util.Arrays;
import statistics.transformations.ArrayPair;
import statistics.transformations.LinearModel;
import statistics.transformations.MatrixSort;
import weka.classifiers.functions.LinearRegression;
import weka.core.Instance;
import weka.core.Instances;
/**
*
* Class to test residuals.
*/
/**
 * Static tests on regression residuals: Goldfeld-Quandt and an
 * Anscombe-style correlation check for heteroscedasticity, a runs test for
 * residual independence, and a Kolmogorov-Smirnov check for normality.
 */
public class ResidualTests {
    /**
     * Goldfeld-Quandt test for heteroscedasticity: sorts the cases by the
     * attribute at index pos, fits separate regressions to the first and
     * last thirds, and returns the ratio of their (df-weighted) SSEs.
     * Returns the test stat; the F-based probability is only printed.
     */
    public static double goldfeldQuandt(double[][]X, double[] Y, int pos)
    {
        //Copy data and sort by selected attribute
        MatrixSort[] ms = new MatrixSort[Y.length];
        for(int i=0;i<Y.length;i++)
        {
            double[] x = new double[X.length];
            for(int j=0;j<x.length;j++)
                x[j]=X[j][i];
            ms[i]=new MatrixSort(x,Y[i],pos);
        }
        Arrays.sort(ms);
        //Split into three sets size n1, n2=n/3 and n3
        int p=X.length-1;   //number of regressors
        int n=Y.length;
        int n2=n/3;         //middle third, discarded
        int n1=(n-n2)/2;
        int n3=n-n1-n2;
        System.out.println("n1 = "+n1+" n2 = "+n2+" n3 = "+n3);
        double[][]newX = new double[X.length][n1];
        double[] newY= new double[n1];
        for(int i=0;i<n1;i++)
        {
            //NOTE(review): newY takes the UNSORTED Y[i] while newX takes the
            //sorted ms[i].x, so responses may be paired with the wrong rows;
            //presumably this should use the response carried inside ms[i] --
            //confirm against MatrixSort.
            newY[i]=Y[i];
            for(int j=0;j<X.length;j++)
                newX[j][i]=ms[i].x[j];
        }
        // Fit regression to first set, find SSE
        LinearModel lm =new LinearModel(newX,newY);
        lm.fitModel();
        lm.findStats();
        double s1=lm.getSSE();
        // Fit regression to second set, find SSE
        newX = new double[X.length][n3];
        newY= new double[n3];
        for(int i=n1+n2;i<n;i++)
        {
            //NOTE(review): same unsorted-Y pairing concern as above.
            newY[i-n1-n2]=Y[i];
            for(int j=0;j<X.length;j++)
                newX[j][i-n1-n2]=ms[i].x[j];
        }
        lm =new LinearModel(newX,newY);
        lm.fitModel();
        lm.findStats();
        double s3=lm.getSSE();
        //Find q = (n1-p-1)SSE1/(n3-p-1)SSE3
        double q;
        q=((n3-p-1)*s1)/((n1-p-1)*s3);
        System.out.println(" s1 = "+s1+" s3 = "+s3+" q = "+q+" s3/s1"+s3/s1);
        FisherDistribution f;
        //NOTE(review): both branches construct the SAME distribution, so the
        //s1>s3 test is pointless; one branch presumably should swap the
        //degrees of freedom -- confirm intent before changing.
        if(s1>s3)
            f = new FisherDistribution(n3-p-1, n1-p-1);
        else
            f= new FisherDistribution(n3-p-1, n1-p-1);
        double prob = f.getCDF(q);
        double prob2=f.getDensity(q);
        System.out.println(" prob = "+prob+" density = "+prob2);
        //This follows an F(n1-p-1,k3-p-1) distribution under the null of homoscedastic
        return q;
    }

    /**
     * Runs test for independence of residuals: sorts residuals by predicted
     * value, counts sign runs, and returns the normal-approximation z score.
     */
    public static double runsTest(double[] pred, double[] residuals)
    {
        double runsCount=1;   //number of sign runs seen so far
        double p=0;           //number of positive residuals
        boolean positive=false, currentPositive;
        ArrayPair[] ap = new ArrayPair[pred.length];
        for(int i=0;i<pred.length;i++)
        {
            ap[i]=new ArrayPair();
            ap[i].predicted=pred[i];
            ap[i].residual=residuals[i];
        }
        Arrays.sort(ap);
        if(ap[0].residual>0)
        {
            positive=true;
            p=1;
        }
        for(int i=1;i<ap.length;i++)
        {
            if(ap[i].residual>0)
            {
                currentPositive=true;
                p++;
            }
            else
                currentPositive=false;
            if(currentPositive!=positive)   //sign change => new run
                runsCount++;
            positive=currentPositive;
        }
        double n=ap.length;
        // System.out.println("Runs count = "+runsCount+" number of ones = "+p);
        //Calculate probs via normal approximation to the runs distribution
        double m=(2.0*p*(n-p))/(n-1);
        //NOTE(review): the original author flagged the variance formula as
        //suspect ("Something wrong with v!?!"); the usual form has (n-1) in
        //the denominator's second factor -- verify before relying on this.
        double v=(2*p*(n-p)*(2*p*(n-p)-n))/(n*n*(n-1));
        System.out.println("m = "+m+" v = "+v);
        //Better to use the weka normal distribution
        double res=(runsCount-m)/Math.sqrt(v);
        return res;
    }

    /** Kolmogorov-Smirnov normality check against N(0,1). */
    public static double kolmogorovSmirnoff(double[] residuals)
    {
        return kolmogorovSmirnoff(residuals,1);
    }

    /**
     * Kolmogorov-Smirnov statistic for residuals against a zero-mean normal
     * with the given variance parameter: the maximum deviation between the
     * empirical step function and the normal CDF at the sorted residuals.
     * (The input array is not modified; a copy is sorted.)
     */
    public static double kolmogorovSmirnoff(double[] residuals, double var)
    {
        //Normality test for residuals: Kolmogorov Smirnoff
        int n=residuals.length;
        double[] expected=new double[n];    //empirical CDF steps i/n
        double[] observed=new double[n+1];  //normal CDF at each sorted residual
        double[] residCopy= new double[residuals.length];
        System.arraycopy(residuals, 0, residCopy, 0, residuals.length);
        observed[n]=1;
        NormalDistribution norm = new NormalDistribution(0,var);
        Arrays.sort(residCopy);
        //Find out the Expected normal values for the stepped probabilities
        //Set probs
        for(int i=0;i<n;i++)
            expected[i]=(i+1)/(double)n;
        //Find inverses
        for(int i=0;i<n;i++)
            observed[i]=norm.getCDF(residCopy[i]);
        //Find max deviation. NOTE(review): compares expected[i] with
        //observed[i+1], i.e. the CDF one step ahead -- confirm this offset is
        //the intended one-sided KS form.
        double max=0;
        for(int i=0;i<n;i++)
        {
            if(Math.abs(expected[i]-observed[i+1])>max)
                max=Math.abs(expected[i]-observed[i+1]);
        }
        return max;
    }

    /** Alias for testHeteroscadisity, named after Anscombe's procedure. */
    public static double anscombeProcedure(double[] actual, double[] predicted)
    {
        return testHeteroscadisity(actual,predicted);
    }

    /**
     * Heteroscedasticity check: returns the Pearson correlation between the
     * predicted values and the absolute residuals (near zero under
     * homoscedasticity).
     */
    public static double testHeteroscadisity(double[] actual, double[] predicted)
    {
        //Measure correlation between actual values and absolute residual values
        double[] absRes = new double[predicted.length];
        double meanPred=0;
        double meanAbs=0;
        for(int i=0;i<predicted.length;i++)
        {
            absRes[i]=Math.abs(actual[i]-predicted[i]);
            meanAbs+=absRes[i];
            meanPred+=predicted[i];
        }
        meanAbs/=predicted.length;
        meanPred/=predicted.length;
        //Measure correlation between absRes and predicted, quite slowly!
        double corr=0,x=0,y=0;
        for(int i=0;i<predicted.length;i++)
        {
            corr+=(absRes[i]-meanAbs)*(predicted[i]-meanPred);
            x+=(absRes[i]-meanAbs)*(absRes[i]-meanAbs);
            y+=(predicted[i]-meanPred)*(predicted[i]-meanPred);
        }
        corr=corr/Math.sqrt(x*y);
        System.out.println(" Correlation = "+corr);
        //Not adjusted for the number of regressors!
        // double t=corr*(Math.sqrt(predicted.length-2))/Math.sqrt(1-corr*corr);
        return corr;
    }

    /** Quick sanity check: KS statistic for simulated N(0,3) data vs N(0,1). */
    public static void main(String[] args) {
        int s=100;
        double[] data= new double[s];
        NormalDistribution n = new NormalDistribution(0,3);
        for(int i=0;i<s;i++)
            data[i]=n.simulate();
        double x = kolmogorovSmirnoff(data,1);
        System.out.println(" KS stat = "+x);
    }

    /**
     * Demonstration: fits a weka linear regression to a hard-coded arff file
     * and runs the heteroscedasticity correlation check on its predictions.
     */
    public static void testHetero()
    {
        Instances data;
        FileReader r;
        Instance inst;
        double[] actual,predictions;
        LinearRegression lg = new LinearRegression();
        try{
            r= new FileReader("C:/Research/Data/Gavin Competition/Weka Files/Temp Train.arff");
            data = new Instances(r);
            data.setClassIndex(data.numAttributes()-1);
            lg.buildClassifier(data);
            predictions=new double[data.numInstances()];
            actual=data.attributeToDoubleArray(data.numAttributes()-1);
            for(int i=0;i<predictions.length;i++)
            {
                inst=data.instance(i);
                predictions[i]=lg.classifyInstance(inst);
            }
            OutFile of= new OutFile("C:/Research/Data/Gavin Competition/Weka Files/CorrelationTest.csv");
            System.out.println(" t stat for homogeneity ="+testHeteroscadisity(actual,predictions));
            for(int i=0;i<predictions.length;i++)
                of.writeLine(actual[i]+","+predictions[i]);
        }catch(Exception e)
        {
            System.out.println(" Error in REsidual Test "+e);
        }
    }
}
| 7,333 | 27.426357 | 96 | java |
tsml-java | tsml-java-master/src/main/java/statistics/tests/TestResults.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package statistics.tests;
import statistics.distributions.*;
/**
 * Holds the outcome of a hypothesis test: the statistic, its sampling
 * distribution under H0, and the derived p-value / critical value.
 * type encodes the alternative hypothesis:
 *   -1 : H1 phi <  phi0 (lower tail)
 *    0 : H1 phi != phi0 (two sided)
 *    1 : H1 phi >  phi0 (upper tail)
 */
public class TestResults{
    public double testStat;
    public double pValue;
    public String testName;
    public double h0;
    public int type;
    public double level=0.05;
    public double criticalValue;
    public int df1,df2; //Degrees of freedom, if required
    // Distribution of the test statistic under H0, if available.
    // NOTE: the original author warns that these Distribution classes should
    // be checked before use, as at least one of them was wrong.
    public Distribution dist;

    /** Creates an empty result with no distribution attached. */
    public TestResults(String s){
        this(s,null);
    }

    /** Creates an empty result with the H0 distribution of the statistic. */
    public TestResults(String s,Distribution d){
        testName = s;
        dist = d;
        testStat=pValue=h0=0;
    }

    /**
     * Derives pValue from testStat via the attached distribution; a no-op
     * when no distribution has been set.
     */
    public void findPValue(){
        if(dist==null)
            return;
        double cdf=dist.getCDF(testStat);
        switch(type){
            case 1:   //upper tail
                pValue=1-cdf;
                break;
            case 0:   //two sided: double the smaller tail
                pValue=2*(cdf>0.5 ? 1-cdf : cdf);
                break;
            default:  //lower tail (type==-1)
                pValue=cdf;
        }
    }

    /** Sets criticalValue to the level quantile for the chosen tail. */
    public void findCriticalValue()
    {
        criticalValue=dist.getQuantile(type==-1 ? 1-level : level);
    }

    /** Multi-line human readable summary of the test set-up and outcome. */
    public String toString(){
        StringBuilder sb=new StringBuilder("****** Results for ").append(testName).append(" *********\n");
        sb.append(" To test median =").append(h0).append(" against H1: ");
        if(type==-1)
            sb.append("median < ").append(h0);
        else if(type==1)
            sb.append("median > ").append(h0);
        else
            sb.append("median not equal to ").append(h0);
        sb.append("\n T = ").append(testStat).append("\t p value = ").append(pValue).append("\tlevel = ").append(level).append("\n");
        sb.append(" Distribution = ").append(dist).append("\t Level =").append(level).append("\t Critical Value =").append(criticalValue);
        return sb.toString();
    }
}
| 2,510 | 27.862069 | 100 | java |
tsml-java | tsml-java-master/src/main/java/statistics/tests/Tests.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package statistics.tests;
import fileIO.*;
import java.util.*;
/**
 * Base class for the non-parametric tests: holds the (static, shared) data
 * set and provides loading/ranking utilities. One of the loadData methods
 * MUST be called before any test is run; each loads the data, then sorts,
 * ranks and tie-adjusts the pooled sample.
 *
 * File format for loadData(String): csv, c++ style comments ignored. First
 * value = number of treatment levels k; next k values = the sample sizes;
 * then the data, one sample (row) at a time.
 */
abstract public class Tests{
    protected static int N;                      //Total data size
    protected static int k;                      //number of levels, default to 1
    protected static int[] n;                    //number of data per level
    protected static DataPoint[][] dataByLevel;  //data grouped by sample: [level][index]
    protected static DataPoint[] rankedData;     //all data pooled, sorted and ranked
    protected static boolean loaded = false;     //set once data has been loaded
    protected static boolean debug = false;

    /** Loads samples from file (format in the class comment), then ranks the pooled data. */
    public static void loadData(String fileName)
    {
        InFile f = new InFile(fileName);
        k = f.readInt();
        n = new int[k];
        N=0;
        dataByLevel = new DataPoint[k][];
        for(int i=0;i<k;i++)
        {
            n[i]=f.readInt();
            N+=n[i];
            dataByLevel[i]=new DataPoint[n[i]];
        }
        int c=0;
        rankedData = new DataPoint[N];
        for(int i=0;i<k;i++)
        {
            for(int j=0;j<n[i];j++)
            {
                double d = f.readDouble();
                dataByLevel[i][j]=new DataPoint(d,i,j);
                rankedData[c]=dataByLevel[i][j];
                c++;
            }
        }
        //sort, assign ranks and average ties (was duplicated inline in each loader)
        rank(rankedData);
        loaded =true;
        if(debug)
        {
            for(int i=0;i<k;i++)
            {
                for(int j=0;j<n[i];j++)
                    System.out.print(dataByLevel[i][j].d+"\t");
                System.out.print("\n");
            }
        }
    }

    /** Loads k pre-split samples: data[i] is sample i. */
    public static void loadData(double[][] data)
    {
        k = data.length;
        n = new int[k];
        N=0;
        dataByLevel = new DataPoint[k][];
        for(int i=0;i<k;i++)
        {
            n[i]=data[i].length;
            N+=n[i];
            dataByLevel[i]=new DataPoint[n[i]];
        }
        int c=0;
        rankedData = new DataPoint[N];
        for(int i=0;i<k;i++)
        {
            for(int j=0;j<n[i];j++)
            {
                dataByLevel[i][j]=new DataPoint(data[i][j],i,j);
                rankedData[c]=dataByLevel[i][j];
                c++;
            }
        }
        rank(rankedData);
        loaded =true;
    }

    /** Loads a single sample; only for use with one-sample tests. */
    public static void loadData(double[] data)
    {
        k = 1;
        n = new int[k];
        N=data.length;
        n[0]=data.length;
        dataByLevel = new DataPoint[k][];
        //BUG FIX: row 0 was never allocated, so the loop below always threw
        //a NullPointerException on dataByLevel[0][j]
        dataByLevel[0] = new DataPoint[N];
        rankedData = new DataPoint[N];
        for(int j=0;j<N;j++)
        {
            dataByLevel[0][j]=new DataPoint(data[j],0,j);
            rankedData[j]=dataByLevel[0][j];
        }
        rank(rankedData);
        loaded =true;
    }

    /**
     * Replaces the ranks of runs of tied values with the average of the
     * run's ranks. PRE: ranks is sorted by value with ranks already 1..n.
     */
    protected static void adjustRanksForDuplicates(DataPoint[] ranks)
    {
        DataPoint first=ranks[0];   //first element of the current run of equal values
        int count=0;                //number of duplicates of first seen so far
        double s,e;
        for(int i=1;i<ranks.length;i++)
        {
            if(ranks[i].d!=first.d)
            {
                if(count>0)
                {
                    //run of count+1 ties ended at index i-1: average its ranks
                    s=first.rank;
                    e=i;
                    for(int j=0;j<=count;j++)
                        ranks[(int)(s-1)+j].rank=(e+s)/2;
                    count=0;
                }
                first=ranks[i];
            }
            else
                count++;
        }
        if(count>0)
        {
            //final run reaches the end of the array
            s=first.rank;
            e=ranks.length;
            for(int j=0;j<=count;j++)
                ranks[(int)(s-1)+j].rank=(e+s)/2;
        }
    }

    /** Sorts data by value, assigns ranks 1..n and averages tied ranks. */
    public static void rank(DataPoint[] data)
    {
        Arrays.sort(data);
        for(int i=0;i<data.length;i++)
            data[i].rank=(i+1);
        adjustRanksForDuplicates(data);
    }
}
| 4,556 | 23.111111 | 90 | java |
tsml-java | tsml-java-master/src/main/java/statistics/tests/TwoSampleTests.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package statistics.tests;
/*Written in the dark and distant past
Algorithms implemented from descriptions in Neave and Worthington
Assumes unpaired samples. For paired samples just take the difference and use
OneSam
*USAGE:
TwoSampleTests test=new TwoSampleTests();
double[] d1=...
double[] d2=...
String str=test.performTests(d1,d2);
OUTPUT: A comma separated string with testName,testStatistic,pValue (assuming alpha 0.05)
**/
import statistics.distributions.*;
import java.util.*;
public class TwoSampleTests extends Tests{
	/**
	 * Runs all three two-sample location tests (Welch's t-test, Mann-Whitney,
	 * robust rank order) on unpaired samples a and b.
	 *
	 * @param a first sample
	 * @param b second sample
	 * @return one line per test: "testName,testStatistic,pValue\n"
	 */
	public String performTests(double[] a, double[] b)
//Performs t-test (unequal var), Mann-Whitney and Robust Rank Order test
//Returns for each test
//		TestName, Test Statistic, large sample 1-sided p-value,	
	{
		// Wrap the raw samples into the DataPoint layout used by the tests:
		// d[0] holds sample a (sample number 0), d[1] holds sample b.
		DataPoint[][] d = new DataPoint[2][];
		d[0]=new DataPoint[a.length];
		d[1]=new DataPoint[b.length];
		for(int i=0;i<a.length;i++)
			d[0][i]=new DataPoint(a[i],0,i);
		for(int i=0;i<b.length;i++)
			d[1][i]=new DataPoint(b[i],1,i);
//T test
		TestResults t_test=new TestResults("T_Test");
		studentT_Test(t_test,d,0);
		t_test.findPValue();
		String str="T_Test,"+t_test.testStat+","+t_test.pValue+"\n";
//Mann-Whitney
		TestResults mw=new TestResults("Mann_Whitney");
		mannWhitney(mw,d);
		mw.findPValue();
		str+="Mann_Whitney,"+mw.testStat+","+mw.pValue+"\n";
//Robust Rank Sum		
		TestResults rrs=new TestResults("robustRankSum");
		robustRankSum(rrs,d);
		rrs.findPValue();
		str+="robustRankSum,"+rrs.testStat+","+rrs.pValue+"\n";
		return str;	
	}
public static TestResults performTwoSampleTest(double[] a, double[] b, int testType)
{
DataPoint[][] d = new DataPoint[2][];
d[0]=new DataPoint[a.length];
d[1]=new DataPoint[b.length];
for(int i=0;i<a.length;i++)
d[0][i]=new DataPoint(a[i],0,i);
for(int i=0;i<b.length;i++)
d[1][i]=new DataPoint(b[i],1,i);
//T test
TestResults testR=new TestResults("T_Test");
switch(testType)
{
case 0:
studentT_Test(testR,d,0);
break;
case 1:
mannWhitney(testR,d);
break;
case 2:
robustRankSum(testR,d);
break;
default:
System.out.println(" Test Not implemented: exit");
System.exit(0);
}
return testR;
}
public static double performTest(double[] a, double[] b, int testType, boolean returnPVal)
{
TestResults r=performTwoSampleTest(a,b,testType);
if(returnPVal)
{
r.findPValue();
return r.pValue;
}
else
return r.testStat;
}
public static double studentT_PValue(double[] a, double[] b)
{
DataPoint[][] d = new DataPoint[2][];
d[0]=new DataPoint[a.length];
d[1]=new DataPoint[b.length];
for(int i=0;i<a.length;i++)
d[0][i]=new DataPoint(a[i],0,i);
for(int i=0;i<b.length;i++)
d[1][i]=new DataPoint(b[i],1,i);
//T test
TestResults t_test=new TestResults("T_Test");
studentT_Test(t_test,d,0);
t_test.findPValue();
return t_test.pValue;
}
public static double mw_PValue(double[] a, double[] b)
{
DataPoint[][] d = new DataPoint[2][];
d[0]=new DataPoint[a.length];
d[1]=new DataPoint[b.length];
for(int i=0;i<a.length;i++)
d[0][i]=new DataPoint(a[i],0,i);
for(int i=0;i<b.length;i++)
d[1][i]=new DataPoint(b[i],1,i);
TestResults mw=new TestResults("Mann_Whitney");
mannWhitney(mw,d);
mw.findPValue();
return mw.pValue;
}
public static double rrs_PValue(double[] a, double[] b)
{
DataPoint[][] d = new DataPoint[2][];
d[0]=new DataPoint[a.length];
d[1]=new DataPoint[b.length];
for(int i=0;i<a.length;i++)
d[0][i]=new DataPoint(a[i],0,i);
for(int i=0;i<b.length;i++)
d[1][i]=new DataPoint(b[i],1,i);
TestResults rrs=new TestResults("robustRankSum");
robustRankSum(rrs,d);
rrs.findPValue();
return rrs.pValue;
}
public static double studentT_TestStat(double[] a, double[] b)
{
DataPoint[][] d = new DataPoint[2][];
d[0]=new DataPoint[a.length];
d[1]=new DataPoint[b.length];
for(int i=0;i<a.length;i++)
d[0][i]=new DataPoint(a[i],0,i);
for(int i=0;i<b.length;i++)
d[1][i]=new DataPoint(b[i],1,i);
//T test
TestResults t_test=new TestResults("T_Test");
studentT_Test(t_test,d,0);
return t_test.testStat;
}
public static double mw_TestStat(double[] a, double[] b)
{
DataPoint[][] d = new DataPoint[2][];
d[0]=new DataPoint[a.length];
d[1]=new DataPoint[b.length];
for(int i=0;i<a.length;i++)
d[0][i]=new DataPoint(a[i],0,i);
for(int i=0;i<b.length;i++)
d[1][i]=new DataPoint(b[i],1,i);
TestResults mw=new TestResults("Mann_Whitney");
mannWhitney(mw,d);
return mw.testStat;
}
public static double rrs_TestStat(double[] a, double[] b)
{
DataPoint[][] d = new DataPoint[2][];
d[0]=new DataPoint[a.length];
d[1]=new DataPoint[b.length];
for(int i=0;i<a.length;i++)
d[0][i]=new DataPoint(a[i],0,i);
for(int i=0;i<b.length;i++)
d[1][i]=new DataPoint(b[i],1,i);
TestResults rrs=new TestResults("robustRankSum");
robustRankSum(rrs,d);
return rrs.testStat;
}
public static void robustRankSum(TestResults t, DataPoint[][] d)
// Find U as with Mann-Whitley, different test stat
{
// 1. Find placement arrays for both ways
// 1.1 Merge two data series into one,
DataPoint[] mergedD=new DataPoint[d[0].length+d[1].length];
for(int i=0;i<d[0].length;i++)
mergedD[i]=d[0][i];
for(int i=0;i<d[1].length;i++)
mergedD[d[0].length+i]=d[1][i];
//1.2. Sort combined data series
Arrays.sort(mergedD);
int[] p_YX=new int[d[0].length];
int[] p_XY=new int[d[1].length];
int m=p_YX.length;
int n=p_XY.length;
int j=0;
int countA=0;
int countB=0;
double u_YX=0,u_XY=0;
for(int i=0;i<mergedD.length; i++)
{
//If d[i] from sample A increment U
if(mergedD[i].sampleNumber()==0)
{
p_YX[countA]=countB;
u_YX+=p_YX[countA];
countA++;
}
else
{
p_XY[countB]=countA;
u_XY+=p_XY[countB];
countB++;
}
}
System.out.println(" Series A positions = ");
for(int i=0;i<p_XY.length;i++)
System.out.print(p_XY[i]+",");
System.out.println(" Series B positions = ");
for(int i=0;i<p_YX.length;i++)
System.out.print(p_YX[i]+",");
System.out.println(" u1 = "+u_XY+" u2 = "+u_YX);
System.out.println(" m*n = "+p_XY.length*p_YX.length+" u1+u2 = "+(u_XY+u_YX));
//
// 2. Calculate test statistic
System.out.println(" U_A statistic = "+u_YX/m);
System.out.println(" U_B statistic = "+u_XY/n);
u_YX/=m;
u_XY/=n;
double Va=0,Vb=0;
for(int i=0;i<m;i++)
Va+=(p_YX[i]-u_YX)*(p_YX[i]-u_YX);
for(int i=0;i<n;i++)
Va+=(p_XY[i]-u_XY)*(p_XY[i]-u_XY);
double U=(m*u_YX-n*u_XY)/(2*Math.sqrt(Va+Vb+u_YX*u_XY));
t.dist = new NormalDistribution(0,1);
t.testStat=U;
System.out.println(" U New = "+t.testStat);
}
public static void wilcoxonRankSum(TestResults t, DataPoint[][] d)
//Find U as with Mann-Whitley, different test stat
{
double U=(double)findU(d);
t.testStat=U+0.5*d[0].length*(d[0].length+1);
System.out.println(" R statistic = "+t.testStat);
}
	/** Mann-Whitney two sample test.
	 * H. B. Mann and D. R. Whitney "On a test of whether one of two random variables
	 * is stochastically larger than another" Ann. Math. Statist. 18, 50-60 (1947)
	 *
	 * Sums, over each element of sample A, the number of elements of sample B
	 * that precede it (the U statistic, computed by findU). The null
	 * distribution is approximated here by a normal with mean nA*nB/2 and
	 * standard deviation sqrt(nA*nB*(nA+nB+1)/12).
	 *
	 * @param t test results; testStat and dist are filled in (call
	 *          t.findPValue() afterwards for the p-value)
	 * @param d data set: d[0] is sample A, d[1] is sample B
	 *
	 * Notes: Works with unequal sample sizes.
	 * Related to the Wilcoxon rank-sum test.
	 */
	public static void mannWhitney(TestResults t, DataPoint[][] d)
	{
//1. Find U. 	
		t.testStat=(double)findU(d);
//		System.out.println(" U statistic = "+t.testStat);
		double nA=(double)d[0].length;
		double nB=(double)d[1].length;
		double nullMean=0.5*nA*nB;
		double nullStDev=Math.sqrt(nA*nB*(nA+nB+1)/12.0);
		t.dist=new NormalDistribution(nullMean,nullStDev);
		System.out.println(" Null Mean ="+nullMean+" Null SD = "+nullStDev);
		// a*NB = "+d[0].length*d[1].length);
	}
	/**
	 * Dispatches to a t-test variant.
	 * type 0: two-sample, unequal variance (Welch).
	 * type 1: two-sample, equal variance — NOT IMPLEMENTED; silently falls
	 *         back to the unequal-variance test.
	 * type 2: paired two-sample — NOT IMPLEMENTED; t is left untouched.
	 */
	public static void studentT_Test(TestResults t, DataPoint[][] d, int type){
		switch(type){//Should probably enum this
			case 0: //Default to two sample, unequal variance
				unequalVarianceStudentT_Test(t,d);
				break;
			case 1: //two sample, equal variance, NOT IMPLEMENTED
				unequalVarianceStudentT_Test(t,d);
				break;
			case 2: //pair two sample
		}
	}
//Defaults to two sample test assuming unequal variance
	/**
	 * Welch's two-sample t-test (unequal variances). Computes sample means
	 * and (n-1)-denominator variances, the Welch statistic
	 * (m1-m2)/sqrt(s1/n1+s2/n2), and the Welch-Satterthwaite degrees of
	 * freedom, rounded UP to an integer here for the Student distribution.
	 *
	 * @param t result holder; testStat, df1 and dist are filled in
	 * @param d two samples as d[0] and d[1]
	 */
	public static void unequalVarianceStudentT_Test(TestResults t, DataPoint[][] d)
	{
		//Find means and var
		double m1=0,m2=0;
		double s1=0,s2=0;
		for(int i=0;i<d[0].length;i++)
			m1+=d[0][i].d;
		for(int i=0;i<d[1].length;i++)
			m2+=d[1][i].d;
		m1/=d[0].length;
		m2/=d[1].length;
		for(int i=0;i<d[0].length;i++)
			s1+=(d[0][i].d-m1)*(d[0][i].d-m1);
		for(int i=0;i<d[1].length;i++)
			s2+=(d[1][i].d-m2)*(d[1][i].d-m2);
		s1/=(d[0].length-1);
		s2/=(d[1].length-1);
		//Find test stat
		double tStat=(m1-m2)/Math.sqrt((s1/d[0].length+s2/d[1].length));
		t.testStat=tStat;
		//Find df (Welch-Satterthwaite approximation)
		int n1=d[0].length;
		int n2=d[1].length;
		t.df1=(int)Math.ceil( (s1/n1+s2/n2)*(s1/n1+s2/n2)/( (s1/n1)*(s1/n1)/(n1-1)+ (s2/n2)*(s2/n2)/(n2-1) ) ) ;
		t.dist=new StudentDistribution(t.df1);
	}
private static DataPoint[] mergeData(DataPoint[][] d)
{
DataPoint[] md=new DataPoint[d[0].length+d[1].length];
for(int i=0;i<d[0].length;i++)
md[i]=d[0][i];
for(int i=0;i<d[1].length;i++)
md[d[0].length+i]=d[1][i];
return md;
}
	/**
	 * Mann-Whitney U: the number of sample-b values preceding each sample-a
	 * value, summed over sample a. Equal values (ties) are NOT handled.
	 *
	 * @param d the samples
	 * @param a index of the sample whose placements are summed
	 * @param b index of the other sample
	 * @return the U statistic for sample a against sample b
	 */
	static private int findU(DataPoint[][] d, int a, int b)
	{
		// The test on two samples sums the number of elements in sample B that are 
		// smaller than each element of sample A.
		//NUMBER OF B's proceeding each A	
// 1. Merge two data series into one, 
		DataPoint[] mergedD=new DataPoint[d[a].length+d[b].length];
		for(int i=0;i<d[a].length;i++)
			mergedD[i]=d[a][i];
		for(int i=0;i<d[b].length;i++)
			mergedD[d[a].length+i]=d[b][i];
//2. Sort combined data series
		Arrays.sort(mergedD);
//3. Find U statistic: Does NOT handle equal values	
		int j=0;		// number of sample-a values consumed; allows early exit
		int count=0;	// number of sample-b values seen so far
		int U=0;
		for(int i=0;i<mergedD.length && j<d[a].length; i++)
		{
			//If d[i] from sample A increment U
			if(mergedD[i].sampleNumber()==a)
			{
				U+=count;
				j++;
			}
			//else from sample B, increment count	
			else
				count++;
		}
		return U;
	}
static private int findU(DataPoint[][] d)
//Implementation 1: Does not deal with ties: Defaults for finding positions
// The test on two samples sums the number of elements in sample B that are larger
// * than each element of sample A.
{
//1. Linear scan to find the number of sample B proceeding sample A
if(d.length!=2)
{
System.out.println("Error, cannot use this for k!=2");
System.exit(0);
}
return findU(d,0,1);
}
	/**
	 * Ad-hoc smoke test using the two worked examples quoted below. Prints
	 * results to stdout and calls System.exit(0) before returning, so any
	 * code after a call to this method is unreachable.
	 */
	public static void testTwoSamples()
/*
 * Test Data 1: page 110
 * Sample A: 3,7,15,10,4,6,4,7
 * Sample B: 19,11,36,8,25,23,38,14,17,41,25,21
 	n_a = 8, n_b=12
 	Test Stat: U=4
 * Test Data 2: Feltovich paper "Nonparametric tests of differences in medians
 * Sample A: 5.025,6.7,6.725,6.75,7.05,7.25,8.375 
 * Sample B: 4.875,5.125,5.225,5.55,5.75,5.925,6.125
 	n_a = m=7, n_b=n=8
 	Test Stat: U=4
 * 
 * */	
	{
		int n_a=8;
		int n_b=12;
		DataPoint[][] d = new DataPoint[2][];
		d[0]=new DataPoint[n_a];
		d[1]=new DataPoint[n_b];
		double []d1={3,7,15,10,4,6,4,7};
		for(int i=0;i<n_a;i++)
			d[0][i]=new DataPoint(d1[i],0,i);
		double []d2={19,11,36,8,25,23,38,14,17,41,25,21};
		TwoSampleTests ts = new TwoSampleTests();
		String str = ts.performTests(d1,d2);
		System.out.println(str+"\n");
//		System.exit(0);
		for(int i=0;i<n_b;i++)
			d[1][i]=new DataPoint(d2[i],1,i);
		// NOTE(review): t is reused across all three tests below, so only the
		// last test's fields survive in it.
		TestResults t=new TestResults("Mann Whittley");
		studentT_Test(t,d,0);
		mannWhitney(t,d);
		robustRankSum(t,d);
		int m=7;
		int n=8;
		double []d3={5.025,6.7,6.725,6.75,7.05,7.25,8.375};
		double[]d4={4.875,5.125,5.225,5.425,5.55,5.75,5.925,6.125};
		d = new DataPoint[2][];
		d[0]=new DataPoint[m];
		d[1]=new DataPoint[n];
		for(int i=0;i<m;i++)
			d[0][i]=new DataPoint(d3[i],0,i);
		for(int i=0;i<n;i++)
			d[1][i]=new DataPoint(d4[i],1,i);
		studentT_Test(t,d,0);
		mannWhitney(t,d);
		robustRankSum(t,d);
		str = ts.performTests(d3,d4);
		System.out.println(str+"\n");
		System.exit(0);
	}
public static void wilcoxonMatchedPairs(TestResults t, DataPoint[][] d)
{
if(d.length!=2)
{
System.out.println("Error, cannot use this for k!=2");
System.exit(0);
}
if(d[0].length!=d[1].length)
{
System.out.println("Error, cannot use this for unequal samples, they should be matched");
System.exit(0);
}
DataPoint[] ranked= new DataPoint[d[0].length];
for(int i=0;i<ranked.length;i++)
{
System.out.println(" Difference ="+(d[0][i].d-d[1][i].d));
ranked[i]= new DataPoint(d[0][i].d-d[1][i].d,0,d[0][i].position);
}
Tests.rank(ranked);
OneSampleTests.wilcoxonSignRank(t,ranked);
}
	/**
	 * Driver for manual testing. Note: testTwoSamples() ends with
	 * System.exit(0), so everything after the first line is unreachable
	 * as currently written.
	 */
	public static void main(String args[]) {
		testTwoSamples();
		System.exit(0);
		// Unreachable below: retained legacy experiment on a local CSV file.
		String fileName = "C:\\Users\\ajb\\Dropbox\\Results\\DebugFiles\\TwoSampleTest.csv";
		double[][] d;
		TestResults T=new TestResults("SSS");
		T.h0=0;
		T.level=0.05;
		T.type=0;
		loadData(fileName);
		wilcoxonMatchedPairs(T,dataByLevel);
		System.out.println(T);
	/*	
		d=getData(fileName);
		switch(testType)
		{
//Location
			case 0:	//Independent, normal, unknown variance
				T=T_Test(d);
				break;
//Independent, unknown distribution				
			case 1:
				T=MannWhitney(d);
				break;
			case 2:
				T=Wilcoxon(d);
				break;
			case 3:
				T=Tukey(d);
				break;
			case 4: //Dependent, Just do single sample test on di
*/
	}
}
| 16,738 | 30.944656 | 106 | java |
tsml-java | tsml-java-master/src/main/java/statistics/transformations/ArrayPair.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package statistics.transformations;
/*
* Created on Jan 31, 2006
*
*/
/**
* @author ajb
*
*/
/**
 * A (predicted, residual) value pair, ordered by ascending predicted value.
 * Generified from raw {@code Comparable} and switched to
 * {@link Double#compare(double, double)}, which (unlike the original
 * hand-rolled comparison) gives a total order consistent with the
 * {@code compareTo} contract even for NaN.
 */
public class ArrayPair implements Comparable<ArrayPair> {
	public double predicted;
	public double residual;
	@Override
	public int compareTo(ArrayPair c) {
		return Double.compare(this.predicted, c.predicted);
	}
	public static void main(String[] args) {
	}
}
| 1,244 | 27.953488 | 76 | java |
tsml-java | tsml-java-master/src/main/java/statistics/transformations/BoxCox.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
/*
* Created on Jan 29, 2006
*
*/
package statistics.transformations;
import fileIO.*;
import weka.core.*;
import weka.classifiers.functions.*;
import weka.classifiers.*;
/**
* @author ajb
*
*/
public class BoxCox extends Transformations{
public static double MIN=-3,MAX=3,INTERVAL=0.25;
boolean tryZero=false;
AbstractClassifier c;
double minError=Double.MAX_VALUE, bestLambda;
double gamma;
boolean strictlyPositive=true;
	/**
	 * Default constructor: searches lambda in [MIN,MAX] and fits a weka
	 * LinearRegression at each candidate.
	 */
	public BoxCox()
	{
		supervised=true;
		response=true;
		minError=Double.MAX_VALUE;
		bestLambda=MIN;
		c=new LinearRegression();
		// NOTE(review): "-C " has no value attached — looks malformed;
		// confirm against weka LinearRegression's option format.
		String[] options = {"-S 1","-C "};
		try{
			c.setOptions(options);
		}catch(Exception e){
			System.out.println(" Error Setting options in constructor");
		}
	}
	/** Sets the strictly-positive flag. NOTE(review): the field is never read
	 *  in this class — transform() detects non-positive data itself. */
	public void setStrictlyPos(boolean f){strictlyPositive=f;}
	/** Constructor using a caller-supplied regression model instead of the
	 *  default LinearRegression. */
	public BoxCox(AbstractClassifier c)
	{
		this();
		this.c=c;
	}
//Performs a specific B-C transform on the response variable, overwriting original
static public void transformResponse(Instances data, double lambda, double[] response)
{
Instance inst;
double v;
int responsePos=data.numAttributes()-1;
for(int i=0;i<response.length;i++)
{
inst=data.instance(i);
v=(Math.pow(response[i],lambda)-1)/lambda;
inst.setValue(responsePos,v);
}
}
// Transform the response variable using box-cox procedure
	/**
	 * Searches lambda over [MIN,MAX] in steps of INTERVAL, fitting the model
	 * to the transformed response each time and scoring the fit by SSE on the
	 * ORIGINAL scale. Stores the best lambda in bestLambda and leaves data's
	 * response transformed with it.
	 *
	 * @return data with the response attribute Box-Cox transformed in place
	 */
	public Instances transform(Instances data)
	{
		int responsePos=data.classIndex();
		double[] response=data.attributeToDoubleArray(responsePos);
		double[] predictions=new double[response.length];
		double v;	
		Instance inst;
//Check if strictly positive
		gamma=response[0];
		for(int i=1;i<response.length;i++)
		{
			if(response[i]<gamma)
				gamma=response[i];
		}
		System.out.println(" Min value = "+gamma);
		if(gamma<=0)
		{
			// Shift so the minimum becomes |min|+1 > 0, since Box-Cox
			// requires a strictly positive response.
			gamma=-2*gamma+1;
			System.out.println(" Data series is not strictly positive, rescaling by "+gamma);
			for(int i=0;i<response.length;i++)
				response[i]+=gamma;
		}	
		for(double lambda=MIN;lambda<=MAX;lambda+=INTERVAL)
		{
//Transform response
			// NOTE(review): exact float equality — with MIN=-3, INTERVAL=0.25
			// accumulated rounding may mean lambda is never exactly 0; confirm.
			if(lambda==0) lambda+=INTERVAL;
			transformResponse(data,lambda,response);
//Fit model and get training predictions			
			try{
				c.buildClassifier(data);
				for(int i=0;i<predictions.length;i++)
				{
					inst=data.instance(i);
					predictions[i]=c.classifyInstance(inst);
//					if(predictions[i]<0)
//						predictions[i]=0;
				}
			}
			catch(Exception e)
			{
				System.out.println(" Error building with lambda = "+lambda);
			}
//Assess quality of fit by SSE: Transformed or untransformed? Assume we have to
// turn it back			
			double SSE=0;
			boolean f=true;	// NOTE(review): unused
			for(int i=0;i<predictions.length;i++)
			{
				// Invert the Box-Cox transform: x = (lambda*y + 1)^(1/lambda),
				// clamping non-positive bases to 0.
				predictions[i]*=lambda;
				predictions[i]++;
				if(predictions[i]<=0)
					predictions[i]=0;
				else
				{	
					if(lambda>0)
						predictions[i]=Math.pow(predictions[i],1.0/lambda);
					else
						predictions[i]=1/Math.pow(predictions[i],-1.0/lambda);
				}
				SSE+=(predictions[i]-response[i])*(predictions[i]-response[i]);
			}
//Check whether minimum, and store	
			SSE/=(data.numInstances()-data.numAttributes());
			System.out.println("lambda = "+lambda+"SSE ="+SSE);
			if(SSE<minError)
			{
				minError=SSE;
				bestLambda=lambda;
			}
		}
		System.out.println("Min lambda = "+bestLambda+" with MSE = "+minError);
//Perform best transform
		for(int i=0;i<response.length;i++)
		{
			inst=data.instance(i);
			v=(Math.pow(response[i],bestLambda)-1)/bestLambda;
			inst.setValue(responsePos,v);
		}	
		return data;
	}
	/**
	 * Inverts the Box-Cox transform on the response column using the fitted
	 * bestLambda: y -> (lambda*y + 1)^(1/lambda).
	 * NOTE(review): uses numAttributes()-1 as the response position, while
	 * transform() uses classIndex() — confirm these always coincide.
	 */
	public Instances invert(Instances data){
		Instance inst;
		int responsePos=data.numAttributes()-1;
		double[] response=data.attributeToDoubleArray(responsePos);
		double v;
		for(int i=0;i<data.numInstances();i++)
		{
			inst=data.instance(i);
			v=response[i]*bestLambda;
			v++;
			v=Math.pow(v,1/bestLambda);
			inst.setValue(responsePos,v);
		}	
		return data;
	}
//Transform data based on values formed by calling transform on another data set
//Only needed for dependent variable transformations, for others does nothing	
	/**
	 * Applies the previously fitted bestLambda to another data set's response
	 * column (no new search). Call transform() first to fit bestLambda.
	 */
	public Instances staticTransform(Instances data)
	{
		Instance inst;
		int responsePos=data.numAttributes()-1;
		double[] response=data.attributeToDoubleArray(responsePos);
		double v;
		for(int i=0;i<data.numInstances();i++)
		{
			inst=data.instance(i);
			v=(Math.pow(response[i],bestLambda)-1)/bestLambda;
			inst.setValue(responsePos,v);
		}	
		return data;
	}
public double[] invertPredictedResponse(double[] d)
{
double v;
for(int i=0;i<d.length;i++)
{
v=d[i]*bestLambda;
v++;
d[i]=Math.pow(v,1/bestLambda);
}
return d;
}
	/**
	 * Ad-hoc driver: prints normal quantiles and writes them to
	 * "TestQuantiles.csv". Does not exercise the Box-Cox transform itself.
	 */
	public static void main(String[] args)
	{
		double[] quantiles = Transformations.getNormalQuantiles(0.0,1.0);
		for(int i=0;i<quantiles.length;i++)
			System.out.println("Quantile "+i+" = "+quantiles[i]);
		OutFile of = new OutFile("TestQuantiles.csv");
		for(int i=0;i<quantiles.length;i++)
		{
			System.out.println(i+","+(i+1)/(double)quantiles.length+","+quantiles[i]);
			of.writeLine(i+","+(i+1)/(double)quantiles.length+","+quantiles[i]);
		}
	}
}
| 7,001 | 30.972603 | 93 | java |
tsml-java | tsml-java-master/src/main/java/statistics/transformations/BoxTidwell.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
/*
* Created on Jan 30, 2006
*
* TODO To change the template for this generated file go to
* Window - Preferences - Java - Code Style - Code Templates
*/
package statistics.transformations;
import fileIO.OutFile;
import java.io.FileReader;
import weka.core.Instance;
import weka.core.Instances;
public class BoxTidwell {
public static Instances transformRegressor(Instances data, int pos,int resultPos, double[] powers)
{
//1. Get values of the attribute of interest.
//Confusingly, am working with attributes in rows not columns
double[] temp=data.attributeToDoubleArray(pos);
double[] originalData= new double[temp.length];
double[] logData= new double[temp.length];
for(int i=0;i<temp.length;i++)
{
originalData[i]=temp[i];
logData[i]=Math.log(temp[i]);
}
double[] y =data.attributeToDoubleArray(data.classIndex());
// I'm not sure if this is a memory copy or a reference copy, so be safe
double[][] transposeFirst = new double[data.numAttributes()][data.numInstances()];
double[][] transposeSecond = new double[data.numAttributes()+1][data.numInstances()];
for(int j=0;j<data.numInstances();j++)
{
transposeFirst[0][j]=transposeSecond[0][j]=1;
}
for(int i=1;i<data.numAttributes();i++)
{
transposeFirst[i]=transposeSecond[i]=data.attributeToDoubleArray(i-1);
}
// Add one to pos cos of the ones
pos=pos+1;
// Second has an attribute at the end of data for transform
int workingPos=data.numAttributes();
LinearModel l1,l2;
double alpha=1, b1,b2;
double min=0.1;
boolean finished=false;
int count=0;
final int MaxIterations=10;
// Initialise alpha to 1
//Find Base SSE
//While not termination condition
while(!finished)
{
// System.out.println(" Iteration = "+(count+1)+" alpha = "+alpha);
//Create new attributes
//1. Calculate x^alpha
for(int j=0;j<originalData.length;j++)
{
transposeSecond[pos][j]=transposeFirst[pos][j]=Math.pow(originalData[j],alpha);
}
//2. Fit y=b1+ .. b_pos x^alpha (+ other terms)-> get b_pos
l1=new LinearModel(transposeFirst,y);
l1.fitModel();
//Not necessary:
// l1.formTrainPredictions();
// l1.findTrainStatistics();
// System.out.println(l1+"\nVariance for L1 = "+l1.variance);
b1=l1.paras[pos];
//3. Fit y=b*1+ .. b*_pos x^alpha +b*_workingPos x^alpha*log(x) (+ other terms)-> get b*2
//2. Calculate x^alpha*log(x)
for(int j=0;j<originalData.length;j++)
transposeSecond[workingPos][j]=transposeFirst[pos][j]*logData[j];
l2=new LinearModel(transposeSecond,y);
l2.fitModel();
// Not necessary:
// l2.formTrainPredictions();
// l2.findTrainStatistics();
// System.out.println(l2+"\nVariance for L2 = "+l2.variance);
b2=l2.paras[workingPos];
alpha+=b2/b1;
//Work out change term alpha = b*2/b1+alpha0
// System.out.println("New Alpha ="+alpha+" b1 = "+b1+" b2 = "+b2);
//Update termination criteria: stop if small change: check notes
count++;
if(Math.abs(b2/b1)<min || count>=MaxIterations)
finished=true;
else if(Math.abs(alpha)>10)
{
alpha=1;
finished=true;
}
}
//Fix original
powers[resultPos]=alpha;
pos=pos-1;
Instance inst;
for(int i=0;i<data.numInstances();i++)
{
inst=data.instance(i);
inst.setValue(pos,Math.pow(originalData[i],alpha));
}
return data;
}
//First rows is all ones
//Last row is for transformed attribute
public static double transformRegressor(double[][] data, double[] response, int pos)
{
//1. Get values of the attribute of interest.
double[] temp=data[pos];
double[] originalData= new double[temp.length];
double[] transformedData= new double[temp.length];
double[] logData = new double[originalData.length];
for(int i=0;i<originalData.length;i++)
{
originalData[i]=temp[i];
logData[i]=Math.log(originalData[i]);
}
double[] y =response;
double[][] transposeFirst = new double[data.length][];
double[][] transposeSecond = new double[data.length+1][];
for(int j=0;j<data.length;j++)
transposeFirst[j]=transposeSecond[j]=data[j];
transposeFirst[pos]=transformedData;
transposeSecond[pos]=transformedData;
transposeSecond[data.length]=logData;
int workingPos=data.length;
LinearModel l1,l2;
double alpha=1, b1,b2;
double min=0.1;
boolean finished=false;
int count=0;
final int MaxIterations=10;
// Initialise alpha to 1
//Find Base SSE
//While not termination condition
while(!finished)
{
//Create new attributes
//1. Calculate x^alpha
for(int j=0;j<originalData.length;j++)
transformedData[j]=Math.pow(originalData[j],alpha);
//2. Fit y=b1+ .. b_pos x^alpha (+ other terms)-> get b_pos
l1=new LinearModel(transposeFirst,y);
l1.fitModel();
//Not necessary:
// l1.formTrainPredictions();
// l1.findTrainStatistics();
// System.out.println(l1+"\nVariance for L1 = "+l1.variance);
b1=l1.paras[pos];
//3. Fit y=b*1+ .. b*_pos x^alpha +b*_workingPos x^alpha*log(x) (+ other terms)-> get b*2
//2. Calculate x^alpha*log(x)
for(int j=0;j<originalData.length;j++)
transposeSecond[workingPos][j]=originalData[j]*logData[j];
l2=new LinearModel(transposeSecond,y);
l2.fitModel();
b2=l2.paras[workingPos];
alpha+=b2/b1;
//Work out change term alpha = b*2/b1+alpha0
//Update termination criteria: stop if small change: check notes
count++;
if(Math.abs(b2/b1)<min || count>=MaxIterations)
finished=true;
else if(Math.abs(alpha)>10)
{
alpha=1;
finished=true;
}
}
//Fix original
return alpha;
}
public static void main(String[] args)
{
Instances data=null;
try{
FileReader r = new FileReader("C:/Research/Code/Archive Generator/src/weka/addOns/BoxTidwellTest2.arff");
data = new Instances(r);
data.setClassIndex(data.numAttributes()-1);
}catch(Exception e)
{
System.out.println("Error loading file "+e);
}
double[] powers=new double[data.numAttributes()-1];
// data=transformRegressor(data,0,powers);
// data=transformRegressor(data,2,powers);
// data=transformRegressor(data,1,powers);
System.out.println(" Final powers =");
for(int i=0;i<powers.length;i++)
System.out.println(i+" ="+powers[i]);
OutFile r = new OutFile("C:/Research/Code/Archive Generator/src/weka/addOns/BoxTidwellResults2.arff");
r.writeLine(data.toString());
}
}
| 7,127 | 29.461538 | 108 | java |
tsml-java | tsml-java-master/src/main/java/statistics/transformations/EmptyTransform.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
/*
* Created on Jan 30, 2006
*
* TODO To change the template for this generated file go to
* Window - Preferences - Java - Code Style - Code Templates
*/
package statistics.transformations;
import weka.core.Instances;
/**
 * Null-object transformation: every operation returns its argument unchanged.
 * Serves as the default when no transformation of the data is required.
 *
 * @author ajb
 */
public class EmptyTransform extends Transformations {
	/** Identity: returns data unchanged. */
	public Instances transform(Instances data) {
		return data;
	}
	/** Identity: returns data unchanged. */
	public Instances invert(Instances data) {
		return data;
	}
	/** Identity: returns data unchanged. */
	public Instances staticTransform(Instances data) {
		return data;
	}
	/** Identity: predicted responses are returned unchanged. */
	public double[] invertPredictedResponse(double[] d) {
		return d;
	}
	public static void main(String[] args) {
	}
}
| 1,513 | 27.037037 | 76 | java |
tsml-java | tsml-java-master/src/main/java/statistics/transformations/Exponential.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package statistics.transformations;
import weka.core.*;
/* Implements a simple log transformation of the response variable
*
*
*/
public class Exponential extends Transformations{
double offSet=0;
static double zeroOffset=1;
	public Exponential()
	{
		// Flags inherited from Transformations — presumably mark this as a
		// supervised transform of the response variable; confirm in base class.
		supervised=true;
		response=true;
	}
	/**
	 * Log-transforms the response (last) attribute in place. If the minimum
	 * response is <= zeroOffset (i.e. <= 1, not just negative as the inline
	 * comment suggests), the whole series is shifted by offSet so every log
	 * argument is at least zeroOffset. Prints each value before/after — debug
	 * output that will be noisy on large data sets.
	 *
	 * @return data with the response attribute replaced by log(response+offSet)
	 */
	public Instances transform(Instances data){
//Not ideal, should call a method to get this		
		int responsePos=data.numAttributes()-1;
		System.out.println(" Response Pos = "+responsePos);
		double[] response=data.attributeToDoubleArray(responsePos);
//Find the min value
		double min=response[0];
		for(int i=0;i<response.length;i++)
		{
			if(response[i]<min)
				min=response[i];
		}
		if(min<=zeroOffset)	//Cant take a log of a negative, so offset
		{
			offSet=-min+zeroOffset;
		}
		else
			offSet=0;
		System.out.println(" Min value = "+min+" offset = "+offSet);
		for(int i=0;i<data.numInstances();i++)
		{
			Instance t = data.instance(i);
			double resp=t.value(responsePos);
			System.out.print(i+" "+resp);
			resp=Math.log(resp+offSet);
			System.out.println(" "+resp);
			t.setValue(responsePos,resp);
		}
		return data;
	}
public Instances invert(Instances data){
int responsePos=data.numAttributes()-1;
for(int i=0;i<data.numInstances();i++)
{
Instance t = data.instance(i);
double resp=t.value(responsePos);
resp=Math.exp(resp);
resp-=offSet;
t.setValue(responsePos,resp);
}
return data;
}
public double[] invertPredictedResponse(double[] d){
for(int i=0;i<d.length;i++)
{
d[i]=Math.exp(d[i]);
d[i]-=offSet;
}
return d;
}
//Get quantile values for a transformed response, assuming
//Mean and variance of model	
//Not relevant, only needed for st
	/** No-op for this transformation: returns data unchanged. */
	public Instances staticTransform(Instances data){
		return data;
	}
}
| 2,552 | 25.05102 | 76 | java |
tsml-java | tsml-java-master/src/main/java/statistics/transformations/LinearModel.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
/*
Basic linear regression, including standardisation for residuals which isnt included
in the Jama ridge regression
* */
package statistics.transformations;
import fileIO.OutFile;
import java.io.FileReader;
import weka.core.Instances;
import weka.core.matrix.Matrix;
public class LinearModel {
double variance, standardisedError,SSE,SST,SSR, yBar;
Matrix Xt,X,XtXinv;
Matrix Y;
Matrix B;
double[] paras,y,H_Diagonal,predicted,residual,stdResidual;
int n,m;
//H is going to be big! X(XtX)-1Xt is nxn, so need to just generate diagonal terms
// X(XtX)-1 is nxm, so can just work the diagonals with Xt
public Matrix HatDiagonal;
	//ASSUMES FIRST ROW IS ALL ONES IF CONSTANT TERM TO BE INCLUDED
	//ATTRIBUTE FIRST Dirty hack	
	/**
	 * Builds the design matrices from attribute-major data: data[i] is the
	 * i-th attribute's values over all cases, so the input IS Xt (m rows of
	 * attributes, n columns of cases) and X is its transpose.
	 *
	 * @param data attribute-major design matrix, first row all ones for an intercept
	 * @param response the response vector y (length n)
	 */
	public LinearModel(double[][] data,double[] response)
	{
		m=data.length;
		n=data[0].length;
		y = response;
//This way round for consistency with other constructor		
		Xt=new Matrix(data);
//		System.out.println("Xt = \n"+Xt);
		X = Xt.transpose();
//		System.out.println("X = \n"+X);		
		Y=new Matrix(y,y.length);
	}
	/**
	 * Builds the design matrices from a weka Instances object: the response
	 * is the class attribute, row 0 of Xt is all ones (intercept), and rows
	 * 1..m-1 are the data attributes 0..m-2.
	 */
	public LinearModel(Instances data)
	{
//Form X and Y from Instances		
		n=data.numInstances();
		m=data.numAttributes();	//includes the constant term
		y = data.attributeToDoubleArray(data.classIndex());
		Y=new Matrix(y,y.length);
		double[][] xt = new double[m][n];
		for(int i=0;i<n;i++)
			xt[0][i]=1;
		for(int i=1;i<m;i++)
			xt[i]=data.attributeToDoubleArray(i-1);
		Xt=new Matrix(xt);
		X=Xt.transpose();
	}
public double[] getY(){return y;}
public double[] getPredicted(){return predicted;}
public double[] getResiduals(){return stdResidual;}
public double getSSR(){return SSR;}
public void fitModel()
{
//B = (XtX)-1XtY
XtXinv=Xt.times(X);
XtXinv=XtXinv.inverse();
Matrix temp= XtXinv.times(Xt),t2,t3;
//B should be m x 1
B=temp.times(Y);
paras=B.getColumnPackedCopy();
H_Diagonal=new double[n];
// (XtX)-1Xt is mxn, so can just work the diagonals with Xt
double sum=0;
for(int i=0;i<n;i++)
{
t2=X.getMatrix(i,i,0,m-1);
t3=t2.transpose();
// System.out.println("Row mult t2 rows ="+t2.getRowDimension()+" columns = "+t2.getColumnDimension());
t3=XtXinv.times(t3);
t3=t2.times(t3);
H_Diagonal[i]=t3.get(0,0);
sum+=H_Diagonal[i];
}
}
public double findInverseStats(double l, double[] untransformed)
{
formTrainPredictions();
predicted=YeoJohnson.invert(l,predicted);
y=untransformed;
findTrainStatistics();
return variance;
}
public double findStats()
{
formTrainPredictions();
findTrainStatistics();
return variance;
}
public double[] formTrainPredictions()
{
predicted=new double[n];
for(int i=0;i<n;i++)
{
//Find predicted
predicted[i]=paras[0];
for(int j=1;j<paras.length;j++)
predicted[i]+=paras[j]*X.get(i,j);
}
return predicted;
}
public void findTrainStatistics()
{
SSE=0;
stdResidual=new double[n];
residual=new double[n];
yBar=0;
for(int i=0;i<n;i++)
{
residual[i]=(y[i]-predicted[i]);
SSE+=residual[i]*residual[i];
yBar+=y[i];
}
yBar/=n;
variance=SSE/(n-paras.length);
SST=0;
for(int i=0;i<n;i++)
SST+=(y[i]-yBar)*(y[i]-yBar);
SSR=SST-SSE;
double s= Math.sqrt(variance);
standardisedError=0;
for(int i=0;i<n;i++)
{
stdResidual[i]=residual[i]/(s*(Math.sqrt(1-H_Diagonal[i])));
standardisedError+=stdResidual[i]*stdResidual[i];
}
standardisedError/=(n-paras.length);
}
public double[] formTestPredictions(Instances testData)
{
//Form X matrix from testData
int rows=testData.numInstances();
int cols=testData.numAttributes(); //includes the constant term
predicted=new double[rows];
if(cols!=m)
{
System.out.println("Error: Mismatch in attribute lengths in form test Train ="+m+" Test ="+cols);
System.exit(0);
}
double[][] xt = new double[cols][rows];
for(int i=0;i<rows;i++)
xt[0][i]=1;
for(int i=1;i<cols;i++)
xt[i]=testData.attributeToDoubleArray(i-1);
Matrix testX=new Matrix(xt);
testX=testX.transpose();
for(int i=0;i<rows;i++)
{
//Find predicted
predicted[i]=paras[0];
for(int j=1;j<paras.length;j++)
predicted[i]+=paras[j]*testX.get(i,j);
}
return predicted;
}
public String toString()
{
String str="Paras : ";
for(int i=0;i<paras.length;i++)
str+=paras[i]+" ";
return str;
}
public static void main(String[] args) {
Instances data=null;
try{
FileReader r = new FileReader("C:/Research/Code/Archive Generator/src/weka/addOns/RegressionTest2.arff");
data = new Instances(r);
data.setClassIndex(data.numAttributes()-1);
}catch(Exception e)
{
System.out.println("Error loading file "+e);
}
LinearModel lm = new LinearModel(data);
lm.fitModel();
lm.formTrainPredictions();
lm.findTrainStatistics();
OutFile f = new OutFile("C:/Research/Code/Archive Generator/src/weka/addOns/TestResults.csv");
f.writeLine("Parameters");
for(int i=0;i<lm.paras.length;i++)
f.writeString(lm.paras[i]+",");
f.writeLine("Variance = "+lm.variance);
f.writeLine("\nHatDiagonal, Actual, Predicted, StdResidual");
for(int i=0;i<lm.n;i++)
f.writeLine(lm.H_Diagonal[i]+","+lm.y[i]+","+lm.predicted[i]+","+lm.stdResidual[i]);
}
public double getSSE() {
throw new UnsupportedOperationException("Not yet implemented");
}
}
| 6,066 | 25.845133 | 108 | java |
tsml-java | tsml-java-master/src/main/java/statistics/transformations/MatrixSort.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package statistics.transformations;
/*
* Created on Jan 31, 2006
*
* TODO To change the template for this generated file go to
* Window - Preferences - Java - Code Style - Code Templates
*/
/**
* @author ajb
*
* TODO To change the template for this generated type comment go to
* Window - Preferences - Java - Code Style - Code Templates
*/
public class MatrixSort implements Comparable {
public double[] x;
public double y;
public int pos=0;
public MatrixSort(double[] X, double Y, int p)
{
x=X;
y=Y;
pos=p;
}
public int compareTo(Object c) {
if(this.x[pos]>((MatrixSort)c).x[pos])
return 1;
else if(this.x[pos]<((MatrixSort)c).x[pos])
return -1;
return 0;
}
public static void main(String[] args) {
}
}
| 1,511 | 26.490909 | 76 | java |
tsml-java | tsml-java-master/src/main/java/statistics/transformations/PCA.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
/*
* Created on Jan 30, 2006
*
*/
package statistics.transformations;
import java.io.FileReader;
import fileIO.*;
import weka.core.*;
import weka.attributeSelection.PrincipalComponents;
/**
 * Principal components transformation, wrapping weka's PrincipalComponents
 * attribute evaluator. transform() fits the projection on a data set;
 * staticTransform() re-applies the already-fitted projection to new data.
 */
public class PCA extends Transformations {
    double varianceCovered = 1;   // proportion of variance the retained components must cover

    public void setVariance(double v) {
        varianceCovered = v;
    }

    PrincipalComponents pca = new PrincipalComponents();

    /**
     * Fits the PCA on data and returns the projected data set. On failure
     * prints the error and terminates the JVM.
     */
    public Instances transform(Instances data) {
        Instances newData = data;
        try {
            pca.setVarianceCovered(varianceCovered);
            pca.buildEvaluator(data);
            newData = pca.transformedData(data);
        } catch (Exception e) {
            System.out.println(" Error = " + e);
            // BUG FIX: was System.exit(0), which reports SUCCESS to the OS on
            // a failure; exit with a non-zero status instead.
            System.exit(1);
        }
        //Build the transformation
        //Add the response back in
        return newData;
    }

    //Generally dont need this
    public Instances invert(Instances data) {
        return data;
    }

    /** Projects data through the PCA fitted by a previous transform() call. */
    public Instances staticTransform(Instances data) {
        Instance inst = null;
        Instances newData = null;
        try {
            newData = pca.transformedHeader();
            for (int i = 0; i < data.numInstances(); i++) {
                inst = pca.convertInstance(data.instance(i));
                newData.add(inst);
            }
        } catch (Exception e) {
            System.out.println(" Error in convert " + e);
            System.out.println(" instance =" + inst);
            System.exit(1);   // BUG FIX: was exit(0) — see transform()
        }
        return newData;
    }

    //PCA Leaves the response the same
    public double[] invertPredictedResponse(double[] d) {
        return d;
    }

    /** Ad hoc driver: transforms two hard-coded data sets and writes them out. */
    public static void main(String[] args) {
        PCA p = new PCA();
        Instances data;
        FileReader r;
        try {
            r = new FileReader("C:/Research/Data/Gavin Competition/Weka Files/SO2Combined.arff");
            data = new Instances(r);
            data.setClassIndex(data.numAttributes() - 1);
            p.varianceCovered = 0.95;
            data = p.transform(data);
            System.out.println(" New attribute size = " + data.numAttributes());
            OutFile of = new OutFile("C:/Research/Data/Gavin Competition/Weka Files/SO2CombinedTransformed.arff");
            of.writeLine(data.toString());
            r = new FileReader("C:/Research/Data/Gavin Competition/Weka Files/PrecipCombined.arff");
            data = new Instances(r);
            data.setClassIndex(data.numAttributes() - 1);
            p.varianceCovered = 0.95;
            data = p.transform(data);
            System.out.println(" New attribute size = " + data.numAttributes());
            of = new OutFile("C:/Research/Data/Gavin Competition/Weka Files/PrecipCombinedTransformed.csv");
            of.writeLine(data.toString());
        } catch (Exception e) {
            System.out.println(" Error in PCA " + e);
        }
    }
}
| 3,889 | 33.732143 | 117 | java |
tsml-java | tsml-java-master/src/main/java/statistics/transformations/PowerSearch.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
/*
* Created on Jan 30, 2006
*
* TODO To change the template for this generated file go to
* Window - Preferences - Java - Code Style - Code Templates
*/
package statistics.transformations;
import java.io.FileReader;
import fileIO.*;
import weka.core.*;
import weka.attributeSelection.PrincipalComponents;
/**
 * Grid search for the power transform x -> x^alpha (x -> log x when alpha is
 * 0) of a single regressor that minimises the residual variance of the
 * resulting linear model. alpha is searched over [MIN, MAX] in steps of
 * INCREMENT; a best alpha on either boundary is treated as "no transform"
 * (1 is returned).
 */
public class PowerSearch {
    public static double MIN = -4, MAX = 4, INCREMENT = 0.125;

    //First row is all ones (constant term); callers pass the attribute-major
    //design matrix used by LinearModel.
    /**
     * Searches for the best power transform of column pos of data.
     *
     * BUG FIX: the original replaced data[pos] with a scratch column and never
     * restored it, so on return the caller's matrix held the LAST trial
     * transform (alpha == MAX) — in particular a caller seeing the returned
     * "no transform" value 1 would then fit on x^MAX. The original column is
     * now restored before returning, so the search has no lasting side effect
     * on data.
     *
     * @param data     attribute-major design matrix (left unchanged on return)
     * @param response the response values
     * @param pos      index of the column to transform
     * @return the best exponent, or 1 if the best lies on the search boundary
     */
    public static double transformRegressor(double[][] data, double[] response, int pos) {
        // Swap a scratch column in so each trial transform can be written
        // without touching the caller's array.
        double[] originalColumn = data[pos];
        double[] candidate = new double[originalColumn.length];
        data[pos] = candidate;
        LinearModel l;
        double minAlpha = 0, minVariance = Double.MAX_VALUE;
        for (double alpha = MIN; alpha <= MAX; alpha += INCREMENT) {
            if (alpha == 0) {
                for (int j = 0; j < originalColumn.length; j++)
                    candidate[j] = Math.log(originalColumn[j]);
            } else {
                for (int j = 0; j < originalColumn.length; j++)
                    candidate[j] = Math.pow(originalColumn[j], alpha);
            }
            l = new LinearModel(data, response);
            l.fitModel();
            double s = l.findStats();
            if (s < minVariance) {
                minAlpha = alpha;
                minVariance = s;
            }
        }
        data[pos] = originalColumn;   // restore the caller's column
        if (minAlpha == MIN || minAlpha == MAX)
            minAlpha = 1;
        return minAlpha;
    }

    /** Returns a new array holding x[i]^power for every element. */
    public static double[] transform(double[] x, double power) {
        double[] newX = new double[x.length];
        for (int i = 0; i < x.length; i++)
            newX[i] = Math.pow(x[i], power);
        return newX;
    }

    /** Ad hoc harness operating on a hard-coded ARFF file. */
    public static void main(String[] args) {
        Instances data = null;
        try {
            FileReader r = new FileReader("C:/Research/Code/Archive Generator/src/weka/addOns/BoxTidwellTest2.arff");
            data = new Instances(r);
            data.setClassIndex(data.numAttributes() - 1);
        } catch (Exception e) {
            System.out.println("Error loading file " + e);
        }
        double[] powers = new double[data.numAttributes() - 1];
        System.out.println(" Final powers =");
        for (int i = 0; i < powers.length; i++)
            System.out.println(i + " =" + powers[i]);
        OutFile r = new OutFile("C:/Research/Code/Archive Generator/src/weka/addOns/BoxTidwellResults2.arff");
        r.writeLine(data.toString());
    }
}
| 3,288 | 28.106195 | 108 | java |
tsml-java | tsml-java-master/src/main/java/statistics/transformations/Reciprocal.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package statistics.transformations;
import weka.core.Instance;
import weka.core.Instances;
/**
 * Response transformation y -> 1/(y + offset). The offset is chosen in
 * transform() so that every shifted response is at least zeroOffset, keeping
 * the denominator strictly positive.
 */
public class Reciprocal extends Transformations {
    double offSet = 0;             // shift applied before the reciprocal; set by transform()
    static double zeroOffset = 1;  // minimum value the shifted response may take

    public Reciprocal() {
        supervised = true;
        response = true;
    }

    /**
     * Replaces the class attribute of every instance with 1/(y + offSet),
     * recomputing the offset from this data set's minimum response.
     * NOTE: prints per-instance debug output to System.out.
     */
    public Instances transform(Instances data) {
        //Not ideal, should call a method to get this
        int responsePos = data.numAttributes() - 1;
        double[] response = data.attributeToDoubleArray(responsePos);
        //Find the min value
        double min = response[0];
        for (int i = 0; i < response.length; i++) {
            if (response[i] < min)
                min = response[i];
        }
        // Shift so the smallest response maps to zeroOffset; a zero or
        // negative denominator would break the reciprocal.
        if (min <= zeroOffset)
            offSet = -min + zeroOffset;
        else
            offSet = 0;
        System.out.println(" Min value = " + min + " offset = " + offSet);
        for (int i = 0; i < data.numInstances(); i++) {
            Instance t = data.instance(i);
            double resp = t.value(responsePos);
            System.out.print(i + " " + resp);
            resp = 1 / (resp + offSet);
            System.out.println(" " + resp);
            t.setValue(responsePos, resp);
        }
        return data;
    }

    /** Inverse of transform(): y = 1/y' - offSet, applied to the class attribute. */
    public Instances invert(Instances data) {
        int responsePos = data.numAttributes() - 1;
        for (int i = 0; i < data.numInstances(); i++) {
            Instance t = data.instance(i);
            double resp = t.value(responsePos);
            resp = 1 / resp;
            resp -= offSet;
            t.setValue(responsePos, resp);
        }
        return data;
    }

    /**
     * Maps predicted values back to the original scale, in place.
     * BUG FIX: this previously applied Math.exp — the inverse of a LOG
     * transform, evidently copy-pasted. The inverse of y' = 1/(y + offSet) is
     * y = 1/y' - offSet, matching invert(Instances) above.
     */
    public double[] invertPredictedResponse(double[] d) {
        for (int i = 0; i < d.length; i++) {
            d[i] = 1 / d[i];
            d[i] -= offSet;
        }
        return d;
    }

    //Not relevant for this transform: there is no train-fitted component to
    //re-apply, so the data is returned unchanged.
    public Instances staticTransform(Instances data) {
        return data;
    }

    public static void main(String[] args) {
    }
}
| 2,962 | 29.546392 | 76 | java |
tsml-java | tsml-java-master/src/main/java/statistics/transformations/Transformations.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package statistics.transformations;
import statistics.distributions.NormalDistribution;
import weka.core.*;
/**
 * Base class for data transformations. Subclasses fit and apply a transform
 * to an Instances object (transform), undo it (invert / invertPredictedResponse)
 * and re-apply an already-fitted transform to new data (staticTransform).
 */
abstract public class Transformations {
    // Whether the transform needs the response to fit (supervised), and
    // whether it operates on the response attribute itself.
    boolean supervised = false;
    boolean response = false;

    public boolean isSupervised() { return supervised; }
    public boolean isResponseTransform() { return response; }

    /** Fits (if necessary) and applies the transformation to data. */
    abstract public Instances transform(Instances data);
    /** Undoes the transformation on data. */
    abstract public Instances invert(Instances data);
    //Transform data based on values formed by calling transform on another data set
    //Only needed for dependent variable transformations, for others does nothing
    abstract public Instances staticTransform(Instances data);
    /** Maps predicted responses back to the original scale. */
    abstract public double[] invertPredictedResponse(double[] d);

    /** By default quantiles invert the same way as point predictions. */
    public double[] invertPredictedQuantiles(double[] d) {
        return invertPredictedResponse(d);
    }

    // Number of quantiles produced by getNormalQuantiles(mean, variance).
    public static int width = 100;
    static public void setWidth(int w) { width = w; }

    /** Returns the width evenly spaced quantiles of N(mean, variance). */
    static public double[] getNormalQuantiles(double mean, double variance) {
        double[] q = new double[width];
        NormalDistribution norm = new NormalDistribution(mean, Math.sqrt(variance));
        for (int i = 0; i < width; i++)
            q[i] = norm.getQuantile((i + 1) / (double) width);
        return q;
    }

    /** Rescales standard-normal quantiles to N(mean, variance). */
    static public double[] getNormalQuantiles(double[] standard, double mean, double variance) {
        double stDev = Math.sqrt(variance);
        double[] q = new double[standard.length];
        for (int i = 0; i < standard.length; i++)
            q[i] = standard[i] * stDev + mean;
        return q;
    }

    /**
     * Returns actual - fitted, element-wise.
     *
     * @throws IllegalArgumentException if the arrays differ in length.
     *         (Previously this called System.exit(0), killing the JVM with a
     *         SUCCESS status on what is an error condition.)
     */
    public double[] findResiduals(double[] actual, double[] fitted) {
        if (actual.length != fitted.length)
            throw new IllegalArgumentException(" Error, mismatched lengths in findResiduals");
        double[] res = new double[actual.length];
        for (int i = 0; i < res.length; i++)
            res[i] = actual[i] - fitted[i];
        return res;
    }

    /**
     * Applies x -> x^power (or log x when the power is 0) in place to the
     * attributes listed in pos, skipping the class attribute.
     *
     * BUG FIX: the value read was previously inst.value(j) — the j-th
     * attribute of the instance — while the result was written to attribute
     * pos[j]. The read now also uses pos[j], so each listed attribute is
     * transformed from its OWN value.
     */
    static public Instances powerTransform(Instances data, int[] pos, double[] powers) {
        Instance inst;
        int p = data.classIndex();
        for (int i = 0; i < data.numInstances(); i++) {
            inst = data.instance(i);
            for (int j = 0; j < pos.length; j++) {
                if (pos[j] != p) {
                    if (powers[j] != 0)
                        inst.setValue(pos[j], Math.pow(inst.value(pos[j]), powers[j]));
                    else
                        inst.setValue(pos[j], Math.log(inst.value(pos[j])));
                }
            }
        }
        return data;
    }
}
| 3,098 | 30.622449 | 90 | java |
tsml-java | tsml-java-master/src/main/java/statistics/transformations/VarianceStabalisingStepwiseRegression.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package statistics.transformations;
import fileIO.*;
import statistics.tests.ResidualTests;
/**
 * Stepwise regression experiments with variance-stabilising transforms on the
 * Gavin-competition data sets. All state is static: one of the loader methods
 * (Synthetic, Temperature, SO2, ...) fills X and Y, then main dispatches to a
 * model-building routine.
 */
public class VarianceStabalisingStepwiseRegression {
// Problem dimensions: m regressors (excluding the constant row), n cases.
static int m,n;
// Attribute-major design matrix; row 0 is all ones (constant term).
static double[][] X;
static double[][] transformedX;
// Columns of X currently selected into the model; presumably rebuilt by
// formatRegressors() (defined later in the file) — TODO confirm.
static double[][] workingX;
static double[] Y;
static double[] transformedY;
// Power-transform exponent chosen for each regressor slot.
static double[] powers;
// included[i] == true when X[i] is currently in the model.
static boolean[] included;
static boolean[] transformIncluded;
// positions[k] = index into X of the k-th selected regressor; size = count.
static int[] positions;
static int size=0;
static int[] transformedPositions;
static int transSize=0;
// Threshold on the (SSR change)/variance ratio used to accept additions and
// reject removals — an F-like criterion.
static final double CRITICAL=7;
//0.823;
/**
 * Driver: runs the selected model-building routine (choice: 0 = full model,
 * 1 = stepwise linear, 2 = forward selection with transforms) over the
 * Synthetic, Temp-Reduced, SO2, SO2-Reduced and Precip-Reduced data sets,
 * writing diagnostics to a single results file. The unreduced Temperature
 * and Precipitation runs from an earlier experiment are disabled.
 */
public static void main(String[] args) {
    OutFile f = new OutFile("C:/Research/Data/Gavin Competition/Results/StepwiseTransformationPowerResults.csv");
    f.writeLine("Synthetic,MSE,AP,KS,RT,RQ");
    int choice = 2;
    Synthetic();
    runSelected(choice, f);
    System.out.println(" Starting Temp Reduced ...");
    f.writeLine("Temp Reduced");
    TemperatureReduced();
    runSelected(choice, f);
    System.out.println(" Starting SO2 ...");
    f.writeLine("SO2");
    SO2();
    runSelected(choice, f);
    System.out.println(" Starting SO2 Reduced...");
    f.writeLine("SO2 Reduced");
    SO2Reduced();
    runSelected(choice, f);
    f.writeLine("Precip Reduced");
    PrecipReduced();
    runSelected(choice, f);
}

/** Dispatches to the model-building routine selected by choice. */
private static void runSelected(int choice, OutFile f) {
    if (choice == 0)
        fullModel(f);
    else if (choice == 1)
        stepwiseLinear(f);
    else
        forwardSelectTransform(f);
}
/**
 * Loads the synthetic training set into the static X/Y state: one regressor,
 * 256 cases, each line read as regressor value(s) then the response. The
 * regressor is shifted by synthScale — presumably to keep it positive for the
 * log/power transforms (TODO confirm). Unused locals from the original
 * (n2, p2, finished, attCount, c, var, minVar, newVar, oldSSR, newSSR) removed.
 */
public static void Synthetic() {
    m = 1;
    n = 256;
    int synthScale = 5;
    String path = "C:/Research/Data/Gavin Competition/Synthetic/";
    String p1 = "Synthetic Train.csv";
    X = new double[m + 1][n];
    Y = new double[n];
    transformedY = new double[n];
    InFile f = new InFile(path + p1);
    for (int i = 0; i < n; i++)
        X[0][i] = 1;   // constant term
    for (int i = 0; i < n; i++) {
        for (int j = 1; j <= m; j++)
            X[j][i] = f.readDouble() + synthScale;
        Y[i] = f.readDouble();
    }
}
/**
 * Loads the pre-reduced temperature training set (20 regressors, 7117 cases)
 * into the static X/Y state. Each line is read as m regressor values then the
 * response; regressors are shifted by tempScale — presumably to keep them
 * positive for the log/power transforms (TODO confirm). Unused locals from
 * the original (collinear, finished, attCount, c, var, minVar, newVar,
 * oldSSR, newSSR, c1, c2) removed.
 */
public static void TemperatureReduced() {
    m = 20;
    n = 7117;
    double tempScale = 10;
    String path = "C:/Research/Data/Gavin Competition/Temperature/TempTransformed Train.csv";
    X = new double[m + 1][n];
    Y = new double[n];
    transformedY = new double[n];
    InFile f = new InFile(path);
    for (int i = 0; i < n; i++)
        X[0][i] = 1;   // constant term
    for (int i = 0; i < n; i++) {
        for (int j = 1; j <= m; j++)
            X[j][i] = f.readDouble() + tempScale;
        Y[i] = f.readDouble();
    }
}
/**
 * Loads the full temperature training set (106 regressors, 7117 cases) into
 * the static X/Y state, then drops the attribute rows listed in collinear
 * (hand-identified collinear columns), updating m accordingly. Regressors
 * are shifted by tempScale — presumably to keep them positive for the
 * log/power transforms (TODO confirm). Unused locals from the original
 * (finished, attCount, c, var, minVar, newVar, oldSSR, newSSR) removed.
 */
public static void Temperature() {
    m = 106;
    n = 7117;
    double tempScale = 10;
    String path = "C:/Research/Data/Gavin Competition/Temperature/TempTraining.csv";
    //Attribute rows to remove (sorted; includes the 1-based data columns
    //identified as collinear)
    int[] collinear = {1, 3, 4, 5, 6, 7, 20, 34, 35, 36, 47, 48, 72, 82};
    X = new double[m + 1][n];
    Y = new double[n];
    transformedY = new double[n];
    InFile f = new InFile(path);
    for (int i = 0; i < n; i++)
        X[0][i] = 1;   // constant term
    for (int i = 0; i < n; i++) {
        for (int j = 1; j <= m; j++)
            X[j][i] = f.readDouble() + tempScale;
        Y[i] = f.readDouble();
    }
    // Copy every row of X not listed in collinear into the reduced matrix.
    int c1 = 0, c2 = 0;
    double[][] reducedX = new double[m + 1 - collinear.length][];
    for (int i = 0; i <= m; i++) {
        if (c1 >= collinear.length || i != collinear[c1]) {
            reducedX[c2] = X[i];
            c2++;
        } else
            c1++;
    }
    X = reducedX;
    m = reducedX.length - 1;
}
/**
 * Loads the SO2 training set (26 regressors, 15304 cases) into the static
 * X/Y state. Each line is read as m regressor values then the response;
 * regressors are shifted by so2Scale — presumably to keep them positive for
 * the log/power transforms (TODO confirm). Unused locals from the original
 * (finished, attCount, c, var, minVar, newVar, oldSSR, newSSR) removed.
 */
public static void SO2() {
    m = 26;
    n = 15304;
    int so2Scale = 10;
    String path = "C:/Research/Data/Gavin Competition/SO2/";
    String p1 = "SO2Train.csv";
    X = new double[m + 1][n];
    Y = new double[n];
    transformedY = new double[n];
    InFile f = new InFile(path + p1);
    for (int i = 0; i < n; i++)
        X[0][i] = 1;   // constant term
    for (int i = 0; i < n; i++) {
        for (int j = 1; j <= m; j++)
            X[j][i] = f.readDouble() + so2Scale;
        Y[i] = f.readDouble();
    }
}
/**
 * Loads the pre-reduced SO2 training set (19 regressors, 15304 cases) into
 * the static X/Y state; format and scaling as in SO2(). Unused locals from
 * the original (finished, attCount, c, var, minVar, newVar, oldSSR, newSSR)
 * removed.
 */
public static void SO2Reduced() {
    m = 19;
    n = 15304;
    int so2Scale = 10;
    String path = "C:/Research/Data/Gavin Competition/SO2/";
    String p1 = "SO2TrainReduced.csv";
    X = new double[m + 1][n];
    Y = new double[n];
    transformedY = new double[n];
    InFile f = new InFile(path + p1);
    for (int i = 0; i < n; i++)
        X[0][i] = 1;   // constant term
    for (int i = 0; i < n; i++) {
        for (int j = 1; j <= m; j++)
            X[j][i] = f.readDouble() + so2Scale;
        Y[i] = f.readDouble();
    }
}
/**
 * Loads the precipitation training set (106 regressors, 7031 cases) into the
 * static X/Y state. Each line is read as m regressor values then the
 * response; regressors are shifted by precipScale — presumably to keep them
 * positive for the log/power transforms (TODO confirm). Unused locals from
 * the original (finished, attCount, c, var, minVar, newVar, oldSSR, newSSR)
 * removed.
 */
public static void Precip() {
    m = 106;
    n = 7031;
    int precipScale = 6;
    String path = "C:/Research/Data/Gavin Competition/Precipitation/";
    String p1 = "PrecipitationTrain.csv";
    X = new double[m + 1][n];
    Y = new double[n];
    transformedY = new double[n];
    InFile f = new InFile(path + p1);
    for (int i = 0; i < n; i++)
        X[0][i] = 1;   // constant term
    for (int i = 0; i < n; i++) {
        for (int j = 1; j <= m; j++)
            X[j][i] = f.readDouble() + precipScale;
        Y[i] = f.readDouble();
    }
}
/**
 * Loads the pre-reduced precipitation training set (20 regressors, 7031
 * cases) into the static X/Y state; format and scaling as in Precip().
 * Unused locals from the original (finished, attCount, c, var, minVar,
 * newVar, oldSSR, newSSR) removed.
 */
public static void PrecipReduced() {
    m = 20;
    n = 7031;
    int precipScale = 6;
    String path = "C:/Research/Data/Gavin Competition/Precipitation/";
    String p1 = "PrecipTrainReduced.csv";
    X = new double[m + 1][n];
    Y = new double[n];
    transformedY = new double[n];
    InFile f = new InFile(path + p1);
    for (int i = 0; i < n; i++)
        X[0][i] = 1;   // constant term
    for (int i = 0; i < n; i++) {
        for (int j = 1; j <= m; j++)
            X[j][i] = f.readDouble() + precipScale;
        Y[i] = f.readDouble();
    }
}
/**
 * Computes the fitted model's residual diagnostics (residual variance plus
 * the Anscombe, Kolmogorov-Smirnov, runs and Goldfeld-Quandt test
 * statistics), prints them and appends a CSV line to f2.
 */
static public void findStats(OutFile f2, LinearModel lm) {
    double mse = lm.findStats();
    double[] residuals = lm.getResiduals();
    double[] fitted = lm.getPredicted();
    double anscombe = ResidualTests.anscombeProcedure(fitted, residuals);
    double kolmogorov = ResidualTests.kolmogorovSmirnoff(residuals);
    double runs = ResidualTests.runsTest(fitted, residuals);
    // Goldfeld-Quandt works on the raw design matrix / response statics.
    double goldfeld = ResidualTests.goldfeldQuandt(X, Y, 1);
    System.out.println("YJ, s^2 = " + mse + ", AP = " + anscombe + ", KS = " + kolmogorov
            + ", RT = " + runs + ", GQ = " + goldfeld);
    f2.writeLine("FullReg," + mse + "," + anscombe + "," + kolmogorov + "," + runs + "," + goldfeld);
}
/**
 * Fits the full regression twice — once on the raw response, then on the
 * best Yeo-Johnson transform of the response — writing diagnostics for both
 * through findStats.
 */
public static void fullModel(OutFile f2) {
    // Baseline: ordinary least squares on the untransformed response.
    LinearModel baseline = new LinearModel(X, Y);
    baseline.fitModel();
    findStats(f2, baseline);
    // Refit on the Yeo-Johnson transformed response.
    double best = YeoJohnson.findBestTransform(X, Y);
    System.out.println(" Best Transform = " + best);
    double[] transformed = YeoJohnson.transform(best, Y);
    LinearModel yjModel = new LinearModel(X, transformed);
    yjModel.fitModel();
    f2.writeLine("YJ Transform");
    findStats(f2, yjModel);
}
/**
 * Forward stepwise regression with backward elimination on the raw
 * (untransformed) regressors, operating on the class-level statics (X, Y,
 * included, positions, size; workingX is rebuilt by formatRegressors).
 *
 * Loop: fit the current model, take the best candidate addition
 * (findBestAddition), keep it only when the improvement ratio
 * (newSSR - oldSSR)/var exceeds CRITICAL, then — once more than three terms
 * are in — offer an existing term for removal via tryRemovals. Final model
 * diagnostics are written through findStats.
 */
public static void stepwiseLinear(OutFile f2)
{
boolean finished=false;
included=new boolean[m+1];
int attCount=0;
double var,oldSSR,newSSR,newVar;
//This is going to record whether the base values are included, not the transformed
included[0]=true; //Always include constant
for(int i=1;i<=m;i++)
included[i]=false;
positions=new int[m+1];
while(!finished)
{
attCount=formatRegressors();
//Fit linear model with current candidates
LinearModel lm = new LinearModel(workingX,Y);
lm.fitModel();
var=lm.findStats();
oldSSR=lm.getSSR();
//Try adding in each variable in position attCount, record best improvement
//Fitting one more model than necessary, but makes code clearer
//At the end, workingX should have the new candidate in position attCount
//Returns the POSITION IN X
int bestPos=findBestAddition(attCount);
attCount++;
//If improvement significant, add in permanently by setting flag, otherwise dont
lm = new LinearModel(workingX,Y);
lm.fitModel();
newVar=lm.findStats();
newSSR=lm.getSSR();
System.out.println(" Verification: New Var = "+newVar);
if((newSSR-oldSSR)/var>CRITICAL)
{
System.out.println("ADDING = "+bestPos);
included[bestPos]=true;
positions[attCount-1]=bestPos;
size=attCount;
}
else
{
System.out.println("NOT ADDING = "+bestPos);
included[bestPos]=false;
finished=true;
}
if(attCount==m+1)
finished=true;
// Try removing any already in the model, if no significant worsening, remove
int worst;
if(attCount>3)
{
worst=tryRemovals(X[bestPos],newVar, newSSR);
if(worst!=-1)
{
System.out.println(" Removing Element > "+worst);
included[worst]=false;
// Shuffle the removed entry out of the positions array, then rebuild
// workingX from the surviving selection.
int x=0;
while(x<attCount)
{
if(positions[x]==worst)
{
while(x<attCount-1)
{
positions[x]=positions[x+1];
x++;
}
}
x++;
}
attCount--;
attCount=formatRegressors();
}
}
}
//Get full diagnostics on the final model
attCount=formatRegressors();
//Fit linear model with current candidates
LinearModel lm = new LinearModel(workingX,Y);
lm.fitModel();
findStats(f2,lm);
}
// Sequence number appended to the "TestTrans" dump file written at the end
// of forwardSelectTransform, so successive runs get distinct file names.
static int count=0;
/**
 * Forward selection where each candidate regressor is offered its best power
 * transform (findBestAdditionTransformed) and, after every addition, the
 * response is offered a Yeo-Johnson transform whose lambda is rounded to the
 * nearest 0.5; a rounded lambda of 1 means "leave the response alone".
 * Additions are kept when (newSSR - oldSSR)/var exceeds CRITICAL. The final
 * model (YJ-transformed response if one was chosen) is fitted and its
 * diagnostics written through findStats, and the chosen powers plus the
 * (possibly transformed) X matrix are dumped to TestTrans<count>.csv.
 * Operates on the class-level statics (X, Y, workingX, included, positions,
 * powers, size).
 */
public static void forwardSelectTransform(OutFile f2)
{
boolean finished=false, useYJ=false;
double bestLambda=1,temp;
included=new boolean[m+1];
int attCount=0;
LinearModel lm;
double var,oldSSR,newSSR,newVar;
//This is going to record whether the base values are included, not the transformed
included[0]=true; //Always include constant
for(int i=1;i<=m;i++)
included[i]=false;
positions=new int[m+1];
powers=new double[m+1];
while(!finished)
{
attCount=formatRegressors();
//Fit linear model with current candidates
lm = new LinearModel(workingX,Y);
lm.fitModel();
var=lm.findStats();
oldSSR=lm.getSSR();
//Try adding in each variable in position attCount, record best improvement
//Fitting one more model than necessary, but makes code clearer
//At the end, workingX should have the new candidate in position attCount
//Returns the POSITION IN X
int bestPos=findBestAdditionTransformed(attCount);
attCount++;
//If improvement significant, add in permanently by setting flag, otherwise dont
lm = new LinearModel(workingX,Y);
lm.fitModel();
newVar=lm.findStats();
newSSR=lm.getSSR();
System.out.println(" Verification: New Var = "+newVar);
if((newSSR-oldSSR)/var>CRITICAL)
{
System.out.println("ADDING = "+bestPos);
included[bestPos]=true;
positions[attCount-1]=bestPos;
size=attCount;
}
else
{
System.out.println("NOT ADDING = "+bestPos);
included[bestPos]=false;
finished=true;
}
if(attCount==m+1)
finished=true;
attCount=formatRegressors();
//Yeo Johnson first
System.out.println(" TRY YJ: ");
bestLambda=YeoJohnson.findBestTransform(workingX,Y);
// Round to the nearest 0.5
temp=((double)Math.round(bestLambda*2))/2;
double alpha=1;
System.out.println("Best Lambda value ="+bestLambda+" Rounded = "+temp);
int p=0;
useYJ=false;
if(temp!=1)
{
transformedY=YeoJohnson.transform(temp,Y);
lm=new LinearModel(workingX,transformedY);
lm.fitModel();
double s=lm.findInverseStats(temp,Y);
useYJ=true;
System.out.println("s = "+s);
}
}
//Get full diagnostics on the final model
attCount=formatRegressors();
//Fit linear model with current candidates
if(useYJ)
{
temp=((double)Math.round(bestLambda*2))/2;
transformedY=YeoJohnson.transform(temp,Y);
lm = new LinearModel(workingX,transformedY);
}
else
lm = new LinearModel(workingX,Y);
lm.fitModel();
findStats(f2,lm);
// Dump the chosen powers and the design matrix for offline inspection.
OutFile f3 = new OutFile("TestTrans"+count+".csv");
count++;
for(int i=0;i<powers.length;i++)
f3.writeString(powers[i]+",");
for(int j=0;j<X[0].length;j++)
{
for(int i=0;i<X.length;i++)
f3.writeString(X[i][j]+",");
f3.writeString("\n");
}
}
/**
 * Dead scratch code: the entire body is commented out. It preserves two
 * earlier attempts at transforming the newly entered regressor (a simple
 * power search, and a Yeo-Johnson response transform combined with a
 * Box-Tidwell regressor transform) and is kept only for reference.
 */
public static void transformCode()
{
/* //Simple parameter search on variable just entered
attCount=formatRegressors();
int p=0;
double alpha;
while(p<size && positions[p]!=bestPos) p++;
System.out.println("p = "+p+" Pos = "+positions[p]);
if(p<size)
{
alpha=PowerSearch.transformRegressor(workingX,Y,p);
System.out.println("Alpha = "+alpha);
alpha =((double) Math.round(alpha*2))/2.0;
System.out.println("Rounded Alpha = "+alpha);
if(alpha==0)
{
for(int i=0;i<X[bestPos].length;i++)
X[bestPos][i]=Math.log(X[bestPos][i]);
}
else if (alpha!=1)
{
for(int i=0;i<X[bestPos].length;i++)
X[bestPos][i]=Math.pow(X[bestPos][i],alpha);
}
}
//
// First effort, try YJ and B-T on newly entered
// Try Transformations, not going to bother Y with TEMP
attCount=formatRegressors();
//Yeo Johnson first
System.out.println(" Nos atts = "+attCount);
double bestLambda=YeoJohnson.findBestTransform(workingX,Y);
//Round to the nearest 0.5
double temp=((double)Math.round(bestLambda*2))/2;
double alpha=1;
System.out.println("Best Lambda value ="+bestLambda+" Rounded = "+temp);
int p=0;
boolean yo=false;
if(temp!=1)
{
transformedY=YeoJohnson.transform(temp,Y);
lm=new LinearModel(workingX,transformedY);
lm.fitModel();
double s=lm.findInverseStats(temp,Y);
yo=true;
System.out.println("s = "+s);
}
while(p<size && positions[p]!=bestPos) p++;
System.out.println("p = "+p+" Pos = "+positions[p]);
if(p<size)
{
if(yo)
alpha=BoxTidwell.transformRegressor(workingX,transformedY,p);
else
alpha=BoxTidwell.transformRegressor(workingX,Y,p);
}
System.out.println("************* ALPHA = "+alpha);
*/
}
//Calculate SSR of removing each one except a, then only remove if not significantly worse
/**
 * Offers each previously selected regressor for removal. The constant
 * (workingX[0]) and the just-added column inData are always retained; the
 * remaining selected columns are cycled out of the fitted model one at a
 * time by swapping array references. The deletion losing the least SSR is
 * the candidate; if its loss ratio (fullSSR - worstSSR)/var is below
 * CRITICAL the candidate's index into X is returned, otherwise -1.
 *
 * NOTE(review): the mapping from loop index to positions[] relies on the
 * exact swap order below — do not reorder the swap statements.
 *
 * @param inData  the column just added to the model, always kept
 * @param var     current residual variance estimate
 * @param fullSSR SSR of the full (all selected) model
 * @return index into X of a removable regressor, or -1 if none
 */
public static int tryRemovals(double[] inData, double var, double fullSSR)
{
int worst=0,outPos;
double worstSSR=Double.MAX_VALUE,ssr,s;
//Swap new one into position 0, always include
LinearModel lm;
double[][] temp = new double[size-1][];
temp[0]=workingX[0];
temp[1]=inData;
double[] out=workingX[1];
double[] t;
int[] tempPos=new int[size-1];
int tempOutPos,a;
tempPos[0]=0;
tempOutPos=positions[1];
tempPos[1]=positions[size-1];
System.out.println(" size = "+size);
for(int i=2;i<size-1;i++)
{
temp[i]=workingX[i];
tempPos[i]=positions[i];
}
int i=2;
do{
//Fit reduced model
lm=new LinearModel(temp,Y);
lm.fitModel();
//Find new SSR, record the largest of reduced
s=lm.findStats();
ssr=lm.getSSR();
if(ssr<worstSSR)
{
worstSSR=ssr;
worst=i-1;
}
//Swap the next candidate out and the previous one back in
if(i<size-1)
{
a=tempOutPos;
tempOutPos=tempPos[i];
tempPos[i]=a;
t=temp[i];
temp[i]=out;
out=t;
}
i++;
}while(i<size);
//Test worst, if not significant, return position.
//NOTE that the position in the ORIGINAL data is recorded by positions[worst]
outPos=positions[worst];
if(((fullSSR-worstSSR)/var)<CRITICAL)
return outPos;
return -1;
}
/**
 * Greedy forward-selection step with per-regressor power transformation.
 * For each attribute not yet included, appends it (power-transformed via
 * PowerSearch) to the current working regressors, fits a linear model, and
 * tracks the attribute giving the lowest error statistic. On success the
 * chosen attribute is transformed IN PLACE (X[best] is overwritten with its
 * power-transformed values), its power is recorded in powers[best], and
 * workingX is replaced by the extended regressor matrix.
 *
 * NOTE(review): the guard is best&gt;0, so attribute 0 can never be committed
 * even if it scores best ("Shouldnt be 0" suggests this is intentional, but
 * confirm -- best&gt;=0 may be what is meant).
 *
 * @param a number of regressors currently in workingX
 * @return index of the best attribute to add, or -1 if none was tried
 */
public static int findBestAdditionTransformed(int a)
{
    int best=-1;
    double[][] temp = new double[a+1][];
    LinearModel lmTemp;
    double minSSE=Double.MAX_VALUE,s,bestPower=1;
    System.arraycopy(workingX, 0, temp, 0, a);
    for(int i=0;i<included.length;i++)
    {
        if(!included[i])
        {
            temp[a]=X[i];// new double[X[i].length];
//            for(int j=0;j<X[i].length;j++)
//                temp[a][j]=X[i][j];
            //Search for the best power transform of the candidate regressor
            double power=PowerSearch.transformRegressor(temp,Y,a);
            if(power!=1)
                temp[a]=PowerSearch.transform(X[i],power);
            lmTemp=new LinearModel(temp,Y);
            lmTemp.fitModel();
            s=lmTemp.findStats();
//            System.out.println(" Adding in attribute = "+i+" with power = "+power+" New MSE = "+s);
            if(s<minSSE)
            {
                minSSE=s;
                best=i;
                bestPower=power;
            }
        }
    }
    if(best>0) //Shouldnt be 0
    {
        System.out.println(" BEST to add = "+best+" with power = "+bestPower+" MSE = "+minSSE);
        powers[best]=bestPower;
        //Side effect: X[best] is permanently replaced by its transformed values
        temp[a]=PowerSearch.transform(X[best],bestPower);
        X[best]=temp[a];
        workingX=temp;
    }
    return best;
}
/**
 * Greedy forward-selection step (untransformed regressors).
 * Trial-fits a linear model with each currently excluded attribute appended
 * to the working regressors and returns the index of the attribute giving
 * the lowest error statistic; on success workingX is replaced by the
 * extended matrix. Returns -1 if no candidate attribute was tried.
 *
 * @param a number of regressors currently in workingX
 * @return index of the best attribute to add, or -1
 */
public static int findBestAddition(int a)
{
    double[][] candidate = new double[a+1][];
    System.arraycopy(workingX, 0, candidate, 0, a);
    int bestAtt = -1;
    double bestScore = Double.MAX_VALUE;
    for(int att = 0; att < included.length; att++)
    {
        if(included[att])
            continue;   //already in the model
        candidate[a] = X[att];
        LinearModel trial = new LinearModel(candidate, Y);
        trial.fitModel();
        double score = trial.findStats();
//        System.out.println(" Adding in attribute = "+att+" New MSE = "+score);
        if(score < bestScore)
        {
            bestScore = score;
            bestAtt = att;
        }
    }
    if(bestAtt > 0) //Shouldnt be 0
    {
        System.out.println(" BEST to add = "+bestAtt+" with MSE = "+bestScore);
        candidate[a] = X[bestAtt];
        workingX = candidate;
    }
    return bestAtt;
}
/**
 * Rebuilds workingX from the attributes flagged in included, recording each
 * selected attribute's original index in positions.
 *
 * @return the number of attributes copied into workingX
 */
public static int formatRegressors()
{
    int selected = 0;
    for(boolean in : included)
        if(in)
            selected++;
    workingX = new double[selected][];
    int next = 0;
    for(int att = 0; att < included.length; att++)
    {
        if(!included[att])
            continue;
        workingX[next] = X[att];
        positions[next] = att;
        next++;
    }
    return selected;
}
}
| 19,464 | 25.162634 | 131 | java |
tsml-java | tsml-java-master/src/main/java/statistics/transformations/YeoJohnson.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
/*
* Created on Jan 29, 2006
*
* TODO To change the template for this generated file go to
* Window - Preferences - Java - Code Style - Code Templates
*/
package statistics.transformations;
import fileIO.OutFile;
import statistics.tests.ResidualTests;
import weka.core.Instance;
import weka.core.Instances;
/**
* @author ajb
*
* TODO To change the template for this generated type comment go to
* Window - Preferences - Java - Code Style - Code Templates
*/
public class YeoJohnson extends BoxCox{
public YeoJohnson(double r)
{
super();
bestLambda=r;
}
//Performs a specific B-C transform on the response variable, overwriting original
@Override
public Instances invert(Instances data){
Instance inst;
int responsePos=data.numAttributes()-1;
double[] response=data.attributeToDoubleArray(responsePos);
double v;
double[] newVals=invert(bestLambda,response);
for(int i=0;i<data.numInstances();i++)
{
inst=data.instance(i);
inst.setValue(responsePos,newVals[i]);
}
return data;
}
@Override
public double[] invertPredictedResponse(double[] d)
{
return invert(bestLambda,d);
}
static public Instances invertResponse(Instances data, double lambda){
Instance inst;
int responsePos=data.classIndex();
double[] response=data.attributeToDoubleArray(responsePos);
double v;
for(int i=0;i<response.length;i++)
{
inst=data.instance(i);
if(response[i]<0)
{
if(lambda!=2)
v=-(Math.pow((1-response[i]),2-lambda)-1)/(2-lambda);
else
v=-Math.log(1-response[i]);
}
else
{
if(lambda==0)
v=Math.log(1+response[i]);
else
v=(Math.pow(response[i]+1,lambda)-1)/lambda;
}
inst.setValue(responsePos,v);
}
return data;
}
static public Instances transformResponse(Instances data, double lambda)
{
transformResponse(data,lambda,data.attributeToDoubleArray(data.classIndex()));
return data;
}
public static double[] invert(double lambda, double[] response)
{
double[] data=new double[response.length];
for(int i=0;i<response.length;i++)
{
if(response[i]<0)
{
if(lambda!=2)
{
// data[i]=-(Math.pow((1-response[i]),2-lambda)-1)/(2-lambda);
//Need to check whether 1.0/(2.0-lambda) is negative I think, as it doesnt work wth power?
data[i]=1-Math.pow((1-(2-lambda)*response[i]),1.0/(2.0-lambda));
}
else
// data[i]=-Math.log(1-response[i]);
data[i]=1-Math.exp(-response[i]);
}
else
{
if(lambda==0)
// data[i]=Math.log(1+response[i]);
data[i]=Math.exp(response[i])-1;
else
// data[i]=(Math.pow(response[i]+1,lambda)-1)/lambda;
{//HACK
if(lambda*response[i]+1>0.000001)
data[i]=Math.pow((lambda*response[i]+1),1.0/lambda)-1;
else if(i==0)
data[i]=Math.pow((0.000001),1.0/lambda)-1;
else
data[i]=1.05*data[i-1];
}
}
if(data[i]==Double.NaN)
{
System.out.println("NAN in invert: Response = "+response[i]+" lambda = "+lambda);
System.exit(0);
}
}
return data;
}
public static double[] transform(double lambda, double[] response)
{
double[] data=new double[response.length];
for(int i=0;i<response.length;i++)
{
if(response[i]<0)
{
if(lambda!=2)
data[i]=-(Math.pow((1-response[i]),2-lambda)-1)/(2-lambda);
else
data[i]=-Math.log(1-response[i]);
}
else
{
if(lambda==0)
data[i]=Math.log(1+response[i]);
else
data[i]=(Math.pow(response[i]+1,lambda)-1)/lambda;
}
if(data[i]==Double.NaN)
{
System.out.println("NAN in transform: Response = "+response[i]+" lambda = "+lambda);
System.exit(0);
}
}
return data;
}
static public Instances transformInstances(Instances data, double lambda)
{
transformResponse(data,lambda,data.attributeToDoubleArray(data.classIndex()));
return data;
}
static public void transformResponse(Instances data, double lambda, double[] response)
{
Instance inst;
int responsePos=data.classIndex();
double[] newData=transform(lambda,response);
for(int i=0;i<response.length;i++)
{
inst=data.instance(i);
inst.setValue(responsePos,newData[i]);
}
}
static public double findBestTransform(double[][] data, double[] res)
{
double[] response;
// double[] predictions=new double[res.length];
double v;
Instance inst;
LinearModel lm;
double bestLambda=MIN,minError=Double.MAX_VALUE,error;
double correlation;
for(double lambda=MIN;lambda<=MAX;lambda+=INTERVAL)
{
//Transform response
response=transform(lambda,res);
lm=new LinearModel(data,response);
lm.fitModel();
double e =lm.findStats();
//Initially, just going to use standardised SSR
/*Use the K-S stat for this
double ks=ResidualTests.kolmogorovSmirnoff(lm.stdResidual);
correlation=ResidualTests.testHeteroscadisity(lm.y,lm.predicted);
*/ error=lm.findInverseStats(lambda,res);
// error=correlation;
if(error<minError)
{
bestLambda=lambda;
minError=error;
}
// System.out.println(" Lambda ="+lambda+" untransformed error = "+e+" Transformed error = "+error+" Correlation = "+correlation+" KS = "+ks);
}
// power[pos]=bestLambda;
return bestLambda;
}
static public double findBestTransform(Instances data, int pos, double[] power)
{
int responsePos=data.classIndex();
double[] temp=data.attributeToDoubleArray(responsePos);
double[] response=new double[temp.length];
System.arraycopy(temp, 0, response, 0, temp.length);
double[] predictions=new double[response.length];
double v;
Instance inst;
LinearModel lm;
double bestLambda=MIN,minError=Double.MAX_VALUE,error;
double correlation;
for(double lambda=MIN;lambda<=MAX;lambda+=INTERVAL)
{
//Transform response
transformResponse(data,lambda,response);
lm=new LinearModel(data);
lm.fitModel();
lm.formTrainPredictions();
lm.findTrainStatistics();
//Use the K-S stat for this
error=ResidualTests.kolmogorovSmirnoff(lm.stdResidual);
correlation=ResidualTests.testHeteroscadisity(lm.y,lm.predicted);
if(error<minError)
{
bestLambda=lambda;
minError=error;
}
// System.out.println(" Lambda ="+lambda+" KS Stat = "+error+" Correlation = "+correlation);
}
power[pos]=bestLambda;
return minError;
}
@Override
public Instances transform(Instances data)
{
System.out.println(" Doesnt do anything! ");
int responsePos=data.numAttributes()-1;
double[] response=data.attributeToDoubleArray(responsePos);
double[] preds=new double[response.length];
double v;
Instance inst;
return data;
}
public static void main(String[] args)
{
int size=100;
double[] d=new double[size];
for(int i=0;i<size;i++)
d[i]=-5+0.1*i;
OutFile f= new OutFile("TestYeoJohnson.csv");
f.writeLine("Data,Lambda-3,Lambda-1,Lambda-0.5,Lambda0,Lambda0.5,Lambda1,Lambda2,Lambda3, InvLambda-3,InvLambda-1,InvLambda-0.5,InvLambda0,InvLambda0.5,InvLambda1,InvLambda2,InvLambda3");
double[][] d2=new double[8][];
double[][] inv=new double[8][];
d2[0]=transform(-3.0,d);
d2[1]=transform(-1.0,d);
d2[2]=transform(-0.5,d);
d2[3]=transform(0.0,d);
d2[4]=transform(0.5,d);
d2[5]=transform(1.0,d);
d2[6]=transform(2.0,d);
d2[7]=transform(3.0,d);
inv[0]=invert(-3.0,d2[0]);
inv[1]=invert(-1.0,d2[1]);
inv[2]=invert(-0.5,d2[2]);
inv[3]=invert(0.0,d2[3]);
inv[4]=invert(0.5,d2[4]);
inv[5]=invert(1.0,d2[5]);
inv[6]=invert(2.0,d2[6]);
inv[7]=invert(3.0,d2[7]);
for(int i=0;i<size;i++)
{
f.writeString(d[i]+",");
for(int j=0;j<8;j++)
f.writeString(d2[j][i]+",");
for(int j=0;j<8;j++)
f.writeString(inv[j][i]+",");
f.writeString("\n");
}
}
} | 8,430 | 26.552288 | 189 | java |
tsml-java | tsml-java-master/src/main/java/tests/ClassifierSanityChecks.java | package tests;
import evaluation.storage.ClassifierResults;
import experiments.data.DatasetLoading;
import machine_learning.classifiers.ensembles.ContractRotationForest;
import machine_learning.classifiers.ensembles.EnhancedRotationForest;
import tsml.classifiers.Checkpointable;
import tsml.classifiers.EnhancedAbstractClassifier;
import tsml.classifiers.TrainTimeContractable;
import tsml.classifiers.dictionary_based.TDE;
import tsml.classifiers.distance_based.proximity.ProximityForest;
import tsml.classifiers.kernel_based.Arsenal;
import tsml.classifiers.hybrids.HIVE_COTE;
import tsml.classifiers.interval_based.DrCIF;
import tsml.classifiers.shapelet_based.ShapeletTransformClassifier;
import utilities.ClassifierTools;
import weka.classifiers.meta.RotationForest;
import weka.core.Instance;
import weka.core.Instances;
import java.util.Random;
/**
 * Simple development class to road test the classifiers in HiveCote 2, and HC2 itself.
 * Each test loads an ARFF train/test split, builds one or more classifiers,
 * and prints timing and test accuracy to stdout.
 * HC2.0 is too slow on ArrowHead, must be STC taking an hour.
 */
public class ClassifierSanityChecks {
    // Names of the classifiers under test; commented entries are temporarily disabled
    static String[] classifiers={//"DrCIF","TDE","PF","Arsenal",
        "STC"
//            ,"HiveCote"
    };
    // Expected correct-prediction counts on ArrowHead -- presumably one per
    // classifier in the full list above; currently unused -- TODO confirm
    static int[] arrowHeadCorrect={140,155,150,148,144,134,153};
    /**
     * Maps a classifier name to a configured instance; returns null for
     * unrecognised names. STC is built with a 1 minute contract and debug on.
     */
    static EnhancedAbstractClassifier setClassifier(String str){
        //Note: comment said "MNissing Arsenal and HC2" but both now have cases below
        EnhancedAbstractClassifier c = null;
        switch(str){
            case "DrCIF":
                c= new DrCIF();
                break;
            case "TDE":
                c= new TDE();
                break;
            case "PF":
                ProximityForest pf=new ProximityForest();
                pf.setSeed(new Random().nextInt());
                c=pf;
                break;
            case "STC":
                ShapeletTransformClassifier stc= new ShapeletTransformClassifier();
                stc.setMinuteLimit(1);
                stc.setDebug(true);
                c=stc;
                break;
            case "Arsenal":
                c = new Arsenal();
                break;
            case "HiveCote":
                HIVE_COTE hc = new HIVE_COTE();
                hc.setupHIVE_COTE_2_0();
                hc.enableMultiThreading(5);
                c=hc;
                break;
        }
        return c;
    }
    /**
     * To use before finalising tests.
     * Builds every classifier in classifiers with default settings on
     * ArrowHead and prints build time and test accuracy.
     */
    public static void basicUsageTest()throws Exception {
        String path="src/main/java/experiments/data/tsc/";
        String problem="ArrowHead";
        Instances train= DatasetLoading.loadData(path+problem+"/"+problem+"_TRAIN.arff");
        Instances test= DatasetLoading.loadData(path+problem+"/"+problem+"_TEST.arff");
        for(String str:classifiers) {
            EnhancedAbstractClassifier c = setClassifier(str);
            System.out.println(" running "+str);
            if(c!=null) {
                try {
                    long t1= System.nanoTime();
                    c.buildClassifier(train);
                    long t2= System.nanoTime();
                    long trainTime = (t2-t1)/1000000000;    //nanos -> seconds
                    int correct=0;
                    for(Instance ins:test){
                        double pred=c.classifyInstance(ins);
                        double[] d = c.distributionForInstance(ins);
                        if(pred==ins.classValue())
                            correct++;
                    }
                    System.out.println(str + " on " + problem + " train time = "+trainTime+" (secs), correct count = "+correct+" test acc = " + correct/(double)test.numInstances());
                }catch(Exception e){
                    System.out.println(" Error building classifier "+str+" exception = "+e);
                }
            } else{
                System.out.println(" null classifier "+str);
            }
        }
    }
    /**
     * Compares ContractRotationForest (build commented out), EnhancedRotationForest
     * with OOB estimates and bagging, and the default weka RotationForest on
     * ArrowHead, printing train accuracy (where estimated), build time and test
     * accuracy for each. Reads data from a network drive (Z:).
     */
    public static void contractRotationForestTest()throws Exception {
//        String path="src/main/java/experiments/data/tsc/";
//        String problem="";
        String path="Z:\\ArchiveData\\Univariate_arff\\";
        String problem="ArrowHead";
        Instances train= DatasetLoading.loadData(path+problem+"/"+problem+"_TRAIN.arff");
        Instances test= DatasetLoading.loadData(path+problem+"/"+problem+"_TEST.arff");
        long t1,t2, trainTime;
        int correct=0;
        EnhancedAbstractClassifier c = new ContractRotationForest();
        TrainTimeContractable x=( TrainTimeContractable) c;
        ((TrainTimeContractable) c).setMinuteLimit(3);
        t1= System.nanoTime();
        c.setDebug(true);
        c.setEstimateOwnPerformance(false);
        int count=0;
        ClassifierResults trainRes;
/*
        c.buildClassifier(train);
        t2= System.nanoTime();
        trainTime = (t2-t1)/1000000000;
        trainRes = c.getTrainResults();
        System.out.println(" CONTRACT Train Acc = "+trainRes.getAcc()+" results = "+trainRes);
        for(Instance ins:test){
            double pred=c.classifyInstance(ins);
            double[] d = c.distributionForInstance(ins);
            if(pred==ins.classValue())
                correct++;
            if(count<2) {
                for (double dd : d)
                    System.out.print(dd + ", ");
                System.out.println(" PREDICTION = " + pred+ " actual = "+ins.classValue());
            }
            count++;
        }
        System.out.println("\n CRF finished in "+trainTime+" secs, test num correct = "+correct+" acc = "+correct/(double)test.numInstances());
*/
        EnhancedAbstractClassifier c2 = new EnhancedRotationForest();
        t1= System.nanoTime();
        ((TrainTimeContractable) c2).setMinuteLimit(3);
        c2.setDebug(true);
//        ((EnhancedRotationForest)c2).setRemovedPercentage(10);
//        ((EnhancedRotationForest)c2).setProbabilityClassSelection(1);
        ((EnhancedRotationForest)c2).setBagging(true);
        c2.setEstimateOwnPerformance(true);
        c2.setTrainEstimateMethod("OOB");
        c2.buildClassifier(train);
        c2.setDebug(false);
        t2= System.nanoTime();
        trainTime = (t2-t1)/1000000000;
        correct=0;
        trainRes = c2.getTrainResults();
        System.out.println(" ENHANCED: Train Acc = "+trainRes.getAcc()+" results = "+trainRes);
/*
        trainPred=trainRes.getPredClassValsAsArray();
        trainProbs=trainRes.getProbabilityDistributionsAsArray();
        for(int i=0;i<trainPred.length;i++){
            System.out.print("\n actual = "+train.instance(i).classValue()+" predicted = "+trainPred[i]+" probs = ");
            for(double d: trainProbs[i])
                System.out.print(d+",");
        }
*/
        count=0;
        correct=0;
        for(Instance ins:test){
            double pred=c2.classifyInstance(ins);
            double[] d = c2.distributionForInstance(ins);
            if(count<2) {   //print the first two distributions for eyeballing
                for (double dd : d)
                    System.out.print(dd + ", ");
                System.out.println(" PREDICTION = " + pred+ " actual = "+ins.classValue());
            }
            count++;
            if(pred==ins.classValue())
                correct++;
        }
        System.out.println("\n ERF finished in "+trainTime+" secs, test num correct = "+correct+" acc = "+correct/(double)test.numInstances());
        RotationForest rotf1= new RotationForest();
        t1= System.nanoTime();
        rotf1.setNumIterations(200);
        rotf1.buildClassifier(train);
        t2= System.nanoTime();
        trainTime = (t2-t1)/1000000000;
        correct=count=0;
        for(Instance ins:test){
            double pred=rotf1.classifyInstance(ins);
            double[] d = rotf1.distributionForInstance(ins);
            if(pred==ins.classValue())
                correct++;
            if(count<2) {
                for (double dd : d)
                    System.out.print(dd + ", ");
                System.out.println(" PREDICTION = " + pred);
            }
            count++;
        }
        System.out.println("\n Default RotF finished in "+trainTime+" secs, test num correct = "+correct+" acc = "+correct/(double)test.numInstances());
    }
    /**
     * Builds ShapeletTransformClassifier with a 2 minute contract and its own
     * train estimate on ElectricDevices (from Z:), printing train accuracy,
     * the first two test distributions, build time and test accuracy.
     */
    public static void shapeletTransformClassifiertTest()throws Exception {
//        String path="src/main/java/experiments/data/tsc/";
//        String problem="";
        String path="Z:\\ArchiveData\\Univariate_arff\\";
        String problem="ElectricDevices";
        Instances train= DatasetLoading.loadData(path+problem+"/"+problem+"_TRAIN.arff");
        Instances test= DatasetLoading.loadData(path+problem+"/"+problem+"_TEST.arff");
        long t1,t2, trainTime;
        int correct=0;
        EnhancedAbstractClassifier c = new ShapeletTransformClassifier();
        TrainTimeContractable x=( TrainTimeContractable) c;
        ((TrainTimeContractable) c).setMinuteLimit(2);
        t1= System.nanoTime();
        c.setDebug(true);
        c.setEstimateOwnPerformance(true);
        int count=0;
        ClassifierResults trainRes;
        c.buildClassifier(train);
        t2= System.nanoTime();
        trainTime = (t2-t1)/1000000000;
        trainRes = c.getTrainResults();
        System.out.println(" CONTRACT Train Acc = "+trainRes.getAcc()+" results = "+trainRes);
        for(Instance ins:test){
            double pred=c.classifyInstance(ins);
            double[] d = c.distributionForInstance(ins);
            if(pred==ins.classValue())
                correct++;
            if(count<2) {
                for (double dd : d)
                    System.out.print(dd + ", ");
                System.out.println(" PREDICTION = " + pred+ " actual = "+ins.classValue());
            }
            count++;
        }
        System.out.println("\n STC finished in "+trainTime+" secs, test num correct = "+correct+" acc = "+correct/(double)test.numInstances());
    }
    /**
     * Runs every contractable classifier in classifiers with a 1 minute train
     * contract on Beef; non-contractable classifiers are skipped.
     */
    public static void shortContractTest() throws Exception {
        String path = "src/main/java/experiments/data/tsc/";
        String problem = "Beef";
        Instances train = DatasetLoading.loadData(path + problem + "/" + problem + "_TRAIN.arff");
        Instances test = DatasetLoading.loadData(path + problem + "/" + problem + "_TEST.arff");
        for(String str:classifiers) {
            EnhancedAbstractClassifier c = setClassifier(str);
            System.out.println(" running "+str);
            if(c instanceof TrainTimeContractable) {    //instanceof is false for null, so null also skips
                ((TrainTimeContractable) c).setMinuteLimit(1);
                System.out.println("Set timer to 1 minute ");
            }
            else{
                System.out.println(" Classifier "+str+" is not TrainTimeContractable, skipping ");
                continue;
            }
            if(c!=null) {
                try {
                    long t1= System.nanoTime();
                    c.buildClassifier(train);
                    long t2= System.nanoTime();
                    long trainTime = (t2-t1)/1000000000;
                    int correct=0;
                    for(Instance ins:test){
                        double pred=c.classifyInstance(ins);
                        double[] d = c.distributionForInstance(ins);
                        if(pred==ins.classValue())
                            correct++;
                    }
                    System.out.println(str + " on " + problem + " train time = "+trainTime+" (secs), correct count = "+correct+" test acc = " + correct/(double)test.numInstances());
                }catch(Exception e){
                    System.out.println(" Error building classifier "+str+" exception = "+e);
                }
            } else{
                System.out.println(" null classifier "+str);
            }
        }
    }
    /**
     * Runs every contractable classifier in classifiers with a 1 hour train
     * contract on ElectricDevices (from Z:); non-contractable ones are skipped.
     */
    public static void mediumContractTest() throws Exception {
        String beastPath = "Z:/ArchiveData/Univariate_arff/";
        String problem = "ElectricDevices";
        Instances train = DatasetLoading.loadData(beastPath + problem + "/" + problem + "_TRAIN.arff");
        Instances test = DatasetLoading.loadData(beastPath + problem + "/" + problem + "_TEST.arff");
        System.out.println(" Problem = "+problem);
        for(String str:classifiers) {
            EnhancedAbstractClassifier c = setClassifier(str);
            System.out.println(" running "+str);
            if(c instanceof TrainTimeContractable) {
                ((TrainTimeContractable) c).setHourLimit(1);
                System.out.println("Set timer to 1 hour ");
            }
            else{
                System.out.println(" Classifier "+str+" is not TrainTimeContractable, skipping ");
                continue;
            }
            if(c!=null) {
                try {
                    long t1= System.nanoTime();
                    c.buildClassifier(train);
                    long t2= System.nanoTime();
                    long trainTime = (t2-t1)/1000000000;
                    System.out.print(str + " on " + problem + " train time = "+trainTime+" (secs),");
                    int correct=0;
                    for(Instance ins:test){
                        double pred=c.classifyInstance(ins);
                        double[] d = c.distributionForInstance(ins);
                        if(pred==ins.classValue())
                            correct++;
                    }
                    System.out.println(" correct count = "+correct+" test acc = " + correct/(double)test.numInstances());
                }catch(Exception e){
                    System.out.println(" Error building classifier "+str+" exception = "+e);
                }
            } else{
                System.out.println(" null classifier "+str);
            }
        }
    }
    /**
     * Road test of checkpointing: every Checkpointable classifier is given an
     * empty checkpoint path and a 1 hour checkpoint interval, then built on Beef.
     * NOTE(review): message says "1 minute" but setCheckpointTimeHours(1) is
     * called -- confirm which is intended.
     */
    public static void checkPointTest() throws Exception {
        String path = "src/main/java/experiments/data/tsc/";
        String problem = "Beef";
        Instances train = DatasetLoading.loadData(path + problem + "/" + problem + "_TRAIN.arff");
        Instances test = DatasetLoading.loadData(path + problem + "/" + problem + "_TEST.arff");
        for(String str:classifiers) {
            EnhancedAbstractClassifier c = setClassifier(str);
            System.out.println(" running "+str);
            if(c instanceof Checkpointable) {
                ((Checkpointable) c).setCheckpointPath("");
                ((Checkpointable) c).setCheckpointTimeHours(1);
                System.out.println("Set timer to 1 minute ");
            }
            else{
                System.out.println(" Classifier "+str+" is not TrainTimeContractable, skipping ");
                continue;
            }
            if(c!=null) {
                try {
                    long t1= System.nanoTime();
                    c.buildClassifier(train);
                    long t2= System.nanoTime();
                    long trainTime = (t2-t1)/1000000000;
                    int correct=0;
                    for(Instance ins:test){
                        double pred=c.classifyInstance(ins);
                        double[] d = c.distributionForInstance(ins);
                        if(pred==ins.classValue())
                            correct++;
                    }
                    System.out.println(str + " on " + problem + " train time = "+trainTime+" (secs), correct count = "+correct+" test acc = " + correct/(double)test.numInstances());
                }catch(Exception e){
                    System.out.println(" Error building classifier "+str+" exception = "+e);
                }
            } else{
                System.out.println(" null classifier "+str);
            }
        }
    }
    /** Scratch method: single TDE run on Beef with a 1 minute contract. */
    public static void hackFile() throws Exception {
        String path="src/main/java/experiments/data/tsc/";
        String problem="Beef";
        Instances train= DatasetLoading.loadData(path+problem+"/"+problem+"_TRAIN.arff");
        Instances test= DatasetLoading.loadData(path+problem+"/"+problem+"_TEST.arff");
        EnhancedAbstractClassifier c = new TDE();
        if(c instanceof TrainTimeContractable) {
            ((TrainTimeContractable) c).setMinuteLimit(1);
            System.out.println("Set timer to 1 minute ");
        }
        long t1= System.nanoTime();
        c.buildClassifier(train);
        long t2= System.nanoTime();
        long secs = (t2-t1)/1000000000;
        double acc = ClassifierTools.accuracy(test, c);
        System.out.println("TDE on 1 minute timer " + problem + " took = "+secs+" seconds, test acc = " + acc);
        String locaPath="src/main/java/experiments/data/tsc/";
        problem="Beef";
        //1. Test basic load and build with defaults on ArrowHead: print acc and number correct
        //2. Test contract on 1 minute with Beef
        //2. Test contract on 1 hour on Elect
        // Test check pointing.
    }
    /** Entry point: uncomment the road test(s) to run. */
    public static void main(String[] args) throws Exception {
//        basicUsageTest();
//        shortContractTest();
//        mediumContractTest();
//        contractRotationForestTest();
        shapeletTransformClassifiertTest();
    }
}
| 17,084 | 37.829545 | 181 | java |
tsml-java | tsml-java-master/src/main/java/tests/HiveCoteExperiments.java | package tests;
/**
 * Holding class for HIVE-COTE experiment configuration.
 * Currently only lists dataset names; no behaviour.
 */
public class HiveCoteExperiments {
    // Names of new univariate datasets used in HIVE-COTE experiments --
    // presumably recent additions to the UCR/UEA archive; TODO confirm provenance
    public static String[] newUnivariateData={
        "AbnormalHeartbeat",
        "AsphaltObstacles",
        "AsphaltPavementType",
        "AsphaltRegularity",
        "BinaryHeartbeat",
        "Colposcopy",
        "DucksAndGeese",
        "EyesOpenShut",
        "Tiselac",
        "UrbanSound"
    };
}
| 388 | 19.473684 | 46 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/Checkpointable.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers;
import utilities.FileUtils;
import java.io.*;
import java.util.concurrent.TimeUnit;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
/**
* Interface that allows the user to allow a classifier to checkpoint, i.e.
save its current state and then load it again to continue building the model on
a separate run.
By default this involves simply saving and loading a serialised the object
known classifiers: none
Requires two methods
number
* @author Tony Bagnall 2018, goastler
*/
public interface Checkpointable extends Serializable {
/**
* Store the path to write checkpoint files,
* @param path string for full path for the directory to store checkpointed files
* @return true if successful (i.e. the directory now exist
*/
boolean setCheckpointPath(String path);
/**
*
* @param t number of hours between checkpoints
* @return true if set correctly.
*/
default boolean setCheckpointTimeHours(int t){ return false;};
//Override both if not using Java serialisation
default void saveToFile(String filename) throws Exception {
try (FileUtils.FileLock fileLocker = new FileUtils.FileLock(filename);
FileOutputStream fos = new FileOutputStream(fileLocker.getFile());
GZIPOutputStream gos = new GZIPOutputStream(fos);
ObjectOutputStream out = new ObjectOutputStream(gos)) {
out.writeObject(this);
}
}
default void loadFromFile(String filename) throws Exception{
Object obj = null;
try (FileUtils.FileLock fileLocker = new FileUtils.FileLock(filename);
FileInputStream fis = new FileInputStream(fileLocker.getFile());
GZIPInputStream gis = new GZIPInputStream(fis);
ObjectInputStream in = new ObjectInputStream(gis)) {
obj = in.readObject();
}
if(obj != null) {
copyFromSerObject(obj);
}
}
/**
* Utility function to set the file structure up if required. Call this in setSavePath if you wish
* */
default boolean createDirectories(String path){
File f = new File(path);
boolean success=true;
if(!f.isDirectory())
success=f.mkdirs();
return success;
}
//Define how to copy from a loaded object to this object
void copyFromSerObject(Object obj) throws Exception;
//delete any checkpoint file if present, not required currently
default boolean deleteCheckpoint() { return false; }
}
| 3,337 | 33.412371 | 102 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/EnhancedAbstractClassifier.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers;
import tsml.data_containers.TimeSeriesInstances;
import weka.classifiers.AbstractClassifier;
import evaluation.storage.ClassifierResults;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Random;
import weka.classifiers.Classifier;
import weka.core.*;
/**
*
* Extends the AbstractClassifier to achieve the following:
* 1. Allow storage of information about the training process, including timing info,
* any optimisation performed and any train set estimates and predictions made to help
* assess generalisability. See below for more info on this major enhancement.
* 2. Allow for a unified process of seeding classifiers. The seed is stored in seed,
* and set via the interface Randomizable
* 3. Allow for default getCapapabilities() for TSC. For time series, these default to all real
* valued attributes, no missing values, and classification only. This overrides default
* behaviour in AbstractClassifier
* 4. Allow for standardised mechanism for saving classifier information to file.
* For example usage, see the classifier TSF.java. the method getParameters()
* can be enhanced to include any parameter info for the final classifier.
* getParameters() is called to store information on the second line of file
* storage format testFoldX.csv.
Train data is the major enhancement: There are two components: time taken in training, and any
train set predictions produced internally.
*there are three components to the time that may be spent building a classifier
* 1. timing
buildTime
* the minimum any classifier that extends this should store
is the build time in buildClassifier, through calls to System.currentTimeMillis()
or nanoTime() at the start and end of the method, stored in trainResults, with
trainResults.setBuildTime(totalBuildTime) nanoTime() is generally preferred, and
to set the TimeUnit of the ClassiiferReults object appropriately, e.g
trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
errorEstimateTime
* the exact usage of this statistic has not been finalised. Conceptually measures
* how long is spent estimating the test error from the train data
buildPlusEstimateTime
* 2. Recording train set results
ClassifierResults trainResults can also store other information about the training,
including estimate of accuracy, predictions and probabilities. The mechanism for finding
these is algorithm specific. They key point is that all values in trainResults are
set without any reference to the train set at all. All the variables for trainResults
are set in buildClassifier, which has no access to test data at all. It is completely decoupled.
Instances train=//Get train
EnhancedAbstractClassifier c= //Get classifier
c.buildClassifier(train) //ALL STATS SET HERE
* Update 1/7/2020:
* @author Tony Bagnall and James Large EstimatorMethod estimator moved up from subclasses, since the pattern
* appears in multiple forest based ensembles
*/
abstract public class EnhancedAbstractClassifier extends AbstractClassifier implements SaveParameterInfo,
                                                                                       Serializable,
                                                                                       Randomizable,
                                                                                       TSClassifier {
    /** Store information of training. The minimum should be the build time, tune time and/or estimate acc time */
    protected ClassifierResults trainResults = new ClassifierResults();
    protected int seed = 0;
    protected boolean buildCalled=false; // set true on a class to buildClassifier, check performed in classify instance to
    /**Can seed for reproducibility*/
    protected Random rand=new Random(seed);
    protected boolean seedClassifier=false;
    protected transient boolean debug=false;
    /**
     * get the classifier RNG
     * @return Random
     */
    public Random getRandom() {
        return rand;
    }
    /*Start Aaron Stuff for TSInstances wrapper*/
    public AbstractClassifier getClassifier(){
        return this;
    }
    // Reference to the train data last supplied via setTSTrainData; may be null.
    public TimeSeriesInstances trainData;
    @Override
    public TimeSeriesInstances getTSTrainData(){
        return trainData;
    }
    @Override
    public void setTSTrainData(TimeSeriesInstances train){
        trainData = train;
    }
    /*END - Aaron Stuff for TSClassifier wrapper*/
    /**
     * Set the classifier RNG
     * @param rand
     */
    public void setRandom(Random rand) {
        this.rand = rand;
    }
    /**
     * A printing-friendly and/or context/parameter-aware name that can optionally
     * be used to describe this classifier. By default, this will simply be the
     * simple-class-name of the classifier
     */
    protected String classifierName = getClass().getSimpleName();
    /**
     * This flags whether classifiers are able to estimate their own performance
     * (possibly with some bias) on the train data in some way as part of their buildClassifier
     * fit, and avoid an external fully nested-cross validation process.
     *
     * This flag being true indicates the ABILITY to estimate train performance,
     * to turn this behaviour on, setEstimateOwnPerformance(true) should be called.
     * By default, the estimation behaviour is off regardless of ability
     *
     * This way, unnecessary work is avoided and if for whatever reason a nested
     * estimation process is explicitly wanted (e.g. for completely bias-free estimates),
     * that can also be achieved.
     *
     * This variable is private and only settable via the abstract constructor,
     * such that all subclasses must set it at initialisation.
     *
     * This variable and the related gets/sets replace the TrainAccuracyEstimator interface
     */
    protected boolean ableToEstimateOwnPerformance = false;
    /**
     * This flags whether the classifier shall estimate their own performance
     * (possibly with some bias) on the train data in some way as part of their buildClassifier
     * fit, and avoid a full nested-cross validation process.
     *
     * The estimation process may be entirely encapsulated in the build process (e.g. a tuned
     * classifier returning the train estimate of the best parameter set, acting as the train
     * estimate of the full classifier: note the bias), or may be done as an
     * additional step beyond the normal build process but far more efficiently than a
     * nested cv (e.g. a 1NN classifier could perform an efficient internal loocv)
     */
    protected boolean estimateOwnPerformance = false;
    /** If trainAccuracy is required, there are three options that can be implemented
     *  All three options involve a two stage process: Fit whole model then estimate (which might mean fit more models)
     *  1. trainEstimateMethod=CV: do a 10x CV on the train set with a clone of this classifier
     *  2. trainEstimateMethod=OOB: build a single OOB model just to get the OOB predictions
     *  3. trainEstimateMethod-TRAIN: use the data used to train the model to make predictions
     */
    public enum TrainEstimateMethod {CV,OOB,TRAIN,NONE}
    protected TrainEstimateMethod trainEstimateMethod = TrainEstimateMethod.NONE;
    public void setTrainEstimateMethod(TrainEstimateMethod t){
        trainEstimateMethod=t;
    }
    /**
     * Set the train-estimate method from its (case-insensitive) name.
     * @param str one of "CV", "OOB", "TRAIN" or "NONE"
     * @throws UnsupportedOperationException if str matches no known method
     */
    public void setTrainEstimateMethod(String str){
        String s=str.toUpperCase();
        if(s.equals("CV"))
            trainEstimateMethod = TrainEstimateMethod.CV;
        else if(s.equals("OOB"))
            trainEstimateMethod = TrainEstimateMethod.OOB;
        else if(s.equals("NONE"))
            trainEstimateMethod = TrainEstimateMethod.NONE;
        else if(s.equals("TRAIN"))
            trainEstimateMethod = TrainEstimateMethod.TRAIN;
        else
            throw new UnsupportedOperationException("Unknown estimator method in classifier "+getClass().getSimpleName()+" = "+str);
    }
    public String getEstimatorMethod() {
        return trainEstimateMethod.name();
    }
    //utilities for readability in setting the above bools via super constructor in subclasses
    public static final boolean CAN_ESTIMATE_OWN_PERFORMANCE = true;
    public static final boolean CANNOT_ESTIMATE_OWN_PERFORMANCE = false;
    protected int numClasses = -1;
    protected boolean buildClassifierCalled = false;
    public int getNumClasses() {
        return numClasses;
    }
    protected void setAbleToEstimateOwnPerformance(boolean state) {
        ableToEstimateOwnPerformance = state;
    }
    /**
     * Performs the common set-up every subclass needs: resets trainResults, re-seeds the
     * RNG, records the number of classes and validates the class attribute position.
     * Subclasses should call super.buildClassifier(trainData) first.
     * @param trainData the training set, class attribute must be the last attribute
     * @throws IllegalArgumentException if the class attribute is not the final attribute
     */
    @Override
    public void buildClassifier(final Instances trainData) throws
                                                           Exception {
        buildClassifierCalled=true;
        trainResults = new ClassifierResults();
        rand.setSeed(seed);
        numClasses = trainData.numClasses();
        trainResults.setEstimatorName(getClassifierName());
        trainResults.setParas(getParameters());
        if(trainData.classIndex() != trainData.numAttributes() - 1) {
            throw new IllegalArgumentException("class value not at the end");
        }
    }
    public EnhancedAbstractClassifier() {
        this(false);
    }
    public EnhancedAbstractClassifier(boolean ableToEstimateOwnPerformance) {
        this.ableToEstimateOwnPerformance = ableToEstimateOwnPerformance;
        setDebug(debug);
    }
    @Override
    public int hashCode() {
        // BUG FIX: equals() compares classifierName case-insensitively, so the hash must
        // be computed on a case-normalised name, otherwise two equal objects (names
        // differing only in case) would have different hash codes, violating the
        // Object.hashCode contract. Null names hash to a fixed value for the same reason.
        if(classifierName == null) {
            return 0;
        }
        return classifierName.toLowerCase().hashCode();
    }
    @Override
    public boolean equals(Object other) {
        if(!(other instanceof EnhancedAbstractClassifier)) {
            return false;
        }
        EnhancedAbstractClassifier eac = (EnhancedAbstractClassifier) other;
        // BUG FIX: null-safe comparison; the original threw NullPointerException if this
        // classifier's name had been explicitly set to null.
        if(classifierName == null) {
            return eac.classifierName == null;
        }
        return classifierName.equalsIgnoreCase(eac.classifierName);
    }
    /**
     * This flags whether the classifier shall estimate their own performance
     * (possibly with some bias) on the train data in some way as part of their buildClassifier
     * fit, and avoid a full nested-cross validation process.
     *
     * The estimation process may be entirely encapsulated in the build process (e.g. a tuned
     * classifier returning the train estimate of the best parameter set, acting as the train
     * estimate of the full classifier: note the bias), or may be done as an
     * additional step beyond the normal build process but far more efficiently than a
     * nested cv (e.g. a 1NN classifier could perform an efficient internal loocv, or a tree ensemble
     * can use out of bag estimates)
     */
    public boolean ableToEstimateOwnPerformance() {
        return ableToEstimateOwnPerformance;
    }
    /**
     * This flags whether the classifier shall estimate their own performance
     * (possibly with some bias) on the train data in some way as part of their buildClassifier
     * fit, and avoid a full nested-cross validation process.
     *
     * The estimation process may be entirely encapsulated in the build process (e.g. a tuned
     * classifier returning the train estimate of the best parameter set, acting as the train
     * estimate of the full classifier: note the bias), or may be done as an
     * additional step beyond the normal build process but far more efficiently than a
     * nested cv (e.g. a 1NN classifier could perform an efficient internal loocv)
     * @throws IllegalArgumentException if asked to estimate but unable to do so
     */
    public void setEstimateOwnPerformance(boolean estimateOwnPerformance) throws IllegalArgumentException {
        if (estimateOwnPerformance && !ableToEstimateOwnPerformance)
            throw new IllegalArgumentException("Classifier ("+getClassifierName()+") is unable to estimate own performance, but "
                                                   + "trying to set it to do so. Check with ableToEstimateOwnPerformance() first");
        this.estimateOwnPerformance = estimateOwnPerformance;
    }
    /**
     * This flags whether the classifier shall estimate their own performance
     * (possibly with some bias) on the train data in some way as part of their buildClassifier
     * fit, and avoid a full nested-cross validation process.
     *
     * The estimation process may be entirely encapsulated in the build process (e.g. a tuned
     * classifier returning the train estimate of the best parameter set, acting as the train
     * estimate of the full classifier: note the bias), or may be done as an
     * additional step beyond the normal build process but far more efficiently than a
     * nested cv (e.g. a 1NN classifier could perform an efficient internal loocv)
     */
    public boolean getEstimateOwnPerformance() {
        return estimateOwnPerformance;
    }
    /**
     * A simple utility to wrap the test of whether a classifier reference contains an
     * EnhancedAbstractClassifier object, and whether that classifier CAN estimate
     * its own accuracy internally.
     *
     * Replacing the previous test 'classifier instanceof TrainAccuracyEstimator'
     */
    public static boolean classifierAbleToEstimateOwnPerformance(Classifier classifier) {
        return classifier instanceof EnhancedAbstractClassifier &&
                   ((EnhancedAbstractClassifier) classifier).ableToEstimateOwnPerformance();
    }
    /**
     * A simple utility to wrap the test of whether a classifier reference contains an
     * EnhancedAbstractClassifier object, and whether that classifier has been set up
     * to estimate its own accuracy internally.
     *
     * Replacing the previous test 'classifier instanceof TrainAccuracyEstimator''
     */
    public static boolean classifierIsEstimatingOwnPerformance(Classifier classifier) {
        return classifier instanceof EnhancedAbstractClassifier &&
                   ((EnhancedAbstractClassifier) classifier).getEstimateOwnPerformance();
    }
    @Override
    public String getParameters() {
        return "seedClassifier,"+seedClassifier+",seed,"+seed;
    }
    /**
     * Gets the train results for this classifier, which will be empty (but not-null)
     * until buildClassifier has been called.
     *
     * If the classifier has ableToEstimateOwnPerformance()==true and was set-up to estimate
     * it's own train accuracy (setEstimateOwnPerformance(true) called), these will be populated
     * with full prediction information, ready to be written as a trainFoldX file for example
     *
     * Otherwise, the object will at minimum contain the build time, classifiername,
     * and parameter information
     */
    public ClassifierResults getTrainResults() {
        return trainResults;
    }
    /**
     * Set the seed for random number generation. Re-initialises internal RNG
     * with new seed. Therefore, note that in general this method should be called
     * before the classifier needs to use the RNG, and that in general the RNG
     * shouldn't ever be used in the constructor
     *
     * @param seed the seed
     */
    @Override
    public void setSeed(int seed) {
        seedClassifier=true;
        this.seed = seed;
        rand=new Random(seed);
    }
    /**
     * Gets the seed for the random number generations
     *
     * @return the seed for the random number generation
     */
    @Override
    public int getSeed() {
        return seed;
    }
    /**
     * Returns default capabilities of the classifier. These are that the
     * data must be numeric, with no missing and a nominal class
     * @return the capabilities of this classifier
     */
    @Override
    public Capabilities getCapabilities() {
        Capabilities result = super.getCapabilities();
        result.disableAll();
        // attributes must be numeric
        result.enable(Capabilities.Capability.NUMERIC_ATTRIBUTES);
        // Can only handle discrete class
        result.enable(Capabilities.Capability.NOMINAL_CLASS);
        // instances
        result.setMinimumNumberInstances(2);
        return result;
    }
    /** Number of CV folds to use: 10, or the number of instances if fewer. */
    public int setNumberOfFolds(Instances data){
        return Math.min(data.numInstances(), 10);
    }
    /** Number of CV folds to use: 10, or the number of instances if fewer. */
    public int setNumberOfFolds(TimeSeriesInstances data){
        return Math.min(data.numInstances(), 10);
    }
    /**
     * A printing-friendly and/or context/parameter-aware name that can optionally
     * be used to describe this classifier. By default, this will simply be the
     * simple-class-name of the classifier.
     */
    public String getClassifierName() {
        if (classifierName == null)
            classifierName = this.getClass().getSimpleName();
        return classifierName;
    }
    /**
     * Method to find the best index in a list of doubles. If a tie occurs,
     * then the best index is chosen randomly.
     *
     * @param x a list of doubles.
     * @param rand a Random object.
     * @return the index of the highest value in x.
     */
    public static int findIndexOfMax(double [] x, Random rand) {
        double currentMax = x[0];
        ArrayList<Integer> bestIndexes = new ArrayList<>();
        bestIndexes.add(0);
        //Find the best index(es)
        for(int i = 1; i < x.length; i++) {
            if(x[i] > currentMax) {
                bestIndexes.clear();
                bestIndexes.add(i);
                currentMax = x[i];
            } else if(x[i] == currentMax) {
                bestIndexes.add(i);
            }
        }
        //No ties occured
        if(bestIndexes.size() == 1) {
            return bestIndexes.get(0);
        } else {
            //ties did occur
            return bestIndexes.get(rand.nextInt(bestIndexes.size()));
        }
    }
    /**
     * Method to find the best index in a list of doubles. If a tie occurs,
     * then the best index is chosen randomly.
     *
     * @param x a list of doubles.
     * @param seed a long seed for a Random object.
     * @return the index of the highest value in x.
     */
    public static int findIndexOfMax(double [] x, long seed) {
        double currentMax = x[0];
        ArrayList<Integer> bestIndexes = new ArrayList<>();
        bestIndexes.add(0);
        //Find the best index(es)
        for(int i = 1; i < x.length; i++) {
            if(x[i] > currentMax) {
                bestIndexes.clear();
                bestIndexes.add(i);
                currentMax = x[i];
            } else if(x[i] == currentMax) {
                bestIndexes.add(i);
            }
        }
        //No ties occured
        if(bestIndexes.size() == 1) {
            return bestIndexes.get(0);
        } else {
            //ties did occur
            return bestIndexes.get(new Random(seed).nextInt(bestIndexes.size()));
        }
    }
    /**
     * Overrides default AbstractClassifier classifyInstance to use random tie breaks.
     * Classifies the given test instance. The instance has to belong to a
     * dataset when it's being classified. Note that a classifier MUST
     * implement either this or distributionForInstance().
     *
     * @param instance the instance to be classified
     * @return the predicted most likely class for the instance or
     * Utils.missingValue() if no prediction is made
     * @exception Exception if an error occurred during the prediction
     */
    @Override
    public double classifyInstance(Instance instance) throws Exception {
        double [] dist = distributionForInstance(instance);
        if (dist == null) {
            throw new Exception("Null distribution predicted");
        }
        switch (instance.classAttribute().type()) {
            case Attribute.NOMINAL:
                return findIndexOfMax(dist, rand);
            case Attribute.NUMERIC:
                return dist[0];
            default:
                return Utils.missingValue();
        }
    }
    /**
     * Sets a printing-friendly and/or context/parameter-aware name that can optionally
     * be used to describe this classifier. By default, this will simply be the
     * simple-class-name of the classifier
     */
    public void setClassifierName(String classifierName) {
        this.classifierName = classifierName;
    }
    public void setDebug(boolean b){
        debug=b;
    }
    public boolean isDebug() {
        return debug;
    }
    public String toString() {
        return getClassifierName();
    }
    /** Print s to standard out without a newline, only when debug is enabled. */
    public void printDebug(String s){
        if(debug)
            System.out.print(s);
    }
    /** Print s to standard out with a newline, only when debug is enabled. */
    public void printLineDebug(String s){
        if(debug)
            System.out.println(s);
    }
}
| 21,407 | 39.316384 | 132 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/Interpretable.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers;
import java.io.File;
/**
 * Contract for classifiers that can emit per-prediction files or visualisations
 * explaining how a prediction was reached.
 *
 * @author Matthew Middlehurst
 */
public interface Interpretable {

    /**
     * Stores a path to save interpretability files to.
     *
     * @param path String directory path
     * @return true if path is valid, false otherwise.
     */
    boolean setInterpretabilitySavePath(String path);

    /**
     * Writes a summary/visualisation describing how the most recent prediction
     * was made to the previously configured path.
     *
     * @return true if successful, false otherwise
     * @throws Exception if failure to set path or output files.
     */
    boolean lastClassifiedInterpretability() throws Exception;

    /**
     * Returns a unique identifier for the most recent prediction, suitable for
     * building file names.
     *
     * @return int ID for the last prediction
     */
    int getPredID();

    /**
     * Ensures a directory exists at the given path, creating it (and any missing
     * parents) if necessary.
     *
     * @param path String directory path
     * @return true if the directory already existed or was created, false otherwise
     */
    default boolean createInterpretabilityDirectories(String path) {
        File dir = new File(path);
        // Either the directory is already present, or we attempt to create it.
        return dir.isDirectory() || dir.mkdirs();
    }
}
| 2,096 | 30.772727 | 99 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/LEFTIST.java | package tsml.classifiers;
import experiments.data.DatasetLoading;
import fileIO.OutFile;
import org.apache.commons.lang3.ArrayUtils;
import tsml.classifiers.interval_based.TSF;
import tsml.data_containers.TimeSeriesInstance;
import tsml.data_containers.TimeSeriesInstances;
import tsml.data_containers.utilities.Converter;
import utilities.GenericTools;
import utilities.generic_storage.Pair;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.classifiers.functions.LinearRegression;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import java.io.BufferedReader;
import java.io.File;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Random;
import java.util.function.Function;
import static utilities.InstanceTools.resampleTrainAndTestInstances;
import static utilities.StatisticalUtilities.dot;
import static utilities.Utilities.argMax;
/**
* Implementation of the LEFTIST interpretability algorithm, producing model agnostic weights for time series interval
* importance for new predictions.
*
* @author Matthew Middlehurst
**/
public class LEFTIST {
private TimeSeriesInstances sampleSeries;
private EnhancedAbstractClassifier classifier;
private int noSlices = 20;
private int noNeighbours = 1000;
public enum FeatureSelectionMethod{HIGHEST_WEIGHTS,FORWARD_SELECTION,NONE}
private int noFeatures = 10;
private FeatureSelectionMethod fsMethod = FeatureSelectionMethod.HIGHEST_WEIGHTS;
private boolean distanceToSeries = false;
private boolean predicictionClassOnly = false;
private boolean generateScore = false;
private Function<Pair<double[],double[]>,Double> similarityMeasure = (Pair<double[],double[]> p) ->
-(dot(p.var1,p.var2) / (Math.sqrt(dot(p.var1,p.var1)) * Math.sqrt(dot(p.var2,p.var2)))) + 1;
private Function<Double,Double> kernel = (Double d) ->
Math.sqrt(Math.exp(-Math.pow(d, 2) / Math.pow(0.25, 2)));
private LinearRegression explainer = new LinearRegression();
private int seed;
private Random rand;
private boolean debug = false;
public LEFTIST(TimeSeriesInstances sampleSeries, EnhancedAbstractClassifier classifier, int seed){
this.sampleSeries = sampleSeries;
this.classifier = classifier;
this.seed = seed;
this.rand = new Random(seed);
}
public LEFTIST(Instances sampleSeries, EnhancedAbstractClassifier classifier, int seed){
this.sampleSeries = Converter.fromArff(sampleSeries);
this.classifier = classifier;
this.seed = seed;
this.rand = new Random(seed);
}
public void setNoSlices(int i){
noSlices = i;
}
public void setNoNeighbours(int i){
noNeighbours = i;
}
public void setDistanceToSeries(boolean b) { distanceToSeries = b; }
public void setDistanceMeasure(Function<Pair<double[],double[]>,Double> f) { similarityMeasure = f; }
public void setKernel(Function<Double,Double> f) { kernel = f; }
public void setFSMethod(FeatureSelectionMethod fs){
fsMethod = fs;
}
public void setDebug(boolean b) { debug = b; }
public void setSeed(int i) {
seed = i;
rand = new Random(seed);
}
public Explanation generateExplanation(TimeSeriesInstance inst) throws Exception {
if (inst.getNumDimensions() > 1){
System.err.println("Only available for univariate series.");
return null;
}
Explanation e = new Explanation();
e.slices = slices(inst.getMaxLength());
double[][] activatedSlices = neighbourSliceActivation();
double[][] transformedNeighbours = transformNeighbours(inst, e.slices, activatedSlices);
double[] neighbourWeights = weightNeighbours(distanceToSeries ? transformedNeighbours : activatedSlices);
double[][] probas = new double[noNeighbours][];
for (int i = 0; i < noNeighbours; i++) {
double[][] dims = new double[1][];
dims[0] = transformedNeighbours[i];
probas[i] = classifier.distributionForInstance(new TimeSeriesInstance(dims));
}
e.predVal = argMax(probas[0], rand);
int noClasses;
if (predicictionClassOnly){
noClasses = 1;
e.classes = new int[]{ e.predVal };
}
else{
noClasses = sampleSeries.getClassLabels().length;
e.classes = new int[sampleSeries.numClasses()];
for (int i = 0; i < e.classes.length; i++){
e.classes[i] = i;
}
}
e.coefficientsForClass = new double[noClasses][];
e.classMeans = new double[noClasses];
e.scores = new double[noClasses];
Arrays.fill(e.scores, -1);
for (int i = 0 ; i < noClasses; i++) {
e.usedFeatures = featureSelection(activatedSlices, neighbourWeights, probas, e.classes[i]);
Instances maskInstances = maskInstances(activatedSlices, neighbourWeights, e.usedFeatures);
for (int n = 0; n < noNeighbours; n++){
maskInstances.get(n).setClassValue(probas[n][e.classes[i]]);
}
LinearRegression lr = (LinearRegression) AbstractClassifier.makeCopy(explainer);
lr.buildClassifier(maskInstances);
if (generateScore){
e.scores[i] = score(lr, maskInstances);
}
double[] c = lr.coefficients();
e.coefficientsForClass[i] = new double[noSlices];
for (int n = 0; n < noFeatures; n++){
e.coefficientsForClass[i][e.usedFeatures[n]] = c[n];
}
e.classMeans[i] = c[c.length-1];
}
return e;
}
public Explanation generateExplanation(Instance inst) throws Exception {
return generateExplanation(Converter.fromArff(inst));
}
public void outputFigure(TimeSeriesInstance inst, String figureSavePath) throws Exception {
Explanation exp = generateExplanation(inst);
File f = new File(figureSavePath);
if(!f.isDirectory()) f.mkdirs();
OutFile of = new OutFile(figureSavePath + "\\interp" + seed + ".txt");
of.writeLine(Arrays.toString(inst.toValueArray()[0]));
of.writeString(Arrays.toString(exp.slices[0]));
for (int i = 1; i < exp.slices.length; i++) {
of.writeString(";" + Arrays.toString(exp.slices[i]));
}
of.writeLine("");;
of.writeLine(Integer.toString(exp.predVal));
of.writeLine(Integer.toString(sampleSeries.getClassLabels().length));
for (int i = 0; i < exp.coefficientsForClass.length; i++){
of.writeLine(Integer.toString(exp.classes[i]));
of.writeLine(Arrays.toString(exp.coefficientsForClass[i]));
}
Process p = Runtime.getRuntime().exec("py src/main/python/visualisation/leftist.py \"" +
figureSavePath.replace("\\", "/")+ "\" " + seed);
if (debug) {
System.out.println("LEFTIST interp python output:");
BufferedReader out = new BufferedReader(new InputStreamReader(p.getInputStream()));
BufferedReader err = new BufferedReader(new InputStreamReader(p.getErrorStream()));
System.out.println("output : ");
String outLine = out.readLine();
while (outLine != null) {
System.out.println(outLine);
outLine = out.readLine();
}
System.out.println("error : ");
String errLine = err.readLine();
while (errLine != null) {
System.out.println(errLine);
errLine = err.readLine();
}
}
}
public void outputFigure(Instance inst, String figureSavePath) throws Exception {
outputFigure(Converter.fromArff(inst), figureSavePath);
}
private int[][] slices(int length){
if (length < noSlices) noSlices = length;
int[][] slices = new int[noSlices][2];
int sliceSize = length / noSlices;
int remainder = length % noSlices;
int sum = 0;
for (int i = 0; i < noSlices; i++){
slices[i][0] = sum;
sum += sliceSize;
if (remainder > 0){
sum++;
remainder--;
}
slices[i][1] = sum;
}
return slices;
}
private double[][] neighbourSliceActivation(){
double[][] activatedSlices = new double[noNeighbours][noSlices];
Arrays.fill(activatedSlices[0], 1);
for (int i = 1; i < noNeighbours; i++){
for (int n = 0; n < noSlices; n++){
activatedSlices[i][n] = rand.nextBoolean() ? 1 : 0;
}
}
return activatedSlices;
}
private double[][] transformNeighbours(TimeSeriesInstance inst, int[][] slices, double[][] activatedSlices){
double[] instVals = inst.toValueArray()[0];
double[][] transformedNeighbours = new double[noNeighbours][];
for (int i = 0; i < noNeighbours; i++){
transformedNeighbours[i] = sampleSeries.get(rand.nextInt(sampleSeries.numInstances())).toValueArray()[0];
for (int n = 0 ; n < noSlices; n++){
if (activatedSlices[i][n] == 1){
System.arraycopy(instVals, slices[n][0], transformedNeighbours[i], slices[n][0],
slices[n][1] - slices[n][0]);
}
}
}
return transformedNeighbours;
}
private double[] weightNeighbours(double[][] neighbours){
double[] weights = new double[neighbours.length];
Pair<double[],double[]> p = new Pair<>(neighbours[0], null);
for (int i = 0; i < noNeighbours; i++){
p.var2 = neighbours[i];
weights[i] = kernel.apply(similarityMeasure.apply(p));
}
return weights;
}
private Instances maskInstances(double[][] activatedSlices, double[] neighbourWeights, int[] usedFeatures){
int noAtts = usedFeatures.length;
ArrayList<Attribute> atts = new ArrayList<>(noAtts + 1);
ArrayList<String> vals = new ArrayList<>(2);
vals.add("0");
vals.add("1");
for (int i = 0; i < noAtts; i++) {
atts.add(new Attribute(Integer.toString(i), vals));
}
atts.add(new Attribute("class"));
Instances maskInstances = new Instances("maskInstances", atts, noNeighbours);
maskInstances.setClassIndex(maskInstances.numAttributes()-1);
for (int i = 0; i < noNeighbours; i++) {
double[] newArr = new double[noAtts + 1];
for (int n = 0; n < noAtts; n++){
newArr[n] = activatedSlices[i][usedFeatures[n]];
}
maskInstances.add(new DenseInstance(neighbourWeights[i], newArr));
}
return maskInstances;
}
private int[] featureSelection(double[][] activatedSlices, double[] neighbourWeights, double[][] probas, int cls)
throws Exception {
int[] usedFeatures;
if (fsMethod == FeatureSelectionMethod.NONE || noFeatures >= noSlices){
usedFeatures = new int[noSlices];
for (int i = 0; i < noSlices; i++) {
usedFeatures[i] = i;
}
}
else if (fsMethod == FeatureSelectionMethod.HIGHEST_WEIGHTS) {
usedFeatures = new int[noFeatures];
Integer[] allFeatures = new Integer[noSlices];
for (int i = 0; i < noSlices; i++) {
allFeatures[i] = i;
}
Instances maskInstances = maskInstances(activatedSlices, neighbourWeights, Arrays.stream(allFeatures)
.mapToInt(i -> i).toArray());
for (int n = 0; n < noNeighbours; n++){
maskInstances.get(n).setClassValue(probas[n][cls]);
}
LinearRegression lr = new LinearRegression();
lr.buildClassifier(maskInstances);
double[] c = new double[noSlices];
System.arraycopy(lr.coefficients(), 0, c, 0, noSlices);
Arrays.sort(allFeatures, new GenericTools.SortIndexDescending(c));
for (int i = 0; i < noFeatures; i++) {
usedFeatures[i] = allFeatures[i];
}
}
else if (fsMethod == FeatureSelectionMethod.FORWARD_SELECTION) {
usedFeatures = new int[noFeatures];
Arrays.fill(usedFeatures, -1);
ArrayList<String> vals = new ArrayList<>(2);
vals.add("0");
vals.add("1");
ArrayList<Attribute> atts = new ArrayList<>(1);
atts.add(new Attribute("class"));
Instances fsInstances = new Instances("fs", atts, noNeighbours);
fsInstances.setClassIndex(fsInstances.numAttributes() - 1);
for (int i = 0; i < noNeighbours; i++){
double[] a = new double[]{probas[i][cls]};
fsInstances.add(new DenseInstance(neighbourWeights[i], a));
}
for (int i = 0; i < noFeatures; i++) {
double max = -99999999;
int feature = 0;
fsInstances.insertAttributeAt(new Attribute(Integer.toString(i), vals), i);
for (int n = 0; n < noSlices; n++){
if (ArrayUtils.contains(usedFeatures, n)) continue;
for (int g = 0; g < noNeighbours; g++){
fsInstances.get(g).setValue(i, activatedSlices[g][n]);
}
LinearRegression lr = new LinearRegression();
lr.buildClassifier(fsInstances);
double score = score(lr, fsInstances);
if (score > max){
max = score;
feature = n;
}
}
usedFeatures[i] = feature;
}
}
else {
throw new Exception("Invalid feature selection option.");
}
return usedFeatures;
}
private double score(Classifier cls, Instances insts) throws Exception {
double[] preds = new double[insts.numInstances()];
int mean = 0;
int weightSum = 0;
for (int i = 0 ; i < preds.length; i++){
Instance inst = insts.get(i);
preds[i] = cls.classifyInstance(inst);
mean += inst.classValue() * inst.weight();
weightSum += inst.weight();
}
mean /= weightSum;
double n = 0;
double d = 0;
for (int i = 0; i < preds.length; i++){
Instance inst = insts.get(i);
n += inst.weight() * Math.pow(inst.classValue() - preds[i], 2);
d += inst.weight() * Math.pow(inst.classValue() - mean, 2);
}
return d != 0 ? 1 - n / d : 0;
}
public static class Explanation {
int predVal;
int[][] slices;
int[] classes;
int[] usedFeatures;
double[][] coefficientsForClass;
double[] classMeans;
double[] scores;
public Explanation() {}
@Override
public String toString(){
return predVal + "\n" + Arrays.deepToString(slices) + "\n" + Arrays.toString(classes) + "\n" +
Arrays.toString(usedFeatures) + "\n" + Arrays.deepToString(coefficientsForClass) + "\n" +
Arrays.toString(classMeans) + "\n" + Arrays.toString(scores);
}
}
public static void main(String[] args) throws Exception {
int fold = 0;
//Minimum working example
String dataset = "ItalyPowerDemand";
Instances train = DatasetLoading.loadDataNullable("D:\\CMP Machine Learning\\Datasets\\UnivariateARFF\\" + dataset +
"\\" + dataset + "_TRAIN.arff");
Instances test = DatasetLoading.loadDataNullable("D:\\CMP Machine Learning\\Datasets\\UnivariateARFF\\" + dataset +
"\\" + dataset + "_TEST.arff");
Instances[] data = resampleTrainAndTestInstances(train, test, fold);
train = data[0];
test = data[1];
TSF c = new TSF();
c.seed = 0;
c.buildClassifier(train);
LEFTIST l = new LEFTIST(train, c, 0);
l.outputFigure(test.get(0), "E:\\Temp\\LEFTIST\\" + dataset + "1\\");
l.outputFigure(test.get(1), "E:\\Temp\\LEFTIST\\" + dataset + "2\\");
l.outputFigure(test.get(2), "E:\\Temp\\LEFTIST\\" + dataset + "3\\");
l.outputFigure(test.get(3), "E:\\Temp\\LEFTIST\\" + dataset + "4\\");
l.outputFigure(test.get(4), "E:\\Temp\\LEFTIST\\" + dataset + "5\\");
}
}
| 16,816 | 36.205752 | 124 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/MemoryContractable.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers;
import tsml.classifiers.distance_based.utils.system.memory.MemoryWatchable;
/**
 * Contract for classifiers whose memory usage can be capped by the caller.
 * Known classifiers: None
 *
 * The convenience defaults all funnel into the single abstract
 * {@link #setMemoryLimit(DataUnit, long)} method; memory usage itself is most
 * easily tracked via the GcMemoryWatchable interface rather than by hand.
 *
 * @author pfm15hbu, goastler
 */
public interface MemoryContractable {

    /** Units a memory limit can be expressed in. */
    enum DataUnit {BYTES, MEGABYTE, GIGABYTE}

    /** Convenience: cap memory at 6 gigabytes. */
    default void setSixGigabyteLimit() {
        setMemoryLimit(DataUnit.GIGABYTE, 6);
    }

    /** Convenience: cap memory at t gigabytes. */
    default void setGigabyteLimit(int t) {
        setMemoryLimit(DataUnit.GIGABYTE, t);
    }

    /** Convenience: cap memory at t megabytes. */
    default void setMegabyteLimit(int t) {
        setMemoryLimit(DataUnit.MEGABYTE, t);
    }

    /** Convenience: cap memory at an exact byte count. */
    default void setMemoryLimit(long bytes) {
        setMemoryLimit(DataUnit.BYTES, bytes);
    }

    /**
     * Impose a memory limit of the given amount of the given unit.
     *
     * @param unit   a value from the DataUnit enum
     * @param amount how many of that unit to allow
     */
    void setMemoryLimit(DataUnit unit, long amount);
}
| 1,852 | 36.816327 | 110 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/MultiThreadable.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers;
/**
 * Interface for classifiers that can be told to spread their work over multiple
 * threads; exactly how the threads are used is determined by the classifier.
 * Exact API for this and how threading is handled codebase-wide is still to be
 * decided 02/08/2019.
 *
 * Known classifiers: AbstractEnsemble, BOSS, cBOSS, BOSSIndividual, MultiSamplingEvaluator
 *
 * @author Matthew Middlehurst, James Large (james.large@uea.ac.uk)
 */
public interface MultiThreadable {

    /**
     * Enables multithreading, and allows the class to spawn numThreads threads.
     *
     * @param numThreads maximum number of threads the classifier may use
     */
    void enableMultiThreading(int numThreads);

    /**
     * Enables multithreading with one thread fewer than the number of available
     * processors, leaving a core free for the rest of the system.
     */
    default void enableMultiThreading() {
        final int spareProcessors = Runtime.getRuntime().availableProcessors() - 1;
        enableMultiThreading(spareProcessors);
    }
}
| 1,668 | 37.813953 | 118 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/ParameterSplittable.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers;
import java.io.Serializable;
/**
 * Implemented by classifiers whose parameter search can be split across
 * independent runs/processes: each candidate parameter set is addressed by a
 * single integer index, so distributed jobs can each evaluate one combination.
 *
 * Extends Serializable so a configured classifier can be written to disk
 * between distributed jobs.
 *
 * @author ajb
 */
public interface ParameterSplittable extends Serializable{
    /**
     * Turns the classifier's own internal parameter search on or off.
     *
     * @param b true to let the classifier search its parameters itself when built
     */
    public void setParamSearch(boolean b);
    /* The actual parameter values should be set internally. This integer
    is just a key to maintain different parameter sets. The range starts at 1
    */
    public void setParametersFromIndex(int x);
//    public String getParas();
}
| 1,181 | 33.764706 | 76 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/SaveParameterInfo.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
/**
* DEPRECIATED: DO NOT USE THIS INTERFACE
* It is redundant.
*/
package tsml.classifiers;
/**
 * DEPRECIATED: do not use this interface in new code; it is redundant.
 *
 * Interface used for checkpointing a classifier. The getParameters is used in
 * the ClassifierExperiments class.
 * TO BE REMOVED:
 * This could be overlapping with another interface and
 * could possibly be depreciated.
 *
 * @author ajb
 */
public interface SaveParameterInfo {
    /**
     * @return a (typically comma separated) summary of the classifier's
     *         parameter settings, written into results files
     */
    String getParameters();
}
| 1,179 | 30.891892 | 77 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/TSClassifier.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers;
import tsml.data_containers.TimeSeriesInstance;
import tsml.data_containers.TimeSeriesInstances;
import tsml.data_containers.utilities.Converter;
import weka.classifiers.Classifier;
import weka.core.Instance;
import weka.core.Instances;
/**
 * Adapter interface allowing a weka {@link Classifier} to be driven with
 * {@link TimeSeriesInstances} data: inputs are converted to ARFF form before
 * being handed to the wrapped classifier.
 */
public interface TSClassifier{

    /** @return the wrapped weka classifier that does the actual work */
    public Classifier getClassifier();

    /** @return the time series training data last stored via setTSTrainData */
    public TimeSeriesInstances getTSTrainData();

    /** Stores the training data so class labels are available at predict time. */
    public void setTSTrainData(TimeSeriesInstances train);

    /**
     * Stores the training data, converts it to ARFF form and trains the wrapped
     * classifier on it.
     */
    public default void buildClassifier(TimeSeriesInstances data) throws Exception{
        setTSTrainData(data);
        getClassifier().buildClassifier(Converter.toArff(data));
    }

    /** Class distribution for a single time series instance. */
    public default double[] distributionForInstance(TimeSeriesInstance inst) throws Exception{
        return getClassifier().distributionForInstance(Converter.toArff(inst, getTSTrainData().getClassLabels()));
    }

    /** Predicted class index for a single time series instance. */
    public default double classifyInstance(TimeSeriesInstance inst) throws Exception{
        return getClassifier().classifyInstance(Converter.toArff(inst, getTSTrainData().getClassLabels()));
    }

    /** Class distributions for every instance in {@code data}, in order. */
    public default double[][] distributionForInstances(TimeSeriesInstances data) throws Exception {
        Instances converted = Converter.toArff(data);
        double[][] dists = new double[data.numInstances()][];
        for (int idx = 0; idx < converted.numInstances(); idx++)
            dists[idx] = getClassifier().distributionForInstance(converted.instance(idx));
        return dists;
    }

    /** Predicted class indices for every instance in {@code data}, in order. */
    public default double[] classifyInstances(TimeSeriesInstances data) throws Exception {
        Instances converted = Converter.toArff(data);
        double[] preds = new double[data.numInstances()];
        for (int idx = 0; idx < converted.numInstances(); idx++)
            preds[idx] = getClassifier().classifyInstance(converted.instance(idx));
        return preds;
    }

    /** Wraps a plain weka classifier in a TSClassifier view. */
    static TSClassifier wrapClassifier(Classifier classifier) {
        return new TSClassifier() {
            private TimeSeriesInstances trainData;

            @Override public Classifier getClassifier() {
                return classifier;
            }

            @Override public TimeSeriesInstances getTSTrainData() {
                return trainData;
            }

            @Override public void setTSTrainData(final TimeSeriesInstances train) {
                this.trainData = train;
            }
        };
    }

    /** Identity overload: an existing TSClassifier needs no wrapping. */
    static TSClassifier wrapClassifier(TSClassifier classifier) {
        return classifier;
    }
}
| 3,185 | 35.204545 | 114 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/TestTimeContractable.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers;
import java.util.concurrent.TimeUnit;
import tsml.classifiers.distance_based.utils.classifiers.contracting.TimedTest;
/**
 * Interface that allows the user to impose a test (prediction) time contract on
 * a classifier that implements it.
 *
 * known classifiers: None
 *
 * @author pfm15hbu
 */
public interface TestTimeContractable extends TimedTest {

    /** Convenience: one second per prediction. */
    default void setOneSecondLimit() {
        setTestTimeLimit(TimeUnit.SECONDS, 1);
    }

    /** Convenience: one millisecond per prediction. */
    default void setOneMillisecondLimit() {
        setTestTimeLimit(TimeUnit.MILLISECONDS, 1);
    }

    /** Convenience: the given number of seconds per prediction. */
    default void setSecondLimit(int amount) {
        setTestTimeLimit(TimeUnit.SECONDS, amount);
    }

    /** Convenience: the given number of milliseconds per prediction. */
    default void setMillisecondLimit(int amount) {
        setTestTimeLimit(TimeUnit.MILLISECONDS, amount);
    }

    /**
     * Imposes the test time limit. Implementors store the value however suits
     * them; set any value in nanoseconds you like.
     *
     * @param nanos limit per prediction, in nanoseconds
     */
    void setTestTimeLimit(long nanos);

    /** Imposes a limit of {@code amount} units of the given time unit. */
    default void setTestTimeLimit(TimeUnit unit, long amount) {
        setTestTimeLimit(amount, unit);
    }

    /** Imposes a limit of {@code amount} units of the given time unit. */
    default void setTestTimeLimit(long amount, TimeUnit unit) {
        // TimeUnit.toNanos(amount) is equivalent to NANOSECONDS.convert(amount, unit)
        setTestTimeLimit(unit.toNanos(amount));
    }
}
| 1,898 | 36.235294 | 90 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/TrainEstimateTimeable.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers;
/**
 * Purpose: track the time associated with producing a train estimate.
 *
 * Contributors: goastler
 */
public interface TrainEstimateTimeable extends TrainTimeable {

    /** @return time spent producing the train estimate, in nanoseconds */
    long getTrainEstimateTime();

    /** @return combined time spent on the train estimate and on training itself */
    default long getTrainPlusEstimateTime() {
        final long estimateNanos = getTrainEstimateTime();
        final long trainNanos = getTrainTime();
        return estimateNanos + trainNanos;
    }
}
| 1,106 | 31.558824 | 76 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/TrainTimeContractable.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers;
import java.util.concurrent.TimeUnit;
/**
 * Interface that allows the user to impose a train time contract on a
 * classifier that implements it.
 *
 * known classifiers: ShapeletTransformClassifier, RISE, HIVE_COTE (partial),
 * BOSS, TSF, ContractRotationForest
 *
 * ********************************NOTES********************************
 * 1) contract time of <=0 means no contract has been set, even if this is
 *    potentially contractable
 */
public interface TrainTimeContractable {

    /**
     * This is the single method that must be implemented to store the contract time.
     *
     * @param time contract time in nanoseconds
     */
    void setTrainTimeLimit(long time);

    /**
     * Are we still within contract? Remove default when fully implemented.
     *
     * @param start classifier build start time
     * @return true if classifier is within the train time contract, false otherwise
     */
    boolean withinTrainContract(long start);

    /** Convenience: a fixed one-day contract. */
    default void setOneDayLimit() {
        setTrainTimeLimit(TimeUnit.DAYS, 1);
    }

    /** Convenience: a fixed one-hour contract. */
    default void setOneHourLimit() {
        setTrainTimeLimit(TimeUnit.HOURS, 1);
    }

    /** Convenience: a fixed one-minute contract. */
    default void setOneMinuteLimit() {
        setTrainTimeLimit(TimeUnit.MINUTES, 1);
    }

    /** Convenience: a contract of the given number of days. */
    default void setDayLimit(int amount) {
        setTrainTimeLimit(TimeUnit.DAYS, amount);
    }

    /** Convenience: a contract of the given number of hours. */
    default void setHourLimit(int amount) {
        setTrainTimeLimit(TimeUnit.HOURS, amount);
    }

    /** Convenience: a contract of the given number of minutes. */
    default void setMinuteLimit(int amount) {
        setTrainTimeLimit(TimeUnit.MINUTES, amount);
    }

    /** Imposes a contract of {@code amount} units of the given time unit. */
    default void setTrainTimeLimit(TimeUnit time, long amount) {
        // TimeUnit.toNanos(amount) is equivalent to NANOSECONDS.convert(amount, time)
        setTrainTimeLimit(time.toNanos(amount));
    }

    /** Imposes a contract of {@code amount} units of the given time unit. */
    default void setTrainTimeLimit(long amount, TimeUnit time) {
        setTrainTimeLimit(time, amount);
    }

    /**
     * @return the stored contract time in nanoseconds
     * @throws UnsupportedOperationException by default; implementors override
     */
    default long getTrainContractTimeNanos() {
        throw new UnsupportedOperationException();
    }
}
| 2,661 | 35.465753 | 99 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/TrainTimeable.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers;
/**
 * Purpose: track the time taken to train a classifier.
 *
 * Contributors: goastler
 */
public interface TrainTimeable {
    /**
     * @return train time in nanoseconds, or -1 (the default) when the
     *         implementor does not record its train time
     */
    default long getTrainTime() { return -1; }
}
| 964 | 33.464286 | 76 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/Tuneable.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
/*
Tunable interface enforces the method getDefaultParameterSearchSpace, for use
with the general TunedClassifier class.
ParameterSpace are created by calls to the ParameterSpace object with
addParameter(String name, values), where values can be arrays or a List.
*/
package tsml.classifiers;
import evaluation.tuning.ParameterSpace;
/**
 * For classifiers which can be tuned with the general TunedClassifier class;
 * requires an overridden setOptions from AbstractClassifier in most cases.
 *
 * @author ajb
 */
public interface Tuneable {
    /**
     * getDefaultParameterSearchSpace returns the possible parameter values
     * that can be looked for with the TunedClassifier.
     *
     * ParameterSpace entries are added via
     * addParameter(String name, values), where values can be arrays or a List.
     *
     * @return default parameter space for tuning
     */
    ParameterSpace getDefaultParameterSearchSpace();
}
| 1,536 | 32.413043 | 111 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/Visualisable.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers;
import java.io.File;
/**
 * Interface for classifiers that can output visualisations of the final model.
 *
 * @author Matthew Middlehurst
 **/
public interface Visualisable {

    /**
     * Stores a path to save visualisation files to.
     *
     * @param path String directory path
     * @return true if path is valid, false otherwise.
     */
    boolean setVisualisationSavePath(String path);

    /**
     * Create model visualisations and save them to a set path.
     *
     * @return true if successful, false otherwise
     * @throws Exception if failure to set path or create visualisation
     */
    boolean createVisualisation() throws Exception;

    /**
     * Creates the directory at the given path if it does not already exist.
     *
     * @param path String directory path
     * @return true if the directory already exists or was created, false otherwise
     */
    default boolean createVisualisationDirectories(String path) {
        File dir = new File(path);
        // an existing directory counts as success; otherwise attempt to create it
        return dir.isDirectory() || dir.mkdirs();
    }
}
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/dictionary_based/BOSS.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.dictionary_based;
import java.util.*;
import tsml.classifiers.*;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import utilities.*;
import weka.core.*;
import evaluation.storage.ClassifierResults;
import experiments.data.DatasetLoading;
import static utilities.InstanceTools.resampleTrainAndTestInstances;
import static utilities.multivariate_tools.MultivariateInstanceTools.*;
/**
* BOSS classifier with parameter search and ensembling for univariate and
* multivariate time series classification.
* If parameters are known, use the class IndividualBOSS and directly provide them.
*
* Alphabetsize fixed to four and maximum wordLength of 16.
*
* @author James Large, updated by Matthew Middlehurst
*
* Implementation based on the algorithm described in getTechnicalInformation()
*
* It is not contractable on tuneable. See cBOSS
*/
public class BOSS extends EnhancedAbstractClassifier implements
TechnicalInformationHandler, MultiThreadable {
private transient LinkedList<IndividualBOSS>[] classifiers;
private int numDimensions;
private int[] numClassifiers;
private int currentSeries = 0;
private boolean isMultivariate = false;
private final int[] wordLengths = { 16, 14, 12, 10, 8 };
private final int[] alphabetSize = { 4 };
private final boolean[] normOptions = { true, false };
private final double correctThreshold = 0.92;
private int maxEnsembleSize = 500;
private transient Instances train;
private double ensembleCvAcc = -1;
private double[] ensembleCvPreds = null;
private int numThreads = 1;
private boolean multiThread = false;
private ExecutorService ex;
protected static final long serialVersionUID = 22554L;
public BOSS() {
super(CAN_ESTIMATE_OWN_PERFORMANCE);
}
@Override
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
result = new TechnicalInformation(TechnicalInformation.Type.ARTICLE);
result.setValue(TechnicalInformation.Field.AUTHOR, "P. Schafer");
result.setValue(TechnicalInformation.Field.TITLE, "The BOSS is concerned with time series classification in the presence of noise");
result.setValue(TechnicalInformation.Field.JOURNAL, "Data Mining and Knowledge Discovery");
result.setValue(TechnicalInformation.Field.VOLUME, "29");
result.setValue(TechnicalInformation.Field.NUMBER,"6");
result.setValue(TechnicalInformation.Field.PAGES, "1505-1530");
result.setValue(TechnicalInformation.Field.YEAR, "2015");
return result;
}
@Override
public Capabilities getCapabilities(){
Capabilities result = super.getCapabilities();
result.disableAll();
result.setMinimumNumberInstances(2);
// attributes
result.enable(Capabilities.Capability.RELATIONAL_ATTRIBUTES);
result.enable(Capabilities.Capability.NUMERIC_ATTRIBUTES);
// class
result.enable(Capabilities.Capability.NOMINAL_CLASS);
return result;
}
@Override
public String getParameters() {
StringBuilder sb = new StringBuilder();
sb.append(super.getParameters());
sb.append(",numSeries,").append(numDimensions);
for (int n = 0; n < numDimensions; n++) {
sb.append(",numclassifiers,").append(n).append(",").append(numClassifiers[n]);
for (int i = 0; i < numClassifiers[n]; ++i) {
IndividualBOSS boss = classifiers[n].get(i);
sb.append(",windowSize,").append(boss.getWindowSize()).append(",wordLength,").append(boss.getWordLength());
sb.append(",alphabetSize,").append(boss.getAlphabetSize()).append(",norm,").append(boss.isNorm());
}
}
return sb.toString();
}
@Override
public void enableMultiThreading(int numThreads) {
if (numThreads > 1) {
this.numThreads = numThreads;
multiThread = true;
}
else{
this.numThreads = 1;
multiThread = false;
}
}
@Override
public ClassifierResults getTrainResults(){
return trainResults;
}
public void setMaxEnsembleSize(int size) {
maxEnsembleSize = size;
}
@Override
public void buildClassifier(final Instances data) throws Exception {
printDebug("Building BOSS");
trainResults.setBuildTime(System.nanoTime());
// can classifier handle the data?
getCapabilities().testWithFail(data);
if(data.checkForAttributeType(Attribute.RELATIONAL)){
isMultivariate = true;
}
//Window length settings
int seriesLength = isMultivariate ? channelLength(data)-1 : data.numAttributes()-1; //minus class attribute
int minWindow = 10;
double maxWinLenProportion = 1;
int maxWindow = (int)(seriesLength* maxWinLenProportion);
if (maxWindow < minWindow) minWindow = maxWindow/2;
//whats the max number of window sizes that should be searched through
double maxWinSearchProportion = 0.25;
double maxWindowSearches = seriesLength* maxWinSearchProportion;
int winInc = (int)((maxWindow - minWindow) / maxWindowSearches);
if (winInc < 1) winInc = 1;
//initialise variables
if (data.classIndex() != data.numAttributes()-1)
throw new Exception("BOSS_BuildClassifier: Class attribute not set as last attribute in dataset");
//Multivariate
if (isMultivariate) {
numDimensions = numDimensions(data);
classifiers = new LinkedList[numDimensions];
for (int n = 0; n < numDimensions; n++){
classifiers[n] = new LinkedList<>();
}
numClassifiers = new int[numDimensions];
}
//Univariate
else{
numDimensions = 1;
classifiers = new LinkedList[1];
classifiers[0] = new LinkedList<>();
numClassifiers = new int[1];
}
rand = new Random(seed);
this.train = data;
if (multiThread){
if (numThreads == 1) numThreads = Runtime.getRuntime().availableProcessors();
if (ex == null) ex = Executors.newFixedThreadPool(numThreads);
}
//required to deal with multivariate datasets, each channel is split into its own instances
Instances[] series;
//Multivariate
if (isMultivariate) {
series = splitMultivariateInstances(data);
}
//Univariate
else{
series = new Instances[1];
series[0] = data;
}
for (int n = 0; n < numDimensions; n++) {
currentSeries = n;
double maxAcc = -1.0;
//the acc of the worst member to make it into the final ensemble as it stands
double minMaxAcc = -1.0;
for (boolean normalise : normOptions) {
for (int winSize = minWindow; winSize <= maxWindow; winSize += winInc) {
IndividualBOSS boss = new IndividualBOSS(wordLengths[0], alphabetSize[0], winSize, normalise, multiThread, numThreads, ex);
boss.seed = seed;
boss.buildClassifier(series[n]); //initial setup for this windowsize, with max word length
IndividualBOSS bestClassifierForWinSize = null;
double bestAccForWinSize = -1.0;
//find best word length for this window size
for (Integer wordLen : wordLengths) {
boss = boss.buildShortenedBags(wordLen); //in first iteration, same lengths (wordLengths[0]), will do nothing
double acc = individualTrainAcc(boss, series[n], bestAccForWinSize);
if (acc >= bestAccForWinSize) {
bestAccForWinSize = acc;
bestClassifierForWinSize = boss;
}
}
if(classifiers[n].size()%10==0)
printLineDebug(" BOSS Model "+(classifiers[n].size()+1)+" found ");
//if this window size's accuracy is not good enough to make it into the ensemble, dont bother storing at all
if (makesItIntoEnsemble(bestAccForWinSize, maxAcc, minMaxAcc, classifiers[n].size())) {
bestClassifierForWinSize.clean();
bestClassifierForWinSize.accuracy = bestAccForWinSize;
classifiers[n].add(bestClassifierForWinSize);
if (bestAccForWinSize > maxAcc) {
maxAcc = bestAccForWinSize;
//get rid of any extras that dont fall within the new max threshold
Iterator<IndividualBOSS> it = classifiers[n].iterator();
while (it.hasNext()) {
IndividualBOSS b = it.next();
if (b.accuracy < maxAcc * correctThreshold) {
it.remove();
}
}
}
while (classifiers[n].size() > maxEnsembleSize) {
//cull the 'worst of the best' until back under the max size
int minAccInd = (int) findMinEnsembleAcc()[0];
classifiers[n].remove(minAccInd);
}
minMaxAcc = findMinEnsembleAcc()[1]; //new 'worst of the best' acc
}
numClassifiers[n] = classifiers[n].size();
}
}
}
//end train time in nanoseconds
trainResults.setBuildTime(System.nanoTime() - trainResults.getBuildTime());
//Estimate train accuracy
if (getEstimateOwnPerformance()) {
// trainResults.finaliseResults();
double result = findEnsembleTrainAcc(data);
// System.out.println("CV acc ="+result);
}
trainResults.setParas(getParameters());
}
//[0] = index, [1] = acc
private double[] findMinEnsembleAcc() {
double minAcc = Double.MAX_VALUE;
int minAccInd = 0;
for (int i = 0; i < classifiers[currentSeries].size(); ++i) {
double curacc = classifiers[currentSeries].get(i).accuracy;
if (curacc < minAcc) {
minAcc = curacc;
minAccInd = i;
}
}
return new double[] { minAccInd, minAcc };
}
private double individualTrainAcc(IndividualBOSS boss, Instances series, double lowestAcc) throws Exception {
int correct = 0;
int numInst = series.numInstances();
int requiredCorrect = (int)(lowestAcc*numInst);
if (multiThread){
ArrayList<Future<Double>> futures = new ArrayList<>(numInst);
for (int i = 0; i < numInst; ++i)
futures.add(ex.submit(boss.new TrainNearestNeighbourThread(i)));
int idx = 0;
for (Future<Double> f: futures){
if (f.get() == series.get(idx).classValue()) {
++correct;
}
idx++;
}
}
else {
for (int i = 0; i < numInst; ++i) {
if (correct + numInst - i < requiredCorrect) {
return -1;
}
double c = boss.classifyInstance(i); //classify series i, while ignoring its corresponding histogram i
if (c == series.get(i).classValue()) {
++correct;
}
}
}
return (double) correct / (double) numInst;
}
private boolean makesItIntoEnsemble(double acc, double maxAcc, double minMaxAcc, int curEnsembleSize) {
if (acc >= maxAcc * correctThreshold) {
if (curEnsembleSize >= maxEnsembleSize)
return acc > minMaxAcc;
else
return true;
}
return false;
}
private double findEnsembleTrainAcc(Instances data) throws Exception {
this.ensembleCvPreds = new double[data.numInstances()];
trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
trainResults.setEstimatorName(getClassifierName());
trainResults.setDatasetName(data.relationName());
trainResults.setFoldID(seed);
trainResults.setSplit("train");
trainResults.setParas(getParameters());
double correct = 0;
double[] actuals=new double[data.numInstances()];
for (int i = 0; i < data.numInstances(); ++i) {
actuals[i]=data.instance(i).classValue();
long predTime = System.nanoTime();
// classify series i, while ignoring its corresponding histogram i
double[] probs = distributionForInstance(i, data.numClasses());
predTime = System.nanoTime() - predTime;
int maxClass = findIndexOfMax(probs, rand);
if (maxClass == data.get(i).classValue())
++correct;
this.ensembleCvPreds[i] = maxClass;
trainResults.addPrediction(data.get(i).classValue(), probs, maxClass, predTime, "");
}
trainResults.finaliseResults(actuals);
double result = correct / data.numInstances();
return result;
}
public double getTrainAcc(){
if(ensembleCvAcc>=0){
return this.ensembleCvAcc;
}
try{
return this.findEnsembleTrainAcc(train);
}catch(Exception e){
e.printStackTrace();
}
return -1;
}
public double[] getTrainPreds(){
if(this.ensembleCvPreds==null){
try{
this.findEnsembleTrainAcc(train);
}catch(Exception e){
e.printStackTrace();
}
}
return this.ensembleCvPreds;
}
private double[] distributionForInstance(int test, int numClasses) throws Exception {
double[] classHist = new double[numClasses];
//get sum of all channels, votes from each are weighted the same.
double sum = 0;
for (int n = 0; n < numDimensions; n++) {
for (IndividualBOSS classifier : classifiers[n]) {
double classification = classifier.classifyInstance(test);
classHist[(int) classification] += classifier.weight;
sum += classifier.weight;
}
}
double[] distributions = new double[numClasses];
if (sum != 0) {
for (int i = 0; i < classHist.length; ++i)
distributions[i] += (classHist[i] / sum) / numDimensions;
}
else{
for (int i = 0; i < classHist.length; ++i)
distributions[i] += 1 / numClasses;
}
return distributions;
}
@Override
public double classifyInstance(Instance instance) throws Exception {
double[] probs = distributionForInstance(instance);
return findIndexOfMax(probs, rand);
}
@Override
public double[] distributionForInstance(Instance instance) throws Exception {
int numClasses = train.numClasses();
double[] classHist = new double[numClasses];
//get sum of all channels, votes from each are weighted the same.
double sum = 0;
Instance[] series;
//Multivariate
if (isMultivariate) {
series = splitMultivariateInstanceWithClassVal(instance);
}
//Univariate
else {
series = new Instance[1];
series[0] = instance;
}
if (multiThread){
ArrayList<Future<Double>>[] futures = new ArrayList[numDimensions];
for (int n = 0; n < numDimensions; n++) {
futures[n] = new ArrayList<>(numClassifiers[n]);
for (IndividualBOSS classifier : classifiers[n]) {
futures[n].add(ex.submit(classifier.new TestNearestNeighbourThread(series[n])));
}
}
for (int n = 0; n < numDimensions; n++) {
int idx = 0;
for (Future<Double> f : futures[n]) {
double weight = classifiers[n].get(idx).weight;
classHist[f.get().intValue()] += weight;
sum += weight;
idx++;
}
}
}
else {
for (int n = 0; n < numDimensions; n++) {
for (IndividualBOSS classifier : classifiers[n]) {
double classification = classifier.classifyInstance(series[n]);
classHist[(int) classification] += classifier.weight;
sum += classifier.weight;
}
}
}
double[] distributions = new double[instance.numClasses()];
if (sum != 0) {
for (int i = 0; i < classHist.length; ++i)
distributions[i] += (classHist[i] / sum);
}
else{
for (int i = 0; i < classHist.length; ++i)
distributions[i] += 1 / numClasses;
}
return distributions;
}
public static void main(String[] args) throws Exception{
int fold = 0;
//Minimum working example
String dataset = "ItalyPowerDemand";
Instances train = DatasetLoading.loadDataNullable("Z:\\ArchiveData\\Univariate_arff\\"+dataset+"\\"+dataset+"_TRAIN.arff");
Instances test = DatasetLoading.loadDataNullable("Z:\\ArchiveData\\Univariate_arff\\"+dataset+"\\"+dataset+"_TEST.arff");
Instances[] data = resampleTrainAndTestInstances(train, test, fold);
train = data[0];
test = data[1];
String dataset2 = "ERing";
Instances train2 = DatasetLoading.loadDataNullable("Z:\\ArchiveData\\Multivariate_arff\\"+dataset2+"\\"+dataset2+"_TRAIN.arff");
Instances test2 = DatasetLoading.loadDataNullable("Z:\\ArchiveData\\Multivariate_arff\\"+dataset2+"\\"+dataset2+"_TEST.arff");
Instances[] data2 = resampleMultivariateTrainAndTestInstances(train2, test2, fold);
train2 = data2[0];
test2 = data2[1];
BOSS c;
double accuracy;
c = new BOSS();
c.setEstimateOwnPerformance(true);
c.buildClassifier(train);
accuracy = ClassifierTools.accuracy(test, c);
System.out.println("BOSS accuracy on " + dataset + " fold " + fold + " = " + accuracy + " numClassifiers = " + Arrays.toString(c.numClassifiers));
c = new BOSS();
c.setEstimateOwnPerformance(true);
c.buildClassifier(train2);
accuracy = ClassifierTools.accuracy(test2, c);
System.out.println("BOSS accuracy on " + dataset2 + " fold " + fold + " = " + accuracy + " numClassifiers = " + Arrays.toString(c.numClassifiers));
//Output 02/08/19
/*
CV acc =0.9402985074626866
BOSS accuracy on ItalyPowerDemand fold 0 = 0.9271137026239067 numClassifiers = [4]
CV acc =0.8333333333333334
BOSS accuracy on ERing fold 0 = 0.8333333333333334 numClassifiers = [4, 1, 3, 6]
*/
}
} | 20,308 | 35.526978 | 155 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/dictionary_based/BagOfPatternsClassifier.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.dictionary_based;
import experiments.data.DatasetLoading;
import tsml.classifiers.EnhancedAbstractClassifier;
import tsml.transformers.BagOfPatterns;
import tsml.transformers.SAX;
import utilities.ClassifierTools;
import machine_learning.classifiers.kNN;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.SparseInstance;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import java.util.List;
import java.util.concurrent.TimeUnit;
/**
* Converts instances into Bag Of Patterns form, then gives to a 1NN
*
* Params: wordLength, alphabetSize, windowLength
*
* @author James
*/
public class BagOfPatternsClassifier extends EnhancedAbstractClassifier implements TechnicalInformationHandler {
    /** Citation details for the Bag-of-Patterns paper this classifier implements (Lin et al., 2012). */
    @Override
    public TechnicalInformation getTechnicalInformation() {
        TechnicalInformation result;
        result = new TechnicalInformation(TechnicalInformation.Type.ARTICLE);
        result.setValue(TechnicalInformation.Field.AUTHOR, "J. Lin and R. Khade and Y. Li");
        result.setValue(TechnicalInformation.Field.TITLE, "Rotation-invariant similarity in time series using bag-of-patterns representation");
        result.setValue(TechnicalInformation.Field.JOURNAL, "Journal of Intelligent Information Systems");
        result.setValue(TechnicalInformation.Field.VOLUME, "39");
        result.setValue(TechnicalInformation.Field.NUMBER,"2");
        result.setValue(TechnicalInformation.Field.PAGES, "287-315");
        result.setValue(TechnicalInformation.Field.YEAR, "2012");
        return result;
    }
    public Instances matrix;              // BOP-transformed training data (word histograms); kept for LOOCV lookups
    public kNN knn;                       // 1NN (Euclidean) classifier run on the transformed histograms
    private BagOfPatterns bop;            // transformer that builds the histograms
    private int PAA_intervalsPerWindow;   // word length; -1 until set (by ctor or param search)
    private int SAX_alphabetSize;         // -1 until set (by ctor or param search)
    private int windowSize;               // sliding window length; -1 until set (by ctor or param search)
    private List<String> alphabet;        // SAX alphabet of size SAX_alphabetSize
    private final boolean useParamSearch; //does user want parameter search to be performed
    /**
     * No params given: parameters will be found by cross-validated search in buildClassifier().
     */
    public BagOfPatternsClassifier() {
        super(CANNOT_ESTIMATE_OWN_PERFORMANCE);
        this.PAA_intervalsPerWindow = -1;
        this.SAX_alphabetSize = -1;
        this.windowSize = -1;
        knn = new kNN(); //defaults to 1NN, Euclidean distance
        useParamSearch=true;
    }
    /**
     * Params given: use those only, no search is performed.
     *
     * @param PAA_intervalsPerWindow word length
     * @param SAX_alphabetSize       alphabet size
     * @param windowSize             sliding window length
     */
    public BagOfPatternsClassifier(int PAA_intervalsPerWindow, int SAX_alphabetSize, int windowSize) {
        super(CANNOT_ESTIMATE_OWN_PERFORMANCE);
        this.PAA_intervalsPerWindow = PAA_intervalsPerWindow;
        this.SAX_alphabetSize = SAX_alphabetSize;
        this.windowSize = windowSize;
        bop = new BagOfPatterns(PAA_intervalsPerWindow, SAX_alphabetSize, windowSize);
        knn = new kNN(); //default to 1NN, Euclidean distance
        alphabet = SAX.getAlphabet(SAX_alphabetSize);
        useParamSearch=false;
    }
    /** @return PAA word length (intervals per window) */
    public int getPAA_intervalsPerWindow() {
        return PAA_intervalsPerWindow;
    }
    /** @return SAX alphabet size */
    public int getSAX_alphabetSize() {
        return SAX_alphabetSize;
    }
    /** @return sliding window length */
    public int getWindowSize() {
        return windowSize;
    }
    /**
     * @return { numIntervals(word length), alphabetSize, slidingWindowSize }
     */
    public int[] getParameterArray() {
        return new int[] { PAA_intervalsPerWindow, SAX_alphabetSize, windowSize};
    }
    /**
     * Performs leave-one-out cross validation on the given data for varying parameter values,
     * and returns the parameter set that yielded the greatest accuracy. Ties are broken in
     * favour of the first set found (smallest alphabet, then window, then word length).
     *
     * @param data Data to perform cross validation testing on
     * @return { numIntervals, alphabetSize, slidingWindowSize }
     */
    public static int[] parameterSearch(Instances data) throws Exception {
        //BoP paper window search range suggestion: 15%-36% of series length
        int minWinSize = (int)((data.numAttributes()-1) * (15.0/100.0));
        int maxWinSize = (int)((data.numAttributes()-1) * (36.0/100.0));
//        int winInc = 1; //check every size in range
        int winInc = (int)((maxWinSize - minWinSize) / 10.0); //check 10 values within that range
        if (winInc < 1) winInc = 1;
        double bestAcc = 0.0;
        //default to min of each param range
        //this (so far) matters only to the TSC2018 data set Fungi, where
        //the train set consists of one instance from each class, making it
        //impossible to correctly classify using nearest neighbour
        int bestAlpha = 2, bestWord = 2, bestWindowSize = minWinSize;
        for (int alphaSize = 2; alphaSize <= 8; alphaSize++) {
            for (int winSize = minWinSize; winSize <= maxWinSize; winSize+=winInc) {
                for (int wordSize = 2; wordSize <= winSize/2; wordSize*=2) { //powers of 2, Lin BoP suggestion
                    BagOfPatternsClassifier bop = new BagOfPatternsClassifier(wordSize, alphaSize, winSize);
                    double acc = bop.crossValidate(data); //leave-one-out without rebuilding every fold
                    if (acc > bestAcc) {
                        bestAcc = acc;
                        bestAlpha = alphaSize;
                        bestWord = wordSize;
                        bestWindowSize = winSize;
                    }
                }
            }
        }
        return new int[] { bestWord, bestAlpha, bestWindowSize};
    }
    /**
     * Leave-one-out CV without re-doing the identical transformation every fold:
     * builds once, then classifies each train histogram against all others.
     *
     * @return cv accuracy
     */
    private double crossValidate(Instances data) throws Exception {
        buildClassifier(data);
        double correct = 0;
        for (int i = 0; i < data.numInstances(); ++i)
            if (classifyInstance(i) == data.get(i).classValue())
                ++correct;
        return correct / data.numInstances();
    }
    /**
     * Builds the classifier: optionally searches for parameters, validates them,
     * transforms the training data into word histograms and hands them to the 1NN.
     *
     * @throws Exception if the class attribute is not last, or any parameter is invalid
     */
    @Override
    public void buildClassifier(final Instances data) throws Exception {
        long startTime = System.nanoTime();
        if (data.classIndex() != data.numAttributes()-1)
            throw new Exception("LinBoP_BuildClassifier: Class attribute not set as last attribute in dataset");
        if (useParamSearch) {
            //find and set params via LOOCV
            int[] params = parameterSearch(data);
            this.PAA_intervalsPerWindow = params[0];
            this.SAX_alphabetSize = params[1];
            this.windowSize = params[2];
            bop = new BagOfPatterns(PAA_intervalsPerWindow, SAX_alphabetSize, windowSize);
            alphabet = SAX.getAlphabet(SAX_alphabetSize);
        }
        //validate params (either user-supplied or found above)
        if (PAA_intervalsPerWindow<0)
            throw new Exception("LinBoP_BuildClassifier: Invalid PAA word size: " + PAA_intervalsPerWindow);
        if (PAA_intervalsPerWindow>windowSize)
            throw new Exception("LinBoP_BuildClassifier: Invalid PAA word size, bigger than sliding window size: "
                    + PAA_intervalsPerWindow + "," + windowSize);
        if (SAX_alphabetSize<0 || SAX_alphabetSize>10)
            throw new Exception("LinBoP_BuildClassifier: Invalid SAX alphabet size (valid=2-10): " + SAX_alphabetSize);
        if (windowSize<0 || windowSize>data.numAttributes()-1)
            throw new Exception("LinBoP_BuildClassifier: Invalid sliding window size: "
                    + windowSize + " (series length "+ (data.numAttributes()-1) + ")");
        //real work
        matrix = bop.fitTransform(data); //transform to word histograms
        knn.buildClassifier(matrix); //give to 1nn
        trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
        trainResults.setBuildTime(System.nanoTime()-startTime);
    }
    /** Transforms the instance into its word histogram and classifies it with the 1NN. */
    @Override
    public double classifyInstance(Instance instance) throws Exception {
        return knn.classifyInstance(bop.transform(instance));
    }
/**
* Used as part of a leave-one-out crossvalidation, to skip having to rebuild
* the classifier every time (since n-1 histograms would be identical each time anyway), therefore this classifies
* the instance at the index passed while ignoring its own corresponding histogram
*
* @param test index of instance to classify
* @return classification
*/
public double classifyInstance(int test) {
double bestDist = Double.MAX_VALUE;
double nn = -1.0;
Instance testInst = matrix.get(test);
for (int i = 0; i < matrix.numInstances(); ++i) {
if (i == test) //skip 'this' one, leave-one-out
continue;
double dist = knn.distance(testInst, matrix.get(i));
if (dist < bestDist) {
bestDist = dist;
nn = matrix.get(i).classValue();
}
}
return nn;
}
    /**
     * Converts the instance to Bag-of-Patterns form, wraps the histogram in an Instance
     * compatible with the training matrix, and delegates to the 1NN's distribution.
     */
    @Override
    public double[] distributionForInstance(Instance instance) throws Exception {
        //convert to BOP form
        double[] hist = bop.bagToArray(bop.buildBag(instance));
        //stuff into Instance
        Instances newInsts = new Instances(matrix, 1); //copy attribute data
        newInsts.add(new SparseInstance(1.0, hist));
        return knn.distributionForInstance(newInsts.firstInstance());
    }
    /** @return parameter string: superclass params plus alphabet/window/word settings */
    @Override
    public String getParameters() {
        StringBuilder sb = new StringBuilder();
        sb.append(super.getParameters());
        sb.append(",SAXAlphabetSize,").append(getSAX_alphabetSize()).append(",WindowSize,");
        sb.append(getWindowSize()).append(",PAAIntervals,").append(getPAA_intervalsPerWindow());
        return sb.toString();
    }
    /** Not implemented for this classifier. */
    @Override
    public Capabilities getCapabilities() {
        throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
    }
    /** Entry point: runs the minimal smoke test below. */
    public static void main(String[] args) throws Exception{
//        System.out.println(ClassifierTools.testUtils_getIPDAcc(new BagOfPatterns()));
//        System.out.println(ClassifierTools.testUtils_confirmIPDReproduction(new BagOfPatterns(), 0.8425655976676385, "2019_09_26"));
        basicTest();
    }
    /**
     * Smoke test: trains (with parameter search) and evaluates on a local copy of the
     * Car dataset, printing timings, the chosen parameters and the test accuracy.
     * Note the hard-coded local file paths; exceptions are caught and printed.
     */
    public static void basicTest() {
        System.out.println("BOPBasicTest\n");
        try {
            Instances train = DatasetLoading.loadDataNullable("C:\\tempbakeoff\\TSC Problems\\Car\\Car_TRAIN.arff");
            Instances test = DatasetLoading.loadDataNullable("C:\\tempbakeoff\\TSC Problems\\Car\\Car_TEST.arff");
//            Instances train = ClassifierTools.loadDataThrowable("C:\\tempbakeoff\\TSC Problems\\BeetleFly\\BeetleFly_TRAIN.arff");
//            Instances test = ClassifierTools.loadDataThrowable("C:\\tempbakeoff\\TSC Problems\\BeetleFly\\BeetleFly_TEST.arff");
            System.out.println(train.relationName());
            BagOfPatternsClassifier bop = new BagOfPatternsClassifier();
            System.out.println("Training starting");
            long start = System.nanoTime();
            bop.buildClassifier(train);
            double trainTime = (System.nanoTime() - start) / 1000000000.0; //seconds
            System.out.println("Training done (" + trainTime + "s)");
            System.out.print("Params: ");
            for (int p : bop.getParameterArray())
                System.out.print(p + " ");
            System.out.println("");
            System.out.println("\nTesting starting");
            start = System.nanoTime();
            double acc = ClassifierTools.accuracy(test, bop);
            double testTime = (System.nanoTime() - start) / 1000000000.0; //seconds
            System.out.println("Testing done (" + testTime + "s)");
            System.out.println("\nACC: " + acc);
        }
        catch (Exception e) {
            System.out.println(e);
            e.printStackTrace();
        }
    }
    /** @return short display name of this classifier */
    @Override
    public String toString() {
        return "BagOfPatterns";
    }
}
| 12,728 | 37.456193 | 139 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/dictionary_based/IndividualBOSS.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.dictionary_based;
import tsml.classifiers.MultiThreadable;
import tsml.classifiers.dictionary_based.bitword.BitWordInt;
import weka.classifiers.AbstractClassifier;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.UnassignedClassException;
import java.io.Serializable;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
/**
* BOSS classifier to be used with known parameters, for boss with parameter search, use BOSSEnsemble.
*
* Current implementation of BitWord as of 07/11/2016 only supports alphabetsize of 4, which is the expected value
* as defined in the paper
*
* Params: wordLength, alphabetSize, windowLength, normalise?
*
* @author James Large. Enhanced by original author Patrick Schaefer
*
* Implementation based on the algorithm described in getTechnicalInformation()
*/
public class IndividualBOSS extends AbstractClassifier implements Serializable, Comparable<IndividualBOSS>, MultiThreadable {
    //all sfa words found in original buildClassifier(), no numerosity reduction/shortening applied;
    //retained so that shorter word lengths can be derived without re-transforming (see buildShortenedBags)
    protected BitWordInt[/*instance*/][/*windowindex*/] SFAwords;
    //histograms of words of the current wordlength with numerosity reduction applied (if selected)
    protected ArrayList<Bag> bags;
    //breakpoints to be found by MCB (Multiple Coefficient Binning)
    protected double[/*letterindex*/][/*breakpointsforletter*/] breakpoints;
    protected double inverseSqrtWindowSize; // cached 1/sqrt(windowSize), used to normalise DFT coefficients
    protected int windowSize;
    protected int wordLength;
    protected int alphabetSize;
    protected boolean norm;                 // if true, the DC (mean) coefficient is dropped
    protected boolean numerosityReduction = true;  // skip consecutive identical words when bagging
    protected boolean cleanAfterBuild = false;     // if true, discard SFAwords after build to save memory
    protected double accuracy = -1;  // train accuracy, set externally by the ensemble
    protected double weight = 1;     // vote weight, set externally by the ensemble
    protected ArrayList<Integer> subsampleIndices;  // indices of train cases used, when subsampling
    protected boolean multiThread = false;
    protected int numThreads = 1;
    protected ExecutorService ex;
    protected int seed = 0;
    protected Random rand;
    protected static final long serialVersionUID = 22551L;
    /** Full constructor, including threading configuration. */
    public IndividualBOSS(int wordLength, int alphabetSize, int windowSize, boolean normalise, boolean multiThread, int numThreads, ExecutorService ex) {
        this.wordLength = wordLength;
        this.alphabetSize = alphabetSize;
        this.windowSize = windowSize;
        this.inverseSqrtWindowSize = 1.0 / Math.sqrt(windowSize);
        this.norm = normalise;
        this.multiThread = multiThread;
        this.numThreads = numThreads;
        this.ex = ex;
    }
    /** Single-threaded constructor. */
    public IndividualBOSS(int wordLength, int alphabetSize, int windowSize, boolean normalise) {
        this.wordLength = wordLength;
        this.alphabetSize = alphabetSize;
        this.windowSize = windowSize;
        this.inverseSqrtWindowSize = 1.0 / Math.sqrt(windowSize);
        this.norm = normalise;
    }
    /**
     * Used when shortening histograms: copies 'meta' data over (including the shared
     * SFAwords and breakpoints references), but with a shorter word length. The actual
     * shortening of the bags happens separately in buildShortenedBags().
     */
    public IndividualBOSS(IndividualBOSS boss, int wordLength) {
        this.wordLength = wordLength;
        this.windowSize = boss.windowSize;
        this.inverseSqrtWindowSize = boss.inverseSqrtWindowSize;
        this.alphabetSize = boss.alphabetSize;
        this.norm = boss.norm;
        this.numerosityReduction = boss.numerosityReduction;
        this.SFAwords = boss.SFAwords;
        this.breakpoints = boss.breakpoints;
        this.multiThread = boss.multiThread;
        this.numThreads = boss.numThreads;
        this.ex = boss.ex;
        this.seed = boss.seed;
        this.rand = boss.rand;
        this.bags = new ArrayList<>(boss.bags.size());
    }
    /** Orders classifiers by (train) accuracy, ascending. */
    @Override
    public int compareTo(IndividualBOSS o) {
        return Double.compare(this.accuracy, o.accuracy);
    }
    // NOTE(review): sets numThreads but does not set the multiThread flag — confirm
    // callers are expected to enable multiThread separately (see buildClassifier).
    @Override
    public void enableMultiThreading(int numThreads) {
        this.numThreads = numThreads;
    }
    /**
     * A word histogram for a single series: maps each SFA word to its count,
     * and carries the class value of the series it was built from.
     */
    public static class Bag extends HashMap<BitWordInt, Integer> {
        double classVal;
        protected static final long serialVersionUID = 22552L;
        public Bag() {
            super();
        }
        public Bag(int classValue) {
            super();
            classVal = classValue;
        }
        public double getClassVal() { return classVal; }
        public void setClassVal(double classVal) { this.classVal = classVal; }
    }
    public int getWindowSize() { return windowSize; }
    public int getWordLength() { return wordLength; }
    public int getAlphabetSize() { return alphabetSize; }
    public boolean isNorm() { return norm; }
    public ArrayList<Bag> getBags() { return bags; }
    /**
     * @return { numIntervals(word length), alphabetSize, slidingWindowSize, normalise? }
     */
    public int[] getParameters() {
        return new int[] { wordLength, alphabetSize, windowSize };
    }
    public void setSeed(int i){ seed = i; }
    /** Frees the stored per-window SFA words (only needed for word-length shortening). */
    public void clean() {
        SFAwords = null;
    }
protected double[][] performDFT(double[][] windows) {
double[][] dfts = new double[windows.length][wordLength];
for (int i = 0; i < windows.length; ++i) {
dfts[i] = DFT(windows[i]);
}
return dfts;
}
protected double stdDev(double[] series) {
double sum = 0.0;
double squareSum = 0.0;
for (int i = 0; i < windowSize; i++) {
sum += series[i];
squareSum += series[i]*series[i];
}
double mean = sum / series.length;
double variance = squareSum / series.length - mean*mean;
return variance > 0 ? Math.sqrt(variance) : 1.0;
}
protected double[] DFT(double[] series) {
//taken from FFT.java but
//return just a double[] size n, { real1, imag1, ... realn/2, imagn/2 }
//instead of Complex[] size n/2
//only calculating first wordlength/2 coefficients (output values),
//and skipping first coefficient if the data is to be normalised
int n=series.length;
int outputLength = wordLength/2;
int start = (norm ? 1 : 0);
//normalize the disjoint windows and sliding windows by dividing them by their standard deviation
//all Fourier coefficients are divided by sqrt(windowSize)
double normalisingFactor = inverseSqrtWindowSize / stdDev(series);
double[] dft=new double[outputLength*2];
for (int k = start; k < start + outputLength; k++) { // For each output element
float sumreal = 0;
float sumimag = 0;
for (int t = 0; t < n; t++) { // For each input element
sumreal += series[t]*Math.cos(2*Math.PI * t * k / n);
sumimag += -series[t]*Math.sin(2*Math.PI * t * k / n);
}
dft[(k-start)*2] = sumreal * normalisingFactor;
dft[(k-start)*2+1] = sumimag * normalisingFactor;
}
return dft;
}
private double[] DFTunnormed(double[] series) {
//taken from FFT.java but
//return just a double[] size n, { real1, imag1, ... realn/2, imagn/2 }
//instead of Complex[] size n/2
//only calculating first wordlength/2 coefficients (output values),
//and skipping first coefficient if the data is to be normalised
int n=series.length;
int outputLength = wordLength/2;
int start = (norm ? 1 : 0);
double[] dft = new double[outputLength*2];
double twoPi = 2*Math.PI / n;
for (int k = start; k < start + outputLength; k++) { // For each output element
float sumreal = 0;
float sumimag = 0;
for (int t = 0; t < n; t++) { // For each input element
sumreal += series[t]*Math.cos(twoPi * t * k);
sumimag += -series[t]*Math.sin(twoPi * t * k);
}
dft[(k-start)*2] = sumreal;
dft[(k-start)*2+1] = sumimag;
}
return dft;
}
private double[] normalizeDFT(double[] dft, double std) {
double normalisingFactor = (std > 0? 1.0 / std : 1.0) * inverseSqrtWindowSize;
for (int i = 0; i < dft.length; i++)
dft[i] *= normalisingFactor;
return dft;
}
    /**
     * Momentary Fourier Transform: computes the truncated DFT of EVERY sliding window
     * of the series in O(n * wordLength), by seeding with a full DFT of the first
     * window and then updating each coefficient incrementally as the window slides.
     * Each window's coefficients are normalised by that window's standard deviation.
     */
    private double[][] performMFT(double[] series) {
        // ignore DC value? when norm is set, skip the first complex coefficient (offset 2 reals)
        int startOffset = norm ? 2 : 0;
        int l = wordLength;
        l = l + l % 2; // make it even (coefficients come in real/imag pairs)
        // phis holds e^(-2*pi*i*k/windowSize) per coefficient; exponent is integral
        // here since u and startOffset are both even
        double[] phis = new double[l];
        for (int u = 0; u < phis.length; u += 2) {
            double uHalve = -(u + startOffset) / 2;
            phis[u] = realephi(uHalve, windowSize);
            phis[u + 1] = complexephi(uHalve, windowSize);
        }
        // means and stddev for each sliding window
        int end = Math.max(1, series.length - windowSize + 1);
        double[] means = new double[end];
        double[] stds = new double[end];
        calcIncrementalMeanStddev(windowSize, series, means, stds);
        // holds the DFT of each sliding window
        double[][] transformed = new double[end][];
        double[] mftData = null;
        for (int t = 0; t < end; t++) {
            // use the MFT recurrence: shift in the new sample, shift out the old,
            // then rotate each coefficient by its phi
            if (t > 0) {
                for (int k = 0; k < l; k += 2) {
                    double real1 = (mftData[k] + series[t + windowSize - 1] - series[t - 1]);
                    double imag1 = (mftData[k + 1]);
                    double real = complexMulReal(real1, imag1, phis[k], phis[k + 1]);
                    double imag = complexMulImag(real1, imag1, phis[k], phis[k + 1]);
                    mftData[k] = real;
                    mftData[k + 1] = imag;
                }
            } // use the full DFT for the first offset only
            else {
                mftData = Arrays.copyOf(series, windowSize);
                mftData = DFTunnormed(mftData);
            }
            // normalization for lower bounding; copy so the recurrence keeps its own state
            transformed[t] = normalizeDFT(Arrays.copyOf(mftData, l), stds[t]);
        }
        return transformed;
    }
    /**
     * Fills means[] and stds[] with the mean and population standard deviation of every
     * sliding window of the given length, computed incrementally in a single pass
     * using running sums of values and squares.
     */
    private void calcIncrementalMeanStddev(int windowLength, double[] series, double[] means, double[] stds) {
        double sum = 0;
        double squareSum = 0;
        // it is faster to multiply than to divide
        double rWindowLength = 1.0 / (double) windowLength;
        double[] tsData = series;
        for (int ww = 0; ww < windowLength; ww++) {
            sum += tsData[ww];
            squareSum += tsData[ww] * tsData[ww];
        }
        means[0] = sum * rWindowLength;
        double buf = squareSum * rWindowLength - means[0] * means[0];
        stds[0] = buf > 0 ? Math.sqrt(buf) : 0;
        for (int w = 1, end = tsData.length - windowLength + 1; w < end; w++) {
            sum += tsData[w + windowLength - 1] - tsData[w - 1];
            means[w] = sum * rWindowLength;
            squareSum += tsData[w + windowLength - 1] * tsData[w + windowLength - 1] - tsData[w - 1] * tsData[w - 1];
            buf = squareSum * rWindowLength - means[w] * means[w];
            stds[w] = buf > 0 ? Math.sqrt(buf) : 0;
        }
    }
    /** Real part of the complex product (r1 + im1*i) * (r2 + im2*i). */
    private static double complexMulReal(double r1, double im1, double r2, double im2) {
        return r1 * r2 - im1 * im2;
    }
    /** Imaginary part of the complex product (r1 + im1*i) * (r2 + im2*i). */
    private static double complexMulImag(double r1, double im1, double r2, double im2) {
        return r1 * im2 + r2 * im1;
    }
    /** Real part of e^(2*pi*i*u/M). */
    private static double realephi(double u, double M) {
        return Math.cos(2 * Math.PI * u / M);
    }
    /** Imaginary part of e^(-2*pi*i*u/M). */
    private static double complexephi(double u, double M) {
        return -Math.sin(2 * Math.PI * u / M);
    }
protected double[][] disjointWindows(double [] data) {
int amount = (int)Math.ceil(data.length/(double)windowSize);
double[][] subSequences = new double[amount][windowSize];
for (int win = 0; win < amount; ++win) {
int offset = Math.min(win*windowSize, data.length-windowSize);
//copy the elements windowStart to windowStart+windowSize from data into
//the subsequence matrix at position windowStart
System.arraycopy(data,offset,subSequences[win],0,windowSize);
}
return subSequences;
}
    /**
     * Multiple Coefficient Binning: learns, for each Fourier coefficient position
     * ("letter"), alphabetSize-1 breakpoints giving equi-depth bins over the
     * coefficients of all disjoint windows of all training series. Sets and returns
     * the breakpoints field, used later to discretise words for train AND test data.
     */
    protected double[][] MCB(Instances data) {
        double[][][] dfts = new double[data.numInstances()][][];
        int sample = 0;
        for (Instance inst : data)
            dfts[sample++] = performDFT(disjointWindows(toArrayNoClass(inst))); //approximation
        int numInsts = dfts.length;
        int numWindowsPerInst = dfts[0].length;
        int totalNumWindows = numInsts*numWindowsPerInst;
        breakpoints = new double[wordLength][alphabetSize];
        for (int letter = 0; letter < wordLength; ++letter) { //for each dft coeff
            //extract this column from all windows in all instances
            double[] column = new double[totalNumWindows];
            for (int inst = 0; inst < numInsts; ++inst)
                for (int window = 0; window < numWindowsPerInst; ++window) {
                    //rounding dft coefficients to reduce noise
                    column[(inst * numWindowsPerInst) + window] = Math.round(dfts[inst][window][letter]*100.0)/100.0;
                }
            //sort, and run through to find breakpoints for equi-depth bins
            Arrays.sort(column);
            double binIndex = 0;
            double targetBinDepth = (double)totalNumWindows / (double)alphabetSize;
            for (int bp = 0; bp < alphabetSize-1; ++bp) {
                binIndex += targetBinDepth;
                breakpoints[letter][bp] = column[(int)binIndex];
            }
            breakpoints[letter][alphabetSize-1] = Double.MAX_VALUE; //last one can always = infinity
        }
        return breakpoints;
    }
/**
* Builds a brand new boss bag from the passed fourier transformed data, rather than from
* looking up existing transforms from earlier builds (i.e. SFAWords).
*
* to be used e.g to transform new test instances
*/
protected Bag createBagSingle(double[][] dfts) {
Bag bag = new Bag();
BitWordInt lastWord = new BitWordInt();
for (double[] d : dfts) {
BitWordInt word = createWord(d);
//add to bag, unless num reduction applies
if (numerosityReduction && word.equals(lastWord))
continue;
Integer val = bag.get(word);
if (val == null)
val = 0;
bag.put(word, ++val);
lastWord = word;
}
return bag;
}
    /**
     * Discretises one window's Fourier coefficients into an SFA word: each coefficient
     * becomes the index of the first MCB breakpoint it does not exceed. Relies on the
     * last breakpoint per letter being Double.MAX_VALUE, so a letter is always pushed.
     */
    protected BitWordInt createWord(double[] dft) {
        BitWordInt word = new BitWordInt();
        for (int l = 0; l < wordLength; ++l) //for each letter
            for (int bp = 0; bp < alphabetSize; ++bp) //run through breakpoints until right one found
                if (dft[l] <= breakpoints[l][bp]) {
                    word.push(bp); //add corresponding letter to word
                    break;
                }
        return word;
    }
/**
* @return data of passed instance in a double array with the class value removed if present
*/
protected static double[] toArrayNoClass(Instance inst) {
int length = inst.numAttributes();
if (inst.classIndex() >= 0)
--length;
double[] data = new double[length];
for (int i=0, j=0; i < inst.numAttributes(); ++i)
if (inst.classIndex() != i)
data[j++] = inst.value(i);
return data;
}
    /**
     * @return BOSSTransform-ed bag (MFT approximation + discretisation/bagging),
     *         built using current parameters, carrying the instance's class value
     */
    public Bag BOSSTransform(Instance inst) {
        double[][] mfts = performMFT(toArrayNoClass(inst)); //approximation
        Bag bag = createBagSingle(mfts); //discretisation/bagging
        bag.setClassVal(inst.classValue());
        return bag;
    }
    /**
     * Shortens all bags in this BOSS instance (histograms) to the newWordLength; if wordlengths
     * are the same, this instance is returned UNCHANGED. Requires SFAwords to still be present
     * (i.e. clean() must not have been called).
     *
     * @param newWordLength wordLength to shorten it to
     * @return new boss classifier with newWordLength, or passed in classifier if wordlengths are same
     * @throws Exception if newWordLength is greater than the current length or less than 2
     */
    public IndividualBOSS buildShortenedBags(int newWordLength) throws Exception {
        if (newWordLength == wordLength) //case of first iteration of word length search in ensemble
            return this;
        if (newWordLength > wordLength)
            throw new Exception("Cannot incrementally INCREASE word length, current:"+wordLength+", requested:"+newWordLength);
        if (newWordLength < 2)
            throw new Exception("Invalid wordlength requested, current:"+wordLength+", requested:"+newWordLength);
        IndividualBOSS newBoss = new IndividualBOSS(this, newWordLength);
        //build hists with new word length from SFA words, and copy over the class values of original insts
        for (int i = 0; i < bags.size(); ++i) {
            Bag newBag = createBagFromWords(newWordLength, SFAwords[i]);
            newBag.setClassVal(bags.get(i).getClassVal());
            newBoss.bags.add(newBag);
        }
        return newBoss;
    }
/**
* Builds a bag from the set of words for a pre-transformed series of a given wordlength.
*/
protected Bag createBagFromWords(int thisWordLength, BitWordInt[] words) {
Bag bag = new Bag();
BitWordInt lastWord = new BitWordInt();
for (BitWordInt w : words) {
BitWordInt word = new BitWordInt(w);
if (wordLength != thisWordLength)
word.shorten(BitWordInt.MAX_LENGTH-thisWordLength);
//add to bag, unless num reduction applies
if (numerosityReduction && word.equals(lastWord))
continue;
Integer val = bag.get(word);
if (val == null)
val = 0;
bag.put(word, ++val);
lastWord = word;
}
return bag;
}
protected BitWordInt[] createSFAwords(Instance inst) {
double[][] dfts = performMFT(toArrayNoClass(inst)); //approximation
BitWordInt[] words = new BitWordInt[dfts.length];
for (int window = 0; window < dfts.length; ++window)
words[window] = createWord(dfts[window]);//discretisation
return words;
}
    /**
     * Builds the classifier: learns MCB breakpoints, transforms every train series
     * into SFA words and bags them. When multiThread is set, the per-instance
     * transform is farmed out to an executor (each TransformThread also fills its
     * SFAwords slot); bags are collected in instance order either way.
     */
    @Override
    public void buildClassifier(Instances data) throws Exception {
        if (data.classIndex() != -1 && data.classIndex() != data.numAttributes()-1)
            throw new Exception("BOSS_BuildClassifier: Class attribute not set as last attribute in dataset");
        breakpoints = MCB(data); //breakpoints to be used for making sfa words for train AND test data
        SFAwords = new BitWordInt[data.numInstances()][];
        bags = new ArrayList<>(data.numInstances());
        rand = new Random(seed);
        if (multiThread){
            if (numThreads == 1) numThreads = Runtime.getRuntime().availableProcessors();
            if (ex == null) ex = Executors.newFixedThreadPool(numThreads);
            ArrayList<Future<Bag>> futures = new ArrayList<>(data.numInstances());
            for (int inst = 0; inst < data.numInstances(); ++inst)
                futures.add(ex.submit(new TransformThread(inst, data.get(inst))));
            for (Future<Bag> f: futures)
                bags.add(f.get());
        }
        else {
            for (int inst = 0; inst < data.numInstances(); ++inst) {
                SFAwords[inst] = createSFAwords(data.get(inst));
                Bag bag = createBagFromWords(wordLength, SFAwords[inst]);
                try {
                    bag.setClassVal(data.get(inst).classValue());
                }
                catch(UnassignedClassException e){
                    //unlabelled data: mark with sentinel class value
                    bag.setClassVal(-1);
                }
                bags.add(bag);
            }
        }
        if (cleanAfterBuild) {
            clean(); //drop SFAwords to save memory; disables buildShortenedBags()
        }
    }
/**
* Computes BOSS distance between two bags d(test, train), is NON-SYMETRIC operation, ie d(a,b) != d(b,a).
*
* Quits early if the dist-so-far is greater than bestDist (assumed dist is still the squared distance), and returns Double.MAX_VALUE
*
* @return distance FROM instA TO instB, or Double.MAX_VALUE if it would be greater than bestDist
*/
public double BOSSdistance(Bag instA, Bag instB, double bestDist) {
double dist = 0.0;
//find dist only from values in instA
for (Map.Entry<BitWordInt, Integer> entry : instA.entrySet()) {
Integer valA = entry.getValue();
Integer valB = instB.get(entry.getKey());
if (valB == null)
valB = 0;
dist += (valA-valB)*(valA-valB);
if (dist > bestDist)
return Double.MAX_VALUE;
}
return dist;
}
    /**
     * Transforms the test instance into a bag and returns the class of its
     * 1-nearest train bag under the (asymmetric) BOSS distance, with early abandon.
     */
    @Override
    public double classifyInstance(Instance instance) throws Exception{
        IndividualBOSS.Bag testBag = BOSSTransform(instance);
        //1NN BOSS distance
        double bestDist = Double.MAX_VALUE;
        double nn = -1;
        for (int i = 0; i < bags.size(); ++i) {
            double dist = BOSSdistance(testBag, bags.get(i), bestDist);
            if (dist < bestDist) {
                bestDist = dist;
                nn = bags.get(i).getClassVal();
            }
        }
        return nn;
    }
    /**
     * Used within BOSSEnsemble as part of a leave-one-out crossvalidation, to skip having to rebuild
     * the classifier every time (since the n histograms would be identical each time anyway), therefore this classifies
     * the instance at the index passed while ignoring its own corresponding histogram
     *
     * @param testIndex index of instance to classify
     * @return classification
     */
    public double classifyInstance(int testIndex) throws Exception{
        IndividualBOSS.Bag testBag = bags.get(testIndex);
        //1NN BOSS distance
        double bestDist = Double.MAX_VALUE;
        double nn = -1;
        for (int i = 0; i < bags.size(); ++i) {
            if (i == testIndex) //skip 'this' one, leave-one-out
                continue;
            double dist = BOSSdistance(testBag, bags.get(i), bestDist);
            if (dist < bestDist) {
                bestDist = dist;
                nn = bags.get(i).getClassVal();
            }
        }
        return nn;
    }
    /** Callable wrapper around classifyInstance(Instance), for parallel test classification. */
    public class TestNearestNeighbourThread implements Callable<Double>{
        Instance inst;
        public TestNearestNeighbourThread(Instance inst){
            this.inst = inst;
        }
        /** @return predicted class value of the 1-nearest train bag */
        @Override
        public Double call() {
            IndividualBOSS.Bag testBag = BOSSTransform(inst);
            //1NN BOSS distance
            double bestDist = Double.MAX_VALUE;
            double nn = -1;
            for (int i = 0; i < bags.size(); ++i) {
                double dist = BOSSdistance(testBag, bags.get(i), bestDist);
                if (dist < bestDist) {
                    bestDist = dist;
                    nn = bags.get(i).getClassVal();
                }
            }
            return nn;
        }
    }
    /** Callable wrapper around classifyInstance(int) (leave-one-out), for parallel train estimates. */
    public class TrainNearestNeighbourThread implements Callable<Double>{
        int testIndex;
        public TrainNearestNeighbourThread(int testIndex){
            this.testIndex = testIndex;
        }
        /** @return predicted class value, ignoring the instance's own histogram */
        @Override
        public Double call() {
            IndividualBOSS.Bag testBag = bags.get(testIndex);
            //1NN BOSS distance
            double bestDist = Double.MAX_VALUE;
            double nn = -1;
            for (int i = 0; i < bags.size(); ++i) {
                if (i == testIndex) //skip 'this' one, leave-one-out
                    continue;
                double dist = BOSSdistance(testBag, bags.get(i), bestDist);
                if (dist < bestDist) {
                    bestDist = dist;
                    nn = bags.get(i).getClassVal();
                }
            }
            return nn;
        }
    }
    /** Callable that transforms one train instance: fills SFAwords[i] and returns its bag. */
    private class TransformThread implements Callable<Bag>{
        int i;
        Instance inst;
        public TransformThread(int i, Instance inst){
            this.i = i;
            this.inst = inst;
        }
        @Override
        public Bag call() {
            SFAwords[i] = createSFAwords(inst);
            Bag bag = createBagFromWords(wordLength, SFAwords[i]);
            try {
                bag.setClassVal(inst.classValue());
            }
            catch(UnassignedClassException e){
                //unlabelled data: mark with sentinel class value
                bag.setClassVal(-1);
            }
            return bag;
        }
    }
}
| 25,353 | 34.019337 | 153 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/dictionary_based/IndividualTDE.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.dictionary_based;
import com.carrotsearch.hppc.*;
import com.carrotsearch.hppc.cursors.*;
import evaluation.storage.ClassifierResults;
import tsml.classifiers.EnhancedAbstractClassifier;
import tsml.classifiers.MultiThreadable;
import tsml.classifiers.dictionary_based.bitword.BitWord;
import tsml.classifiers.dictionary_based.bitword.BitWordInt;
import tsml.classifiers.dictionary_based.bitword.BitWordLong;
import tsml.data_containers.TimeSeriesInstance;
import tsml.data_containers.TimeSeriesInstances;
import tsml.data_containers.utilities.Converter;
import utilities.generic_storage.SerialisableComparablePair;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.UnassignedClassException;
import java.util.*;
import java.util.concurrent.*;
/**
* Improved BOSS classifier to be used with known parameters, for ensemble use TDE.
*
* Current implementation of BitWord as of 18/03/2020 only supports alphabetsize of 4, which is the expected value
* as defined in the original BOSS paper
*
* Params: wordLength, alphabetSize, windowLength, normalise, levels, IGB
*
* @author Matthew Middlehurst
*/
public class IndividualTDE extends EnhancedAbstractClassifier implements Comparable<IndividualTDE>,
MultiThreadable {
    //all sfa words found in original buildClassifier(), no numerosity reduction/shortening applied
    private BitWord[/*instance*/][/*windowindex*/] SFAwords;
    //histograms of words of the current wordlength with numerosity reduction applied (if selected)
    private ArrayList<Bag> bags;
    //breakpoints to be found by MCB or IGB
    private double[/*letterindex*/][/*breakpointsforletter*/] breakpoints;
    protected int windowSize;    // sliding window length
    protected int wordLength;    // number of Fourier coefficients kept per word
    protected int alphabetSize;  // letters per coefficient (BitWord supports 4)
    protected boolean norm;      // if true, drop the mean (first) coefficient
    protected int levels;        // spatial pyramid depth
    protected boolean IGB;       // use Information Gain Binning instead of MCB
    protected boolean histogramIntersection = true;  // similarity measure toggle; false => BOSS distance
    protected boolean useBigrams = true;             // also count word pairs one window apart
    protected boolean useFeatureSelection = false;   // chi-squared word filtering
    protected double levelWeighting = 0.5;           // legacy pyramid weighting base (see applyPyramidWeights)
    protected boolean numerosityReduction = true;    // collapse runs of identical consecutive words
    protected double inverseSqrtWindowSize;          // cached 1/sqrt(windowSize) normalising factor
    protected boolean cleanAfterBuild = false;       // drop SFAwords after training to save memory
    protected int seriesLength;
    //feature selection
    private ObjectHashSet<SerialisableComparablePair<BitWord, Byte>> chiSquare; // words surviving chi-squared selection
    protected int chiLimit = 2;                      // chi-squared retention threshold
    // ensemble bookkeeping, set externally by TDE
    protected int ensembleID = -1;
    protected double accuracy = -1;
    protected double weight = 1;
    protected ArrayList<Integer> subsampleIndices;
    protected ArrayList<Integer> trainPreds;
    // threading
    protected boolean multiThread = false;
    protected int numThreads = 1;
    protected ExecutorService ex;
    // last-prediction info for visualisation, populated when savePredInfo is set
    private boolean savePredInfo = false;
    private int lastNNIdx;
    private Bag lastNNBag;
    private static final long serialVersionUID = 2L;
    /**
     * Full constructor specifying all transform parameters plus threading setup.
     * @param wordLength   number of Fourier coefficients kept per word
     * @param alphabetSize letters per coefficient (only 4 is supported by BitWord)
     * @param windowSize   sliding window length
     * @param normalise    if true, the mean/first coefficient is dropped
     * @param levels       spatial pyramid depth
     * @param IGB          use Information Gain Binning instead of MCB
     * @param multiThread  transform instances in parallel during build
     * @param numThreads   thread count; a value of 1 is replaced with all available processors at build time
     * @param ex           executor to reuse, may be null (created lazily)
     */
    public IndividualTDE(int wordLength, int alphabetSize, int windowSize, boolean normalise, int levels, boolean IGB,
                         boolean multiThread, int numThreads, ExecutorService ex) {
        super(CANNOT_ESTIMATE_OWN_PERFORMANCE);
        this.wordLength = wordLength;
        this.alphabetSize = alphabetSize;
        this.windowSize = windowSize;
        this.inverseSqrtWindowSize = 1.0 / Math.sqrt(windowSize);
        this.norm = normalise;
        this.levels = levels;
        this.IGB = IGB;
        this.multiThread = multiThread;
        this.numThreads = numThreads;
        this.ex = ex;
    }
    /** Single-threaded constructor; see the full constructor for parameter meanings. */
    public IndividualTDE(int wordLength, int alphabetSize, int windowSize, boolean normalise, int levels, boolean IGB) {
        super(CANNOT_ESTIMATE_OWN_PERFORMANCE);
        this.wordLength = wordLength;
        this.alphabetSize = alphabetSize;
        this.windowSize = windowSize;
        this.inverseSqrtWindowSize = 1.0 / Math.sqrt(windowSize);
        this.norm = normalise;
        this.levels = levels;
        this.IGB = IGB;
    }
    /**
     * Used when shortening histograms, copies 'meta' data over, but with shorter
     * word length, actual shortening happens separately.
     * The SFA words and breakpoints are SHARED (not copied) with the source
     * classifier; only the bag list is allocated fresh.
     */
    public IndividualTDE(IndividualTDE boss, int wordLength) {
        super(CANNOT_ESTIMATE_OWN_PERFORMANCE);
        this.wordLength = wordLength;
        this.windowSize = boss.windowSize;
        this.inverseSqrtWindowSize = boss.inverseSqrtWindowSize;
        this.alphabetSize = boss.alphabetSize;
        this.norm = boss.norm;
        this.levels = boss.levels;
        this.IGB = boss.IGB;
        this.histogramIntersection = boss.histogramIntersection;
        this.useBigrams = boss.useBigrams;
        this.useFeatureSelection = boss.useFeatureSelection;
        this.levelWeighting = boss.levelWeighting;
        this.numerosityReduction = boss.numerosityReduction;
        this.cleanAfterBuild = boss.cleanAfterBuild;
        this.seriesLength = boss.seriesLength;
        this.multiThread = boss.multiThread;
        this.numThreads = boss.numThreads;
        this.ex = boss.ex;
        this.seed = boss.seed;
        this.rand = boss.rand;
        this.numClasses = boss.numClasses;
        // the multivariate subclass manages its own per-dimension words/bags/breakpoints
        if (!(boss instanceof MultivariateIndividualTDE)) {
            this.SFAwords = boss.SFAwords;
            this.bags = new ArrayList<>(boss.bags.size());
            this.breakpoints = boss.breakpoints;
        }
    }
    /** Orders individuals by accuracy (ascending), used when ranking ensemble members. */
    @Override
    public int compareTo(IndividualTDE o) {
        return Double.compare(this.accuracy, o.accuracy);
    }
    /**
     * Sets the thread count used for parallel transforms.
     * NOTE(review): this does not set the {@code multiThread} flag itself, which is
     * only set via the constructor — confirm that is the intended contract.
     */
    @Override
    public void enableMultiThreading(int numThreads) {
        this.numThreads = numThreads;
    }
    //map of <word, level> => count
    /**
     * Histogram for a single series: maps (SFA word, pyramid quadrant) pairs to
     * occurrence counts. Bigrams are stored under quadrant -1 (see createSPBagSingle).
     * Carries the class value of the series it was built from (-1 when unlabelled).
     */
    public static class Bag extends HashMap<SerialisableComparablePair<BitWord, Byte>, Integer> {
        private int classVal;
        public Bag() {
            super();
        }
        public Bag(int classValue) {
            super();
            classVal = classValue;
        }
        public int getClassVal() { return classVal; }
        public void setClassVal(int classVal) { this.classVal = classVal; }
    }
    // --- simple accessors for transform parameters and learned state ---
    public int getWindowSize() { return windowSize; }
    public int getWordLength() { return wordLength; }
    public int getAlphabetSize() { return alphabetSize; }
    public boolean getNorm() { return norm; }
    public int getLevels() { return levels; }
    public boolean getIGB() { return IGB; }
    public ArrayList<Bag> getBags() { return bags; }
    public int getEnsembleID() { return ensembleID; }
    public double getAccuracy() { return accuracy; }
    public double getWeight() { return weight; }
    public ArrayList<Integer> getSubsampleIndices() { return subsampleIndices; }
    public ArrayList<Integer> getTrainPreds() { return trainPreds; }
    public double[][] getBreakpoints() { return breakpoints; }
    public int getLastNNIdx() { return lastNNIdx; }
    public Bag getLastNNBag() { return lastNNBag; }
    // --- simple mutators, used by the TDE ensemble during construction ---
    public void setSeed(int i){ seed = i; }
    public void setCleanAfterBuild(boolean b){ cleanAfterBuild = b; }
    public void setEnsembleID(int i) { ensembleID = i; }
    public void setAccuracy(double d) { accuracy = d; }
    public void setWeight(double d) { weight = d; }
    public void setSubsampleIndices(ArrayList<Integer> arr) { subsampleIndices = arr; }
    public void setTrainPreds(ArrayList<Integer> arr) { trainPreds = arr; }
    public void setHistogramIntersection(boolean b) { histogramIntersection = b; }
    public void setUseBigrams(boolean b) { useBigrams = b; }
    public void setUseFeatureSelection(boolean b) { useFeatureSelection = b; }
    /** Frees the stored per-instance SFA words (the largest structure) once no longer needed. */
    public void clean() {
        SFAwords = null;
    }
protected double[][] performDFT(double[][] windows) {
double[][] dfts = new double[windows.length][wordLength];
for (int i = 0; i < windows.length; ++i) {
dfts[i] = DFT(windows[i]);
}
return dfts;
}
protected double stdDev(double[] series) {
double sum = 0.0;
double squareSum = 0.0;
for (int i = 0; i < windowSize; i++) {
sum += series[i];
squareSum += series[i]*series[i];
}
double mean = sum / series.length;
double variance = squareSum / series.length - mean*mean;
return variance > 0 ? Math.sqrt(variance) : 1.0;
}
    /**
     * Normalised discrete Fourier transform of one window, returning only the
     * first wordLength/2 complex coefficients as a flat {re, im, re, im, ...}
     * array. When norm is set the first (mean) coefficient is skipped.
     * Coefficients are divided by the window's standard deviation and sqrt(windowSize).
     */
    protected double[] DFT(double[] series) {
        //taken from FFT.java but
        //return just a double[] size n, { real1, imag1, ... realn/2, imagn/2 }
        //instead of Complex[] size n/2
        //only calculating first wordlength/2 coefficients (output values),
        //and skipping first coefficient if the data is to be normalised
        int n=series.length;
        int outputLength = wordLength/2;
        int start = (norm ? 1 : 0);
        //normalize the disjoint windows and sliding windows by dividing them by their standard deviation
        //all Fourier coefficients are divided by sqrt(windowSize)
        double normalisingFactor = inverseSqrtWindowSize / stdDev(series);
        double[] dft=new double[outputLength*2];
        for (int k = start; k < start + outputLength; k++) { // For each output element
            // float accumulators — presumably inherited from the original SFA code; confirm precision is intended
            float sumreal = 0;
            float sumimag = 0;
            for (int t = 0; t < n; t++) { // For each input element
                sumreal += series[t]*Math.cos(2*Math.PI * t * k / n);
                sumimag += -series[t]*Math.sin(2*Math.PI * t * k / n);
            }
            dft[(k-start)*2] = sumreal * normalisingFactor;
            dft[(k-start)*2+1] = sumimag * normalisingFactor;
        }
        return dft;
    }
    /**
     * As {@link #DFT(double[])} but without any normalisation, used to seed the
     * incremental MFT; normalisation is applied later via normalizeDFT.
     */
    protected double[] DFTunnormed(double[] series) {
        //taken from FFT.java but
        //return just a double[] size n, { real1, imag1, ... realn/2, imagn/2 }
        //instead of Complex[] size n/2
        //only calculating first wordlength/2 coefficients (output values),
        //and skipping first coefficient if the data is to be normalised
        int n=series.length;
        int outputLength = wordLength/2;
        int start = (norm ? 1 : 0);
        double[] dft = new double[outputLength*2];
        double twoPi = 2*Math.PI / n;
        for (int k = start; k < start + outputLength; k++) { // For each output element
            // float accumulators — presumably inherited from the original SFA code; confirm precision is intended
            float sumreal = 0;
            float sumimag = 0;
            for (int t = 0; t < n; t++) { // For each input element
                sumreal += series[t]*Math.cos(twoPi * t * k);
                sumimag += -series[t]*Math.sin(twoPi * t * k);
            }
            dft[(k-start)*2] = sumreal;
            dft[(k-start)*2+1] = sumimag;
        }
        return dft;
    }
protected double[] normalizeDFT(double[] dft, double std) {
double normalisingFactor = (std > 0 ? 1.0 / std : 1.0) * inverseSqrtWindowSize;
for (int i = 0; i < dft.length; i++)
dft[i] *= normalisingFactor;
return dft;
}
    /**
     * Momentary Fourier Transform: computes the (normalised) DFT of every
     * sliding window of the series incrementally — the first window via a full
     * DFT, each subsequent window by a constant-time update using the phase
     * factors in {@code phis}. Returns one coefficient array per window.
     */
    protected double[][] performMFT(double[] series) {
        // ignore DC value?
        int startOffset = norm ? 2 : 0;  // 2 = skip one complex (re,im) pair when normalising
        int l = wordLength;
        l = l + l % 2; // make it even
        double[] phis = new double[l];
        // per-coefficient phase shift factors e^(-2*pi*i*u/windowSize)
        for (int u = 0; u < phis.length; u += 2) {
            double uHalve = -(u + startOffset) / 2; //intentional int
            phis[u] = realephi(uHalve, windowSize);
            phis[u + 1] = complexephi(uHalve, windowSize);
        }
        // means and stddev for each sliding window
        int end = Math.max(1, series.length - windowSize + 1);
        double[] means = new double[end];
        double[] stds = new double[end];
        calcIncrementalMeanStddev(windowSize, series, means, stds);
        // holds the DFT of each sliding window
        double[][] transformed = new double[end][];
        double[] mftData = null;
        for (int t = 0; t < end; t++) {
            // use the MFT
            if (t > 0) {
                // slide by one: add the entering sample, drop the leaving one, rotate phase
                for (int k = 0; k < l; k += 2) {
                    double real1 = (mftData[k] + series[t + windowSize - 1] - series[t - 1]);
                    double imag1 = (mftData[k + 1]);
                    double real = complexMulReal(real1, imag1, phis[k], phis[k + 1]);
                    double imag = complexMulImag(real1, imag1, phis[k], phis[k + 1]);
                    mftData[k] = real;
                    mftData[k + 1] = imag;
                }
            } // use the DFT for the first offset
            else {
                mftData = Arrays.copyOf(series, windowSize);
                mftData = DFTunnormed(mftData);
            }
            // normalization for lower bounding
            transformed[t] = normalizeDFT(Arrays.copyOf(mftData, l), stds[t]);
        }
        return transformed;
    }
    /**
     * Single-pass computation of the mean and standard deviation of every
     * sliding window of {@code windowLength}, written into the caller-allocated
     * {@code means} and {@code stds} arrays. Negative variances produced by
     * floating-point error are clamped to 0.
     */
    protected void calcIncrementalMeanStddev(int windowLength, double[] series, double[] means, double[] stds) {
        double sum = 0;
        double squareSum = 0;
        // it is faster to multiply than to divide
        double rWindowLength = 1.0 / (double) windowLength;
        for (int ww = 0; ww < windowLength; ww++) {
            sum += series[ww];
            squareSum += series[ww] * series[ww];
        }
        means[0] = sum * rWindowLength;
        double buf = squareSum * rWindowLength - means[0] * means[0];
        stds[0] = buf > 0 ? Math.sqrt(buf) : 0;
        // slide: add the entering sample, subtract the leaving one
        for (int w = 1, end = series.length - windowLength + 1; w < end; w++) {
            sum += series[w + windowLength - 1] - series[w - 1];
            means[w] = sum * rWindowLength;
            squareSum += series[w + windowLength - 1] * series[w + windowLength - 1] - series[w - 1] * series[w - 1];
            buf = squareSum * rWindowLength - means[w] * means[w];
            stds[w] = buf > 0 ? Math.sqrt(buf) : 0;
        }
    }
    // complex multiplication (r1 + im1*i)(r2 + im2*i): real and imaginary parts separately
    protected static double complexMulReal(double r1, double im1, double r2, double im2) { return r1 * r2 - im1 * im2; }
    protected static double complexMulImag(double r1, double im1, double r2, double im2) { return r1 * im2 + r2 * im1; }
    // real/imaginary parts of e^(2*pi*i*u/M), the MFT phase rotation factors (u is negated by the caller)
    protected static double realephi(double u, double M) { return Math.cos(2 * Math.PI * u / M); }
    protected static double complexephi(double u, double M) { return -Math.sin(2 * Math.PI * u / M); }
protected double[][] disjointWindows(double[] data) {
int amount = (int)Math.ceil(data.length/(double)windowSize);
double[][] subSequences = new double[amount][windowSize];
for (int win = 0; win < amount; ++win) {
int offset = Math.min(win*windowSize, data.length-windowSize);
//copy the elements windowStart to windowStart+windowSize from data into
//the subsequence matrix at position windowStart
System.arraycopy(data,offset,subSequences[win],0,windowSize);
}
return subSequences;
}
    /**
     * Multiple Coefficient Binning: learns equi-depth breakpoints per Fourier
     * coefficient from the disjoint windows of dimension {@code d} of the data.
     * @return wordLength x alphabetSize breakpoint matrix; the last bin of each
     *         letter is unbounded (Double.MAX_VALUE)
     */
    private double[][] MCB(double[][][] data, int d) {
        double[][][] dfts = new double[data.length][][];
        int sample = 0;
        for (int i = 0; i < data.length; i++) {
            double[][] windows = disjointWindows(data[i][d]);
            dfts[sample++] = performDFT(windows); //approximation
        }
        int numInsts = dfts.length;
        int numWindowsPerInst = dfts[0].length;
        int totalNumWindows = numInsts*numWindowsPerInst;
        double[][] breakpoints = new double[wordLength][alphabetSize];
        for (int letter = 0; letter < wordLength; ++letter) { //for each dft coeff
            //extract this column from all windows in all instances
            double[] column = new double[totalNumWindows];
            for (int inst = 0; inst < numInsts; ++inst)
                for (int window = 0; window < numWindowsPerInst; ++window) {
                    //rounding dft coefficients to reduce noise
                    column[(inst * numWindowsPerInst) + window] = Math.round(dfts[inst][window][letter]*100.0)/100.0;
                }
            //sort, and run through to find breakpoints for equi-depth bins
            Arrays.sort(column);
            double binIndex = 0;
            double targetBinDepth = (double)totalNumWindows / (double)alphabetSize;
            for (int bp = 0; bp < alphabetSize-1; ++bp) {
                binIndex += targetBinDepth;
                breakpoints[letter][bp] = column[(int)binIndex];
            }
            breakpoints[letter][alphabetSize-1] = Double.MAX_VALUE; //last one can always = infinity
        }
        return breakpoints;
    }
    //IGB code by Patrick Schafer from the WEASEL class
    /**
     * Information Gain Binning: learns supervised breakpoints per Fourier
     * coefficient by recursively choosing split points that maximise information
     * gain over the class labels of dimension {@code d}.
     * @return wordLength x alphabetSize breakpoint matrix; the last bin of each
     *         letter is unbounded (Double.MAX_VALUE)
     */
    private double[][] IGB(double[][][] data, int d, int[] labels) {
        // one (coefficient value, class label) orderline per letter
        ArrayList<SerialisableComparablePair<Double,Integer>>[] orderline = new ArrayList[wordLength];
        for (int i = 0; i < orderline.length; i++) {
            orderline[i] = new ArrayList<>();
        }
        for (int i = 0; i < data.length; i++) {
            double[][] windows = disjointWindows(data[i][d]);
            double[][] dfts = performDFT(windows); //approximation
            for (double[] dft : dfts) {
                for (int n = 0; n < dft.length; n++) {
                    // round to 2 decimal places to reduce noise
                    double value = Math.round(dft[n] * 100.0) / 100.0;
                    orderline[n].add(new SerialisableComparablePair<>(value, labels[i]));
                }
            }
        }
        double[][] breakpoints = new double[wordLength][alphabetSize];
        for (int i = 0; i < orderline.length; i++) {
            if (!orderline[i].isEmpty()) {
                Collections.sort(orderline[i]);
                ArrayList<Integer> splitPoints = new ArrayList<>();
                findBestSplit(orderline[i], 0, orderline[i].size(), alphabetSize, splitPoints);
                Collections.sort(splitPoints);
                for (int n = 0; n < splitPoints.size(); n++) {
                    breakpoints[i][n] = orderline[i].get(splitPoints.get(n) + 1).var1;
                }
                breakpoints[i][alphabetSize-1] = Double.MAX_VALUE;
            }
        }
        return breakpoints;
    }
    /**
     * Recursively finds up to remainingSymbols-1 split positions in the sorted
     * orderline [start, end) that maximise information gain, appending chosen
     * positions to {@code splitPoints}. Taken from the WEASEL implementation.
     */
    protected void findBestSplit(List<SerialisableComparablePair<Double,Integer>> element, int start, int end,
                                 int remainingSymbols, List<Integer> splitPoints) {
        double bestGain = -1;
        int bestPos = -1;
        // class entropy
        IntIntHashMap cIn = new IntIntHashMap();
        IntIntHashMap cOut = new IntIntHashMap();
        for (int pos = start; pos < end; pos++) {
            cOut.putOrAdd(element.get(pos).var2, 1, 1);
        }
        double class_entropy = entropy(cOut, end - start);
        int lastLabel = element.get(start).var2;
        moveElement(element, cIn, cOut, start);
        for (int split = start + 1; split < end - 1; split++) {
            int label = element.get(split).var2;
            moveElement(element, cIn, cOut, split);
            // only inspect changes of the label
            if (label != lastLabel) {
                double gain = calculateInformationGain(cIn, cOut, class_entropy);
                gain = Math.round(gain * 1000.0) / 1000.0; // round for 4 decimal places
                if (gain >= bestGain) {
                    bestPos = split;
                    bestGain = gain;
                }
            }
            lastLabel = label;
        }
        if (bestPos > -1) {
            splitPoints.add(bestPos);
            // recursive split
            remainingSymbols = remainingSymbols / 2;
            if (remainingSymbols > 1) {
                if (bestPos - start > 2 && end - bestPos > 2) { // enough data points?
                    findBestSplit(element, start, bestPos, remainingSymbols, splitPoints);
                    findBestSplit(element, bestPos, end, remainingSymbols, splitPoints);
                } else if (end - bestPos > 4) { // enough data points?
                    findBestSplit(element, bestPos, (end - bestPos) / 2, remainingSymbols, splitPoints);
                    findBestSplit(element, (end - bestPos) / 2, end, remainingSymbols, splitPoints);
                } else if (bestPos - start > 4) { // enough data points?
                    findBestSplit(element, start, (bestPos - start) / 2, remainingSymbols, splitPoints);
                    findBestSplit(element, (bestPos - start) / 2, end, remainingSymbols, splitPoints);
                }
            }
        }
    }
    /**
     * Entropy of a label frequency map over {@code total} observations.
     * NOTE(review): the constant 0.6931... is ln(2) and is MULTIPLIED rather than
     * divided, so the result is not entropy in bits; the scaling is uniform across
     * all calls (code taken from the WEASEL implementation), so relative gain
     * comparisons are unaffected — confirm intentional.
     */
    protected double entropy(IntIntHashMap frequency, double total) {
        double entropy = 0;
        double log2 = 0.6931471805599453;
        for (IntCursor element : frequency.values()) {
            double p = element.value / total;
            if (p > 0) {
                entropy -= p * Math.log(p) * log2;
            }
        }
        return entropy;
    }
    /**
     * Information gain of a candidate split given the label counts on each side.
     * NOTE(review): uses IntIntHashMap.size() — the number of DISTINCT labels —
     * as the partition totals rather than summed counts; copied as-is from the
     * WEASEL source, confirm intentional.
     */
    protected double calculateInformationGain(IntIntHashMap cIn, IntIntHashMap cOut,
                                              double class_entropy) {
        double total_c_in = cIn.size();
        double total_c_out = cOut.size();
        double total = total_c_in + total_c_out;
        return class_entropy
                - total_c_in / total * entropy(cIn, total_c_in)
                - total_c_out / total * entropy(cOut, total_c_out);
    }
protected void moveElement(List<SerialisableComparablePair<Double,Integer>> element, IntIntHashMap cIn,
IntIntHashMap cOut, int pos) {
cIn.putOrAdd(element.get(pos).var2, 1, 1);
cOut.putOrAdd(element.get(pos).var2, -1, -1);
}
    /**
     * Chi-squared feature selection over the training bags: retains only words
     * whose observed class-conditional document frequency deviates from the
     * expected frequency by at least {@code chiLimit}, storing the survivors in
     * {@code chiSquare}, then rebuilds every training bag with only those words.
     */
    private void trainChiSquared() {
        // Chi2 Test
        ObjectIntHashMap<SerialisableComparablePair<BitWord, Byte>> featureCount
                = new ObjectIntHashMap<>(bags.get(0).size());
        DoubleDoubleHashMap classProb = new DoubleDoubleHashMap(10);
        DoubleObjectHashMap<ObjectIntHashMap<SerialisableComparablePair<BitWord, Byte>>> observed
                = new DoubleObjectHashMap<>(bags.get(0).size());
        // count number of samples with this word
        for (Bag bag : bags) {
            if (!observed.containsKey(bag.classVal)) {
                observed.put(bag.classVal, new ObjectIntHashMap<>());
            }
            for (Map.Entry<SerialisableComparablePair<BitWord, Byte>, Integer> word : bag.entrySet()) {
                if (word.getValue() > 0) {
                    featureCount.putOrAdd(word.getKey(), 1, 1);
                    observed.get(bag.classVal).putOrAdd(word.getKey(), 1, 1);
                }
            }
            classProb.putOrAdd(bag.classVal, 1, 1);
        }
        // chi-squared: observed minus expected occurrence
        chiSquare = new ObjectHashSet<>(featureCount.size());
        for (DoubleDoubleCursor classLabel : classProb) {
            classLabel.value /= bags.size(); // convert count to class prior
            if (observed.get(classLabel.key) != null) {
                ObjectIntHashMap<SerialisableComparablePair<BitWord, Byte>> observe = observed.get(classLabel.key);
                for (ObjectIntCursor<SerialisableComparablePair<BitWord, Byte>> feature : featureCount) {
                    double expected = classLabel.value * feature.value;
                    double chi = observe.get(feature.key) - expected;
                    double newChi = chi * chi / expected;
                    if (newChi >= chiLimit && !chiSquare.contains(feature.key)) {
                        chiSquare.add(feature.key);
                    }
                }
            }
        }
        // best elements above limit
        for (int i = 0; i < bags.size(); i++) {
            Bag newBag = new Bag(bags.get(i).classVal);
            for (Map.Entry<SerialisableComparablePair<BitWord, Byte>, Integer> cursor : bags.get(i).entrySet()) {
                if (chiSquare.contains(cursor.getKey())) {
                    newBag.put(cursor.getKey(), cursor.getValue());
                }
            }
            bags.set(i, newBag);
        }
    }
private Bag filterChiSquared(Bag bag) {
Bag newBag = new Bag(bag.classVal);
for (Map.Entry<SerialisableComparablePair<BitWord, Byte>, Integer> cursor : bag.entrySet()) {
if (chiSquare.contains(cursor.getKey())) {
newBag.put(cursor.getKey(), cursor.getValue());
}
}
return newBag;
}
    /**
     * Builds a brand new boss bag from the passed fourier transformed data, rather than from
     * looking up existing transforms from earlier builds (i.e. SFAWords).
     *
     * to be used e.g to transform new test instances.
     * Applies numerosity reduction (collapsing runs of identical consecutive
     * words), optional bigrams (word pairs one full window apart, stored under
     * quadrant -1), and spatial pyramid weighting.
     */
    private Bag createSPBagSingle(double[][] dfts) {
        Bag bag = new Bag();
        BitWord lastWord = new BitWordInt();
        BitWord[] words = new BitWord[dfts.length];
        int wInd = 0;
        int trivialMatchCount = 0; // length of the current run of identical words
        for (double[] d : dfts) {
            BitWord word = createWord(d);
            words[wInd] = word;
            if (useBigrams) {
                // pair this word with the one a full window earlier
                if (wInd - windowSize >= 0) {
                    BitWord bigram = new BitWordLong(words[wInd - windowSize], word);
                    SerialisableComparablePair<BitWord, Byte> key = new SerialisableComparablePair<>(bigram, (byte) -1);
                    bag.merge(key, 1, Integer::sum);
                }
            }
            //add to bag, unless num reduction applies
            if (numerosityReduction && word.equals(lastWord)) {
                ++trivialMatchCount;
                ++wInd;
            }
            else {
                //if a run of equivalent words, those words essentially representing the same
                //elongated pattern. still apply numerosity reduction, however use the central
                //time position of the elongated pattern to represent its position
                addWordToPyramid(word, wInd - (trivialMatchCount/2), bag);
                lastWord = word;
                trivialMatchCount = 0;
                ++wInd;
            }
        }
        applyPyramidWeights(bag);
        return bag;
    }
private BitWord createWord(double[] dft) {
BitWord word = new BitWordInt();
for (int l = 0; l < wordLength; ++l) //for each letter
for (int bp = 0; bp < alphabetSize; ++bp) //run through breakpoints until right one found
if (dft[l] <= breakpoints[l][bp]) {
word.push(bp); //add corresponding letter to word
break;
}
return word;
}
/**
* @return data of passed instance in a double array with the class value removed if present
*/
protected static double[] toArrayNoClass(Instance inst) {
int length = inst.numAttributes();
if (inst.classIndex() >= 0)
--length;
double[] data = new double[length];
for (int i=0, j=0; i < inst.numAttributes(); ++i)
if (inst.classIndex() != i)
data[j++] = inst.value(i);
return data;
}
    /**
     * @return BOSSSpatialPyramidsTransform-ed bag, built using current parameters.
     * Transforms the first dimension of the instance: MFT over all sliding
     * windows, then discretisation/bagging with pyramid weighting.
     */
    private Bag BOSSSpatialPyramidsTransform(TimeSeriesInstance inst) {
        double[][] mfts = performMFT(inst.toValueArray()[0]); //approximation
        Bag bag = createSPBagSingle(mfts); //discretisation/bagging
        bag.setClassVal(inst.getLabelIndex());
        return bag;
    }
    /**
     * Shortens all bags in this BOSSSpatialPyramids_Redo instance (histograms) to the newWordLength, if wordlengths
     * are same, instance is UNCHANGED
     *
     * @param newWordLength wordLength to shorten it to
     * @return new boss classifier with newWordLength, or passed in classifier if wordlengths are same
     * @throws Exception if the requested length is longer than the current one or below 2
     */
    public IndividualTDE buildShortenedSPBags(int newWordLength) throws Exception {
        if (newWordLength == wordLength) //case of first iteration of word length search in ensemble
            return this;
        if (newWordLength > wordLength)
            throw new Exception("Cannot incrementally INCREASE word length, current:"+wordLength+", requested:"
                    +newWordLength);
        if (newWordLength < 2)
            throw new Exception("Invalid wordlength requested, current:"+wordLength+", requested:"+newWordLength);
        // shares SFA words/breakpoints with this instance; only bags are rebuilt
        IndividualTDE newBoss = new IndividualTDE(this, newWordLength);
        //build hists with new word length from SFA words, and copy over the class values of original insts
        for (int i = 0; i < bags.size(); ++i) {
            Bag newSPBag = createSPBagFromWords(newWordLength, SFAwords[i]);
            newSPBag.setClassVal(bags.get(i).classVal);
            newBoss.bags.add(newSPBag);
        }
        return newBoss;
    }
    /**
     * Builds a bag from the set of words for a pre-transformed series of a given wordlength.
     * Mirrors createSPBagSingle but starts from stored SFA words, shortening each
     * to the requested length first when needed.
     */
    private Bag createSPBagFromWords(int thisWordLength, BitWord[] words) {
        Bag bag = new Bag();
        BitWord lastWord = new BitWordInt();
        BitWord[] newWords = new BitWord[words.length];
        int wInd = 0;
        int trivialMatchCount = 0; //keeps track of how many words have been the same so far
        for (BitWord w : words) {
            BitWord word = new BitWordInt(w);
            if (wordLength != thisWordLength)
                word.shorten(16-thisWordLength); //max word length, no classifier currently uses past 16.
            newWords[wInd] = word;
            if (useBigrams) {
                // pair this word with the one a full window earlier; quadrant -1 marks bigrams
                if (wInd - windowSize >= 0) {
                    BitWord bigram = new BitWordLong(newWords[wInd - windowSize], word);
                    SerialisableComparablePair<BitWord, Byte> key = new SerialisableComparablePair<>(bigram, (byte) -1);
                    bag.merge(key, 1, Integer::sum);
                }
            }
            //add to bag, unless num reduction applies
            if (numerosityReduction && word.equals(lastWord)) {
                ++trivialMatchCount;
                ++wInd;
            }
            else {
                //if a run of equivalent words, those words essentially representing the same
                //elongated pattern. still apply numerosity reduction, however use the central
                //time position to represent its position
                addWordToPyramid(word, wInd - (trivialMatchCount/2), bag);
                lastWord = word;
                trivialMatchCount = 0;
                ++wInd;
            }
        }
        applyPyramidWeights(bag);
        return bag;
    }
public void changeNumLevels(int newLevels) {
//curently, simply remaking bags from words
//alternatively: un-weight all bags, add(run through SFAwords again)/remove levels, re-weight all
if (newLevels == this.levels)
return;
this.levels = newLevels;
for (int inst = 0; inst < bags.size(); ++inst) {
Bag bag = createSPBagFromWords(wordLength, SFAwords[inst]); //rebuild bag
bag.setClassVal(bags.get(inst).classVal);
bags.set(inst, bag); //overwrite old
}
}
protected void applyPyramidWeights(Bag bag) {
for (Map.Entry<SerialisableComparablePair<BitWord, Byte>, Integer> ent : bag.entrySet()) {
//find level that this quadrant is on
int quadrant = ent.getKey().var2;
int qEnd = 0;
int level = 0;
while (qEnd < quadrant) {
int numQuadrants = (int)Math.pow(2, ++level);
qEnd+=numQuadrants;
}
//double val = ent.getValue() * (Math.pow(levelWeighting, levels-level-1)); //weighting ^ (levels - level)
int val = ent.getValue() * (int)Math.pow(2,level);
bag.put(ent.getKey(), val);
}
}
    /**
     * Increments the count of {@code word} once per pyramid level, keyed by the
     * quadrant (time region) that the window's centre falls into at that level.
     */
    private void addWordToPyramid(BitWord word, int wInd, Bag bag) {
        int qStart = 0; //for this level, whats the start index for quadrants
        //e.g level 0 = 0
        //    level 1 = 1
        //    level 2 = 3
        for (int l = 0; l < levels; ++l) {
            //need to do the cell finding thing in the regular grid
            int numQuadrants = (int)Math.pow(2, l);
            int quadrantSize = seriesLength / numQuadrants;
            int pos = wInd + (windowSize/2); //use the middle of the window as its position
            int quadrant = qStart + (pos/quadrantSize);
            SerialisableComparablePair<BitWord, Byte> key = new SerialisableComparablePair<>(word, (byte)quadrant);
            bag.merge(key, 1, Integer::sum);
            qStart += numQuadrants;
        }
    }
private BitWord[] createSFAwords(double[] inst) {
double[][] dfts = performMFT(inst); //approximation
BitWord[] words = new BitWord[dfts.length];
for (int window = 0; window < dfts.length; ++window) {
words[window] = createWord(dfts[window]);//discretisation
}
return words;
}
    /**
     * Builds the transform for this parameter set: learns breakpoints (IGB or
     * MCB) on the first dimension of the training data, converts every training
     * series into SFA words and a bag (optionally in parallel), then optionally
     * applies chi-squared feature selection and frees the word store.
     */
    @Override
    public void buildClassifier(TimeSeriesInstances data) throws Exception {
        trainResults = new ClassifierResults();
        rand.setSeed(seed);
        numClasses = data.numClasses();
        trainResults.setEstimatorName(getClassifierName());
        trainResults.setParas(getParameters());
        trainResults.setBuildTime(System.nanoTime()); // start timestamp; converted to elapsed time below
        double[][][] split = data.toValueArray();
        if (IGB) breakpoints = IGB(split, 0, data.getClassIndexes());
        else breakpoints = MCB(split, 0); //breakpoints to be used for making sfa words for train
                                          //AND test data
        SFAwords = new BitWord[data.numInstances()][];
        bags = new ArrayList<>(data.numInstances());
        seriesLength = data.getMaxLength();
        if (multiThread){
            if (numThreads == 1) numThreads = Runtime.getRuntime().availableProcessors();
            if (ex == null) ex = Executors.newFixedThreadPool(numThreads);
            ArrayList<Future<Bag>> futures = new ArrayList<>(data.numInstances());
            for (int inst = 0; inst < data.numInstances(); ++inst)
                futures.add(ex.submit(new TransformThread(inst, data.get(inst))));
            for (Future<Bag> f: futures)
                bags.add(f.get());
        }
        else {
            for (int inst = 0; inst < data.numInstances(); ++inst) {
                SFAwords[inst] = createSFAwords(data.get(inst).toValueArray()[0]);
                Bag bag = createSPBagFromWords(wordLength, SFAwords[inst]);
                bag.setClassVal(data.get(inst).getLabelIndex());
                bags.add(bag);
            }
        }
        if (useFeatureSelection) trainChiSquared();
        if (cleanAfterBuild) {
            clean();
        }
        //end train time in nanoseconds
        trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
        trainResults.setBuildTime(System.nanoTime() - trainResults.getBuildTime());
    }
    /** Weka adapter: converts to the tsml data container and delegates. */
    @Override
    public void buildClassifier(Instances data) throws Exception {
        buildClassifier(Converter.fromArff(data));
    }
    /**
     * Computes BOSS distance between two bags d(test, train), is NON-SYMETRIC operation,
     * ie d(a,b) != d(b,a).
     *
     * Quits early if the dist-so-far is greater than bestDist (assumed is in fact the dist still squared),
     * and returns Double.MAX_VALUE
     *
     * NOTE(review): words absent from instB are counted as 1 rather than 0 here,
     * unlike the original BOSS formulation — confirm intentional for TDE.
     *
     * @return distance FROM instA TO instB, or Double.MAX_VALUE if it would be greater than bestDist
     */
    public double BOSSdistance(Bag instA, Bag instB, double bestDist) {
        double dist = 0.0;
        //find dist only from values in instA
        for (Map.Entry<SerialisableComparablePair<BitWord, Byte>, Integer> entry : instA.entrySet()) {
            Integer valA = entry.getValue();
            Integer valB = instB.get(entry.getKey());
            if (valB == null) valB = 1;
            dist += (valA-valB)*(valA-valB);
            if (dist > bestDist)
                return Double.MAX_VALUE; // early abandon
        }
        return dist;
    }
public double histogramIntersection(Bag instA, Bag instB) {
//min vals of keys that exist in only one of the bags will always be 0
//therefore want to only bother looking at counts of words in both bags
//therefore will simply loop over words in a, skipping those that dont appear in b
//no need to loop over b, since only words missed will be those not in a anyway
double sim = 0.0;
for (Map.Entry<SerialisableComparablePair<BitWord, Byte>, Integer> entry : instA.entrySet()) {
Integer valA = entry.getValue();
Integer valB = instB.get(entry.getKey());
if (valB == null)
continue;
sim += Math.min(valA,valB);
}
return sim;
}
    /**
     * 1-NN classification of an unseen series against the training bags, using
     * histogram intersection (negated so smaller is better) or BOSS distance.
     */
    @Override
    public double classifyInstance(TimeSeriesInstance instance) throws Exception{
        Bag testBag = BOSSSpatialPyramidsTransform(instance);
        if (useFeatureSelection) testBag = filterChiSquared(testBag);
        //1NN distance
        double bestDist = Double.MAX_VALUE;
        int nn = 0;
        for (int i = 0; i < bags.size(); ++i) {
            double dist;
            if (histogramIntersection)
                dist = -histogramIntersection(testBag, bags.get(i));
            else dist = BOSSdistance(testBag, bags.get(i), bestDist);
            if (dist < bestDist) {
                bestDist = dist;
                nn = i;
            }
        }
        if (savePredInfo) {
            // assumes subsampleIndices has been set by the ensemble — TODO confirm non-null when savePredInfo is enabled
            lastNNIdx = subsampleIndices.get(nn);
            lastNNBag = testBag;
        }
        return bags.get(nn).getClassVal();
    }
    /** Weka adapter: converts to the tsml data container and delegates. */
    @Override
    public double classifyInstance(Instance instance) throws Exception{
        return classifyInstance(Converter.fromArff(instance));
    }
@Override
public double[] distributionForInstance(TimeSeriesInstance instance) throws Exception{
double pred = classifyInstance(instance);
double[] probs = new double[numClasses];
probs[(int)pred] = 1;
return probs;
}
    /** Weka adapter: converts to the tsml data container and delegates. */
    @Override
    public double[] distributionForInstance(Instance instance) throws Exception{
        return distributionForInstance(Converter.fromArff(instance));
    }
    /**
     * Used within BOSSEnsemble as part of a leave-one-out crossvalidation, to skip having to rebuild
     * the classifier every time (since the n histograms would be identical each time anyway), therefore this classifies
     * the instance at the index passed while ignoring its own corresponding histogram
     *
     * @param testIndex index of instance to classify
     * @return classification
     */
    public double classifyInstance(int testIndex) throws Exception{
        Bag testBag = bags.get(testIndex);
        //1NN distance
        double bestDist = Double.MAX_VALUE;
        int nn = 0;
        for (int i = 0; i < bags.size(); ++i) {
            if (i == testIndex) //skip 'this' one, leave-one-out
                continue;
            double dist;
            if (histogramIntersection)
                dist = -histogramIntersection(testBag, bags.get(i)); // negated so smaller is better
            else dist = BOSSdistance(testBag, bags.get(i), bestDist);
            if (dist < bestDist) {
                bestDist = dist;
                nn = i;
            }
        }
        return bags.get(nn).getClassVal();
    }
    /**
     * Visualisation helper: computes the Fourier coefficients of the FIRST
     * sliding window of {@code inst} and writes the corresponding SFA word into
     * the passed {@code word} (mutated in place).
     * @return the first window's DFT coefficients
     */
    public double[] firstWordVis(TimeSeriesInstance inst, BitWord word) {
        double[] dft = performMFT(inst.toValueArray()[0])[0];
        word.setWord(createWord(dft).getWord());
        word.setLength((byte) wordLength);
        return dft;
    }
    /**
     * Worker performing a full 1-NN classification of a single unseen instance
     * against all training bags, for parallelised testing.
     */
    public class TestNearestNeighbourThread implements Callable<Double>{
        TimeSeriesInstance inst; // the series to classify
        public TestNearestNeighbourThread(TimeSeriesInstance inst){
            this.inst = inst;
        }
        /** @return class value of the nearest training bag */
        @Override
        public Double call() {
            Bag testBag = BOSSSpatialPyramidsTransform(inst);
            if (useFeatureSelection) testBag = filterChiSquared(testBag);
            //1NN distance
            double bestDist = Double.MAX_VALUE;
            double nn = 0;
            for (Bag bag : bags) {
                double dist;
                if (histogramIntersection)
                    dist = -histogramIntersection(testBag, bag); // negated so smaller is better
                else dist = BOSSdistance(testBag, bag, bestDist);
                if (dist < bestDist) {
                    bestDist = dist;
                    nn = bag.classVal;
                }
            }
            return nn;
        }
    }
    /**
     * Callable for leave-one-out classification of training instance testIndex:
     * 1-NN over the stored bags, skipping the instance's own histogram.
     */
    public class TrainNearestNeighbourThread implements Callable<Double>{
        int testIndex;

        public TrainNearestNeighbourThread(int testIndex){
            this.testIndex = testIndex;
        }

        /** @return the class value of the nearest other training bag */
        @Override
        public Double call() {
            Bag testBag = bags.get(testIndex);

            //1NN distance
            double bestDist = Double.MAX_VALUE;
            double nn = 0;

            for (int i = 0; i < bags.size(); ++i) {
                if (i == testIndex) //skip 'this' one, leave-one-out
                    continue;

                double dist;
                if (histogramIntersection)
                    dist = -histogramIntersection(testBag, bags.get(i));
                else dist = BOSSdistance(testBag, bags.get(i), bestDist);

                if (dist < bestDist) {
                    bestDist = dist;
                    nn = bags.get(i).classVal;
                }
            }

            return nn;
        }
    }
    /** Callable that builds the SFA words and histogram bag for training instance i. */
    private class TransformThread implements Callable<Bag>{
        int i;
        TimeSeriesInstance inst;

        public TransformThread(int i, TimeSeriesInstance inst){
            this.i = i;
            this.inst = inst;
        }

        @Override
        public Bag call() {
            SFAwords[i] = createSFAwords(inst.toValueArray()[0]);

            Bag bag = createSPBagFromWords(wordLength, SFAwords[i]);
            try {
                bag.setClassVal(inst.getLabelIndex());
            }
            catch(UnassignedClassException e){
                // instance carries no label; mark the bag with -1
                bag.setClassVal(-1);
            }
            return bag;
        }
    }
}
| 42,655 | 36.681979 | 120 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/dictionary_based/MultivariateIndividualTDE.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.dictionary_based;
import com.carrotsearch.hppc.DoubleDoubleHashMap;
import com.carrotsearch.hppc.DoubleObjectHashMap;
import com.carrotsearch.hppc.ObjectHashSet;
import com.carrotsearch.hppc.ObjectIntHashMap;
import com.carrotsearch.hppc.cursors.DoubleDoubleCursor;
import com.carrotsearch.hppc.cursors.ObjectIntCursor;
import evaluation.storage.ClassifierResults;
import tsml.classifiers.dictionary_based.bitword.BitWord;
import tsml.classifiers.dictionary_based.bitword.BitWordInt;
import tsml.classifiers.dictionary_based.bitword.BitWordLong;
import tsml.data_containers.TimeSeriesInstance;
import tsml.data_containers.TimeSeriesInstances;
import utilities.generic_storage.SerialisableComparablePair;
import java.io.Serializable;
import java.util.*;
import java.util.concurrent.*;
/**
* Improved BOSS classifier to be used with known parameters, for ensemble use TDE.
*
* Current implementation of BitWord as of 18/03/2020 only supports alphabetsize of 4, which is the expected value
* as defined in the original BOSS paper
*
* Params: wordLength, alphabetSize, windowLength, normalise, levels, IGB
*
* @author Matthew Middlehurst
*/
public class MultivariateIndividualTDE extends IndividualTDE {
//all sfa words found in original buildClassifier(), no numerosity reduction/shortening applied
private BitWord[/*dimension*/][/*instance*/][/*windowindex*/] SFAwords;
//histograms of words of the current wordlength with numerosity reduction applied (if selected)
private ArrayList<BagMV> bags;
//dft transforms for each series found during breakpoint calculation
double[][][][] breakpointDFT;
//breakpoints to be found by MCB or IGB
private double[/*dimension*/][/*letterindex*/][/*breakpointsforletter*/] breakpoints;
//feature selection
private ObjectHashSet<Word> chiSquare;
//dimension selection
private double dimensionCutoffThreshold = 0.85;
private int maxNoDimensions = -1;
private ArrayList<Integer> dimensionSubsample;
private static final long serialVersionUID = 2L;
    /**
     * Full constructor, allowing the ensemble to share a thread pool when
     * multi-threading the build.
     */
    public MultivariateIndividualTDE(int wordLength, int alphabetSize, int windowSize, boolean normalise, int levels,
                                    boolean IGB, boolean multiThread, int numThreads, ExecutorService ex) {
        super(wordLength, alphabetSize, windowSize, normalise, levels, IGB, multiThread, numThreads, ex);
    }

    /** Single-threaded constructor with the core parameter set. */
    public MultivariateIndividualTDE(int wordLength, int alphabetSize, int windowSize, boolean normalise, int levels,
                                    boolean IGB) {
        super(wordLength, alphabetSize, windowSize, normalise, levels, IGB);
    }
    /**
     * Used when shortening histograms, copies 'meta' data over, but with shorter
     * word length, actual shortening happens separately (see buildShortenedSPBags).
     * The SFA words and breakpoints are shared with the source classifier, not copied.
     */
    public MultivariateIndividualTDE(MultivariateIndividualTDE boss, int wordLength) {
        super(boss, wordLength);

        this.SFAwords = boss.SFAwords;
        this.bags = new ArrayList<>(boss.bags.size());
        this.breakpoints = boss.breakpoints;
    }
    //map of <word, level, dimension> => count
    public static class BagMV extends HashMap<Word, Integer> implements Serializable {
        // class value of the series this histogram was built from
        private int classVal;

        public BagMV() {
            super();
        }

        public BagMV(int classValue) {
            super();
            classVal = classValue;
        }

        public int getClassVal() { return classVal; }
        public void setClassVal(int classVal) { this.classVal = classVal; }
    }
public static class Word implements Serializable {
BitWord word;
byte level;
int dimension;
public Word(BitWord word, byte level, int dimension) {
this.word = word;
this.level = level;
this.dimension = dimension;
}
@Override
public boolean equals(Object o) {
if (o instanceof Word) {
return word.equals(((Word) o).word) && dimension == ((Word) o).dimension && level == ((Word) o).level;
}
return false;
}
@Override
public int hashCode() {
int result = 1;
result = 31 * result + word.hashCode();
result = 31 * result + Byte.hashCode(level);
result = 31 * result + Integer.hashCode(dimension);
return result;
}
@Override
public String toString(){
return "[" + word + "," + level + "," + dimension + "]";
}
}
    /** @return the per-instance histograms built during training */
    public ArrayList<BagMV> getMultivariateBags() { return bags; }

    /** Fraction of the best dimension's accuracy a dimension must reach to be kept. */
    public void setDimensionCutoffThreshold(double d) { dimensionCutoffThreshold = d; }
    /** Caps the number of dimensions kept after selection; values <= 0 mean no cap. */
    public void setMaxNoDimensions(int i) { maxNoDimensions = i; }
    /**
     * Multiple Coefficient Binning: finds equi-depth breakpoints for each DFT
     * coefficient of dimension d, so letters are assigned with roughly equal
     * frequency over the training windows. Also caches the per-window DFTs in
     * breakpointDFT[d] for later use by selectDimensions.
     */
    protected double[][] MCB(double[][][] data, int d) {
        breakpointDFT[d] = new double[data.length][][];

        int sample = 0;
        for (int i = 0; i < data.length; i++) {
            double[][] windows = disjointWindows(data[i][d]);
            breakpointDFT[d][sample++] = performDFT(windows); //approximation
        }

        int numInsts = breakpointDFT[d].length;
        int numWindowsPerInst = breakpointDFT[d][0].length;
        int totalNumWindows = numInsts*numWindowsPerInst;

        double[][] breakpoints = new double[wordLength][alphabetSize];

        for (int letter = 0; letter < wordLength; ++letter) { //for each dft coeff

            //extract this column from all windows in all instances
            double[] column = new double[totalNumWindows];
            for (int inst = 0; inst < numInsts; ++inst)
                for (int window = 0; window < numWindowsPerInst; ++window) {
                    //rounding dft coefficients to reduce noise
                    column[(inst * numWindowsPerInst) + window] = Math.round(
                            breakpointDFT[d][inst][window][letter]*100.0)/100.0;
                }

            //sort, and run through to find breakpoints for equi-depth bins
            Arrays.sort(column);

            double binIndex = 0;
            double targetBinDepth = (double)totalNumWindows / (double)alphabetSize;

            for (int bp = 0; bp < alphabetSize-1; ++bp) {
                binIndex += targetBinDepth;
                breakpoints[letter][bp] = column[(int)binIndex];
            }

            breakpoints[letter][alphabetSize-1] = Double.MAX_VALUE; //last one can always = infinity
        }

        return breakpoints;
    }
    //IGB code by Patrick Schafer from the WEASEL class
    /**
     * Information Gain Binning: chooses breakpoints per DFT coefficient that
     * best separate the class labels (contrast with MCB's equi-depth bins).
     * Also caches the per-window DFTs in breakpointDFT[d].
     */
    protected double[][] IGB(double[][][] data, int d, int[] labels) {
        ArrayList<SerialisableComparablePair<Double,Integer>>[] orderline = new ArrayList[wordLength];
        for (int i = 0; i < orderline.length; i++) {
            orderline[i] = new ArrayList<>();
        }

        breakpointDFT[d] = new double[data.length][][];
        for (int i = 0; i < data.length; i++) {
            double[][] windows = disjointWindows(data[i][d]);
            breakpointDFT[d][i] = performDFT(windows); //approximation

            for (double[] dft : breakpointDFT[d][i]) {
                for (int n = 0; n < dft.length; n++) {
                    // round to 2 decimal places to reduce noise
                    double value = Math.round(dft[n] * 100.0) / 100.0;

                    orderline[n].add(new SerialisableComparablePair<>(value, labels[i]));
                }
            }
        }

        double[][] breakpoints = new double[wordLength][alphabetSize];

        for (int i = 0; i < orderline.length; i++) {
            if (!orderline[i].isEmpty()) {
                Collections.sort(orderline[i]);

                ArrayList<Integer> splitPoints = new ArrayList<>();
                findBestSplit(orderline[i], 0, orderline[i].size(), alphabetSize, splitPoints);

                Collections.sort(splitPoints);

                for (int n = 0; n < splitPoints.size(); n++) {
                    breakpoints[i][n] = orderline[i].get(splitPoints.get(n) + 1).var1;
                }
                breakpoints[i][alphabetSize-1] = Double.MAX_VALUE;
            }
        }

        return breakpoints;
    }
    /** Frees the stored SFA words once no longer needed, to save memory. */
    @Override
    public void clean() {
        SFAwords = null;
    }
    /**
     * Chi-squared feature selection: keeps only the words whose observed class
     * distribution deviates from the expected one by at least chiLimit, then
     * rebuilds every training bag containing only the surviving words.
     */
    private void trainChiSquared() {
        // Chi2 Test
        ObjectIntHashMap<Word> featureCount
                = new ObjectIntHashMap<>(bags.get(0).size());
        DoubleDoubleHashMap classProb = new DoubleDoubleHashMap(10);
        DoubleObjectHashMap<ObjectIntHashMap<Word>> observed
                = new DoubleObjectHashMap<>(bags.get(0).size());

        // count number of samples with this word
        for (BagMV bag : bags) {
            if (!observed.containsKey(bag.classVal)) {
                observed.put(bag.classVal, new ObjectIntHashMap<>());
            }

            for (Map.Entry<Word, Integer> word : bag.entrySet()) {
                if (word.getValue() > 0) {
                    featureCount.putOrAdd(word.getKey(), 1, 1);
                    observed.get(bag.classVal).putOrAdd(word.getKey(), 1, 1);
                }
            }

            classProb.putOrAdd(bag.classVal, 1, 1);
        }

        // chi-squared: observed minus expected occurrence
        chiSquare = new ObjectHashSet<>(featureCount.size());
        for (DoubleDoubleCursor classLabel : classProb) {
            classLabel.value /= bags.size(); // class count -> class prior

            if (observed.get(classLabel.key) != null) {
                ObjectIntHashMap<Word> observe = observed.get(classLabel.key);

                for (ObjectIntCursor<Word> feature : featureCount) {
                    double expected = classLabel.value * feature.value;

                    double chi = observe.get(feature.key) - expected;
                    double newChi = chi * chi / expected;

                    if (newChi >= chiLimit && !chiSquare.contains(feature.key)) {
                        chiSquare.add(feature.key);
                    }
                }
            }
        }

        // best elements above limit
        for (int i = 0; i < bags.size(); i++) {
            BagMV newBag = new BagMV(bags.get(i).classVal);

            for (Map.Entry<Word, Integer> cursor : bags.get(i).entrySet()) {
                if (chiSquare.contains(cursor.getKey())) {
                    newBag.put(cursor.getKey(), cursor.getValue());
                }
            }

            bags.set(i, newBag);
        }
    }
private BagMV filterChiSquared(BagMV bag) {
BagMV newBag = new BagMV(bag.classVal);
for (Map.Entry<Word, Integer> cursor : bag.entrySet()) {
if (chiSquare.contains(cursor.getKey())) {
newBag.put(cursor.getKey(), cursor.getValue());
}
}
return newBag;
}
    /**
     * Builds a brand new boss bag from the passed fourier transformed data, rather than from
     * looking up existing transforms from earlier builds (i.e. SFAWords).
     *
     * to be used e.g to transform new test instances
     */
    private void addToSPBagSingle(BagMV bag, double[][] dfts, int dimension) {
        BitWord lastWord = new BitWordInt();
        BitWord[] words = new BitWord[dfts.length];

        int wInd = 0;
        int trivialMatchCount = 0;

        for (double[] d : dfts) {
            BitWord word = createWord(d, dimension);
            words[wInd] = word;

            if (useBigrams) {
                // pair with the word one full window back to form a bigram; bigrams use level -1
                if (wInd - windowSize >= 0) {
                    BitWord bigram = new BitWordLong(words[wInd - windowSize], word);

                    Word key = new Word(bigram, (byte) -1, dimension);
                    bag.merge(key, 1, Integer::sum);
                }
            }

            //add to bag, unless num reduction applies
            if (numerosityReduction && word.equals(lastWord)) {
                ++trivialMatchCount;
                ++wInd;
            }
            else {
                //if a run of equivalent words, those words essentially representing the same
                //elongated pattern. still apply numerosity reduction, however use the central
                //time position of the elongated pattern to represent its position
                addWordToPyramid(word, wInd - (trivialMatchCount/2), bag, dimension);

                lastWord = word;
                trivialMatchCount = 0;
                ++wInd;
            }
        }
    }
    /**
     * Discretises a single window's DFT approximation into a word, using the
     * breakpoints learned for the given dimension.
     */
    private BitWord createWord(double[] dft, int dimension) {
        BitWord word = new BitWordInt();
        for (int l = 0; l < wordLength; ++l) //for each letter
            for (int bp = 0; bp < alphabetSize; ++bp) //run through breakpoints until right one found
                if (dft[l] <= breakpoints[dimension][l][bp]) {
                    word.push(bp); //add corresponding letter to word
                    break;
                }

        return word;
    }
    /**
     * @return BOSSSpatialPyramidsTransform-ed bag, built using current parameters.
     * Only the dimensions selected during training (dimensionSubsample) are transformed.
     */
    private BagMV BOSSSpatialPyramidsTransform(TimeSeriesInstance inst) {
        BagMV bag = new BagMV(inst.getLabelIndex());
        double[][] split = inst.toValueArray();

        for (Integer d : dimensionSubsample) {
            double[][] mfts = performMFT(split[d]); //approximation
            addToSPBagSingle(bag, mfts, d); //discretisation/bagging
        }
        applyPyramidWeights(bag);

        return bag;
    }
    /**
     * Shortens all bags in this BOSSSpatialPyramids_Redo instance (histograms) to the newWordLength, if wordlengths
     * are same, instance is UNCHANGED
     *
     * @param newWordLength wordLength to shorten it to
     * @return new boss classifier with newWordLength, or passed in classifier if wordlengths are same
     * @throws Exception if newWordLength is larger than the current length or below 2
     */
    @Override
    public MultivariateIndividualTDE buildShortenedSPBags(int newWordLength) throws Exception {
        if (newWordLength == wordLength) //case of first iteration of word length search in ensemble
            return this;
        if (newWordLength > wordLength)
            throw new Exception("Cannot incrementally INCREASE word length, current:"+wordLength+", requested:"
                    +newWordLength);
        if (newWordLength < 2)
            throw new Exception("Invalid wordlength requested, current:"+wordLength+", requested:"+newWordLength);

        MultivariateIndividualTDE newBoss = new MultivariateIndividualTDE(this, newWordLength);

        //build hists with new word length from SFA words, and copy over the class values of original insts
        for (int i = 0; i < bags.size(); ++i) {
            BagMV newSPBag = new BagMV(bags.get(i).classVal);

            for (int d = 0; d < SFAwords.length; d++) {
                addWordsToSPBag(newSPBag, newWordLength, SFAwords[d][i], d);
            }

            applyPyramidWeights(newSPBag);
            newBoss.bags.add(newSPBag);
        }

        return newBoss;
    }
    /**
     * Builds a bag from the set of words for a pre-transformed series of a given wordlength.
     * Words are copied and shortened to thisWordLength before numerosity
     * reduction and pyramid placement are applied.
     */
    private void addWordsToSPBag(BagMV bag, int thisWordLength, BitWord[] words, int dimension) {
        BitWord lastWord = new BitWordInt();
        BitWord[] newWords = new BitWord[words.length];

        int wInd = 0;
        int trivialMatchCount = 0; //keeps track of how many words have been the same so far

        for (BitWord w : words) {
            BitWord word = new BitWordInt(w);
            if (wordLength != thisWordLength)
                word.shorten(16-thisWordLength); //max word length, no classifier currently uses past 16.
            newWords[wInd] = word;

            if (useBigrams) {
                // pair with the word one full window back; bigram entries use level -1
                if (wInd - windowSize >= 0) {
                    BitWord bigram = new BitWordLong(newWords[wInd - windowSize], word);

                    Word key = new Word(bigram, (byte) -1, dimension);
                    bag.merge(key, 1, Integer::sum);
                }
            }

            //add to bag, unless num reduction applies
            if (numerosityReduction && word.equals(lastWord)) {
                ++trivialMatchCount;
                ++wInd;
            }
            else {
                //if a run of equivalent words, those words essentially representing the same
                //elongated pattern. still apply numerosity reduction, however use the central
                //time position to represent its position
                addWordToPyramid(word, wInd - (trivialMatchCount/2), bag, dimension);

                lastWord = word;
                trivialMatchCount = 0;
                ++wInd;
            }
        }
    }
    /**
     * Changes the number of spatial pyramid levels, rebuilding every training
     * bag from the stored SFA words to match. No-op if the level count is unchanged.
     */
    @Override
    public void changeNumLevels(int newLevels) {
        //curently, simply remaking bags from words
        //alternatively: un-weight all bags, add(run through SFAwords again)/remove levels, re-weight all

        if (newLevels == this.levels)
            return;

        this.levels = newLevels;

        for (int inst = 0; inst < bags.size(); ++inst) {
            BagMV bag = new BagMV(bags.get(inst).classVal); //rebuild bag

            for (int d = 0; d < SFAwords.length; d++) {
                addWordsToSPBag(bag, wordLength, SFAwords[d][inst], d); //rebuild bag
            }

            applyPyramidWeights(bag);
            bags.set(inst, bag); //overwrite old
        }
    }
    /**
     * Weights each histogram count by 2^level, so that counts from finer
     * pyramid levels contribute more. The level is recovered from the quadrant
     * index stored on the key (quadrants are laid out level by level: 0, then
     * 1-2, then 3-6, ...). Bigram keys carry quadrant -1 and so get weight 1.
     */
    private void applyPyramidWeights(BagMV bag) {
        for (Map.Entry<Word, Integer> ent : bag.entrySet()) {
            //find level that this quadrant is on
            int quadrant = ent.getKey().level;
            int qEnd = 0;
            int level = 0;
            while (qEnd < quadrant) {
                int numQuadrants = (int)Math.pow(2, ++level);
                qEnd+=numQuadrants;
            }

            //double val = ent.getValue() * (Math.pow(levelWeighting, levels-level-1)); //weighting ^ (levels - level)
            int val = ent.getValue() * (int)Math.pow(2,level);
            bag.put(ent.getKey(), val);
        }
    }
    /**
     * Adds one occurrence of the word to the bag for every pyramid level, keyed
     * by the quadrant (of that level) containing the window's centre position.
     */
    private void addWordToPyramid(BitWord word, int wInd, BagMV bag, int dimension) {
        int qStart = 0; //for this level, whats the start index for quadrants
        //e.g level 0 = 0
        //    level 1 = 1
        //    level 2 = 3
        for (int l = 0; l < levels; ++l) {
            //need to do the cell finding thing in the regular grid
            int numQuadrants = (int)Math.pow(2, l);
            int quadrantSize = seriesLength / numQuadrants;
            int pos = wInd + (windowSize/2); //use the middle of the window as its position
            int quadrant = qStart + (pos/quadrantSize);

            Word key = new Word(word, (byte)quadrant, dimension);
            bag.merge(key, 1, Integer::sum);

            qStart += numQuadrants;
        }
    }
private BitWord[] createSFAwords(double[] inst, int dimension) {
double[][] dfts = performMFT(inst); //approximation
BitWord[] words = new BitWord[dfts.length];
for (int window = 0; window < dfts.length; ++window) {
words[window] = createWord(dfts[window], dimension);//discretisation
}
return words;
}
    /**
     * Scores each dimension by leave-one-out 1-NN accuracy on single-dimension
     * bags (built from the DFT windows cached during breakpoint calculation),
     * then keeps the dimensions whose accuracy is within
     * dimensionCutoffThreshold of the best. If maxNoDimensions > 0 the kept
     * set is randomly trimmed down to that size. Clears breakpointDFT when done.
     */
    private void selectDimensions(TimeSeriesInstances data, double[][][] split){
        seriesLength = data.getMaxLength();
        double[] accuracies = new double[breakpoints.length];
        for (int d = 0; d < breakpoints.length; d++) {
            ArrayList<Bag> tempBags = new ArrayList<>();

            for (int i = 0; i < split.length; i++){
                Bag bag = new Bag(data.get(i).getLabelIndex());

                for (int n = 0; n < breakpointDFT[d][i].length; n++){
                    BitWord word = createWord(breakpointDFT[d][i][n], d);

                    int qStart = 0; //for this level, whats the start index for quadrants
                    int wInd = n*windowSize; // disjoint windows: window n starts at n*windowSize

                    for (int l = 0; l < levels; ++l) {
                        //need to do the cell finding thing in the regular grid
                        int numQuadrants = (int)Math.pow(2, l);
                        int quadrantSize = seriesLength / numQuadrants;
                        int pos = wInd + (windowSize/2); //use the middle of the window as its position
                        int quadrant = qStart + (pos/quadrantSize);

                        SerialisableComparablePair<BitWord, Byte> key = new SerialisableComparablePair<>(word,
                                (byte)quadrant);
                        bag.merge(key, 1, Integer::sum);

                        qStart += numQuadrants;
                    }
                }

                applyPyramidWeights(bag);
                tempBags.add(bag);
            }

            // leave-one-out 1-NN accuracy for this dimension alone
            for (int n = 0; n < split.length; n++){
                Bag testBag = tempBags.get(n);

                //1NN distance
                double bestDist = Double.MAX_VALUE;
                double nn = 0;

                for (int i = 0; i < tempBags.size(); ++i) {
                    if (i == n) //skip 'this' one, leave-one-out
                        continue;

                    double dist;
                    if (histogramIntersection)
                        dist = -histogramIntersection(testBag, tempBags.get(i));
                    else dist = BOSSdistance(testBag, tempBags.get(i), bestDist);

                    if (dist < bestDist) {
                        bestDist = dist;
                        nn = tempBags.get(i).getClassVal();
                    }
                }

                if (nn == tempBags.get(n).getClassVal()) accuracies[d]++;
            }

            accuracies[d] /= split.length;
        }

        double maxAcc = 0;
        for (int d = 0; d < breakpoints.length; d++) {
            if (accuracies[d] > maxAcc) maxAcc = accuracies[d];
        }

        dimensionSubsample = new ArrayList<>();
        for (int d = 0; d < breakpoints.length; d++) {
            if (accuracies[d] >= maxAcc * dimensionCutoffThreshold) {
                dimensionSubsample.add(d);
            }
        }

        if (maxNoDimensions > 0){
            while (dimensionSubsample.size() > maxNoDimensions){
                dimensionSubsample.remove(rand.nextInt(dimensionSubsample.size()));
            }
        }

        breakpointDFT = null;
    }
    /**
     * Trains the classifier: learns per-dimension breakpoints (IGB or MCB),
     * selects a subsample of dimensions, then builds a weighted histogram bag
     * for every training instance (optionally multi-threaded), followed by
     * optional chi-squared feature selection.
     */
    @Override
    public void buildClassifier(TimeSeriesInstances data) throws Exception {
        trainResults = new ClassifierResults();
        rand.setSeed(seed);
        numClasses = data.numClasses();
        trainResults.setEstimatorName(getClassifierName());
        trainResults.setParas(getParameters());
        // temporarily store the start timestamp; converted to elapsed time at the end
        trainResults.setBuildTime(System.nanoTime());

        double[][][] split = data.toValueArray();

        seriesLength = data.getMaxLength();

        breakpointDFT = new double[data.getMaxNumDimensions()][][][];
        breakpoints = new double[data.getMaxNumDimensions()][][];
        for (int d = 0; d < breakpoints.length; d++) {
            if (IGB) breakpoints[d] = IGB(split, d, data.getClassIndexes());
            else breakpoints[d] = MCB(split, d); //breakpoints to be used for making sfa words for train
                                                 //AND test data
        }

        selectDimensions(data, split);

        SFAwords = new BitWord[data.getMaxNumDimensions()][data.numInstances()][];
        bags = new ArrayList<>(data.numInstances());
        rand = new Random(seed);

        if (multiThread){
            if (numThreads == 1) numThreads = Runtime.getRuntime().availableProcessors();
            if (ex == null) ex = Executors.newFixedThreadPool(numThreads);

            ArrayList<Future<BagMV>> futures = new ArrayList<>(data.numInstances());

            for (int inst = 0; inst < data.numInstances(); ++inst)
                futures.add(ex.submit(new TransformThread(inst, data.get(inst))));

            for (Future<BagMV> f: futures)
                bags.add(f.get());
        }
        else {
            for (int inst = 0; inst < data.numInstances(); ++inst) {
                BagMV bag = new BagMV(data.get(inst).getLabelIndex());

                for (Integer d : dimensionSubsample) {
                    SFAwords[d][inst] = createSFAwords(split[inst][d], d);
                    addWordsToSPBag(bag, wordLength, SFAwords[d][inst], d);
                }

                applyPyramidWeights(bag);
                bags.add(bag);
            }
        }

        if (useFeatureSelection) trainChiSquared();

        if (cleanAfterBuild) {
            clean();
        }

        //end train time in nanoseconds
        trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
        trainResults.setBuildTime(System.nanoTime() - trainResults.getBuildTime());
    }
    /**
     * Computes BOSS distance between two bags d(test, train), is NON-SYMETRIC operation,
     * ie d(a,b) != d(b,a).
     *
     * Quits early if the dist-so-far is greater than bestDist (assumed is in fact the dist still squared),
     * and returns Double.MAX_VALUE
     *
     * @return distance FROM instA TO instB, or Double.MAX_VALUE if it would be greater than bestDist
     */
    public double BOSSdistance(BagMV instA, BagMV instB, double bestDist) {
        double dist = 0.0;

        //find dist only from values in instA
        for (Map.Entry<Word, Integer> entry : instA.entrySet()) {
            Integer valA = entry.getValue();
            Integer valB = instB.get(entry.getKey());
            // NOTE(review): the original BOSS distance treats a word missing
            // from instB as count 0 — confirm the default of 1 here is intentional.
            if (valB == null) valB = 1;
            dist += (valA-valB)*(valA-valB);

            if (dist > bestDist)
                return Double.MAX_VALUE;
        }

        return dist;
    }
public double histogramIntersection(BagMV instA, BagMV instB) {
//min vals of keys that exist in only one of the bags will always be 0
//therefore want to only bother looking at counts of words in both bags
//therefore will simply loop over words in a, skipping those that dont appear in b
//no need to loop over b, since only words missed will be those not in a anyway
double sim = 0.0;
for (Map.Entry<Word, Integer> entry : instA.entrySet()) {
Integer valA = entry.getValue();
Integer valB = instB.get(entry.getKey());
if (valB == null)
continue;
sim += Math.min(valA,valB);
}
return sim;
}
    /**
     * 1-NN classification of an unseen instance: transforms it into a bag
     * (optionally chi-squared filtered) and returns the class of the closest
     * training bag.
     */
    @Override
    public double classifyInstance(TimeSeriesInstance instance) throws Exception{
        BagMV testBag = BOSSSpatialPyramidsTransform(instance);

        if (useFeatureSelection) testBag = filterChiSquared(testBag);

        //1NN distance
        double bestDist = Double.MAX_VALUE;
        double nn = 0;

        for (BagMV bag : bags) {
            double dist;
            // negating the intersection turns the similarity into a distance to minimise
            if (histogramIntersection)
                dist = -histogramIntersection(testBag, bag);
            else dist = BOSSdistance(testBag, bag, bestDist);

            if (dist < bestDist) {
                bestDist = dist;
                nn = bag.getClassVal();
            }
        }

        return nn;
    }
    /**
     * Used within BOSSEnsemble as part of a leave-one-out crossvalidation, to skip having to rebuild
     * the classifier every time (since the n histograms would be identical each time anyway), therefore this
     * classifies the instance at the index passed while ignoring its own corresponding histogram
     *
     * @param testIndex index of instance to classify
     * @return classification
     */
    @Override
    public double classifyInstance(int testIndex) throws Exception{
        BagMV testBag = bags.get(testIndex);

        //1NN distance
        double bestDist = Double.MAX_VALUE;
        double nn = 0;

        for (int i = 0; i < bags.size(); ++i) {
            if (i == testIndex) //skip 'this' one, leave-one-out
                continue;

            double dist;
            if (histogramIntersection)
                dist = -histogramIntersection(testBag, bags.get(i));
            else dist = BOSSdistance(testBag, bags.get(i), bestDist);

            if (dist < bestDist) {
                bestDist = dist;
                nn = bags.get(i).getClassVal();
            }
        }

        return nn;
    }
    /** Callable that transforms and 1-NN classifies a single unseen test instance. */
    public class TestNearestNeighbourThread implements Callable<Double>{
        TimeSeriesInstance inst;

        public TestNearestNeighbourThread(TimeSeriesInstance inst){
            this.inst = inst;
        }

        /** @return the class value of the nearest training bag */
        @Override
        public Double call() {
            BagMV testBag = BOSSSpatialPyramidsTransform(inst);
            if (useFeatureSelection) testBag = filterChiSquared(testBag);

            //1NN distance
            double bestDist = Double.MAX_VALUE;
            double nn = 0;

            for (BagMV bag : bags) {
                double dist;
                if (histogramIntersection)
                    dist = -histogramIntersection(testBag, bag);
                else dist = BOSSdistance(testBag, bag, bestDist);

                if (dist < bestDist) {
                    bestDist = dist;
                    nn = bag.getClassVal();
                }
            }

            return nn;
        }
    }
    /**
     * Callable for leave-one-out classification of training instance testIndex:
     * 1-NN over the stored bags, skipping the instance's own histogram.
     */
    public class TrainNearestNeighbourThread implements Callable<Double>{
        int testIndex;

        public TrainNearestNeighbourThread(int testIndex){
            this.testIndex = testIndex;
        }

        /** @return the class value of the nearest other training bag */
        @Override
        public Double call() {
            BagMV testBag = bags.get(testIndex);

            //1NN distance
            double bestDist = Double.MAX_VALUE;
            double nn = 0;

            for (int i = 0; i < bags.size(); ++i) {
                if (i == testIndex) //skip 'this' one, leave-one-out
                    continue;

                double dist;
                if (histogramIntersection)
                    dist = -histogramIntersection(testBag, bags.get(i));
                else dist = BOSSdistance(testBag, bags.get(i), bestDist);

                if (dist < bestDist) {
                    bestDist = dist;
                    nn = bags.get(i).getClassVal();
                }
            }

            return nn;
        }
    }
private class TransformThread implements Callable<BagMV>{
int i;
TimeSeriesInstance inst;
public TransformThread(int i, TimeSeriesInstance inst){
this.i = i;
this.inst = inst;
}
@Override
public BagMV call() {
BagMV bag = new BagMV(inst.getLabelIndex());
double[][] split = inst.toValueArray();
for (int d = 0; d < inst.getNumDimensions(); d++) {
SFAwords[d][i] = createSFAwords(split[d], d);
addWordsToSPBag(bag, wordLength, SFAwords[d][i], d);
}
applyPyramidWeights(bag);
return bag;
}
}
}
| 31,511 | 35.813084 | 118 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/dictionary_based/SAXVSM.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.dictionary_based;
import experiments.data.DatasetLoading;
import tsml.classifiers.EnhancedAbstractClassifier;
import tsml.transformers.BagOfPatterns;
import utilities.ClassifierTools;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.SparseInstance;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import java.util.concurrent.TimeUnit;
/**
* Classifier using SAX and Vector Space Model.
*
* Params: wordLength, alphabetSize, windowLength
*
* In training, generates class weighting matrix for SAX patterns found in the series,
* in testing uses cosine similarity to find most similar class
* @inproceedings{senin13sax_vsm,
author="P. Senin and S. Malinchik",
title="{SAX-VSM:} Interpretable Time Series Classification Using SAX and Vector Space Model",
booktitle ="Proc. 13th {IEEE ICDM}",
year="2013"
}
* @author James Large
*/
public class SAXVSM extends EnhancedAbstractClassifier implements TechnicalInformationHandler{
Instances transformedData;
Instances corpus;
private BagOfPatterns bop;
private int PAA_intervalsPerWindow;
private int SAX_alphabetSize;
private int windowSize;
private final boolean useParamSearch; //does user want parameter search to be performed
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
result = new TechnicalInformation(TechnicalInformation.Type.ARTICLE);
result.setValue(TechnicalInformation.Field.AUTHOR, "P. Senin and S. Malinchik");
result.setValue(TechnicalInformation.Field.TITLE, "SAX-VSM: Interpretable Time Series Classification Using SAX and Vector Space Model");
result.setValue(TechnicalInformation.Field.JOURNAL, "Proc. 13th IEEE ICDM");
result.setValue(TechnicalInformation.Field.YEAR, "2013");
return result;
}
    /**
     * Will use parameter search during training: word length, alphabet size
     * and window size are chosen by cross validation in buildClassifier.
     */
    public SAXVSM() {
        super(CANNOT_ESTIMATE_OWN_PERFORMANCE);
        this.PAA_intervalsPerWindow = -1; // -1 marks 'to be found by search'
        this.SAX_alphabetSize = -1;
        this.windowSize = -1;

        useParamSearch = true;
    }
    /**
     * Will build using only parameters passed
     *
     * @param PAA_intervalsPerWindow word length (intervals per window)
     * @param SAX_alphabetSize SAX alphabet size
     * @param windowSize sliding window length
     */
    public SAXVSM(int PAA_intervalsPerWindow, int SAX_alphabetSize, int windowSize) {
        super(CANNOT_ESTIMATE_OWN_PERFORMANCE);
        this.PAA_intervalsPerWindow = PAA_intervalsPerWindow;
        this.SAX_alphabetSize = SAX_alphabetSize;
        this.windowSize = windowSize;

        bop = new BagOfPatterns(PAA_intervalsPerWindow, SAX_alphabetSize, windowSize);

        useParamSearch = false;
    }
    /** @return PAA word length (number of intervals per window) */
    public int getPAA_intervalsPerWindow() {
        return PAA_intervalsPerWindow;
    }

    /** @return SAX alphabet size */
    public int getSAX_alphabetSize() {
        return SAX_alphabetSize;
    }

    /** @return sliding window length */
    public int getWindowSize() {
        return windowSize;
    }

    /**
     * @return { numIntervals(word length), alphabetSize, slidingWindowSize }
     */
    public int[] getParametersArray() {
        return new int[] { PAA_intervalsPerWindow, SAX_alphabetSize, windowSize};
    }
    /** @return the superclass parameter string with this classifier's settings appended */
    @Override
    public String getParameters() {
        return super.getParameters()+",PAAIntervalsPerWindow,"+PAA_intervalsPerWindow+",alphabetSize,"+SAX_alphabetSize+",windowSize,"+windowSize;
    }
    /**
     * Performs cross validation on given data for varying parameter values, returns
     * parameter set which yielded greatest accuracy
     *
     * Grid searched: alphabet size 2-8 (step 2), window size 15%-36% of the
     * series length (around 10 sizes), word size 2-8 (and below window size).
     *
     * @param data Data to perform cross validation testing on
     * @return { numIntervals, alphabetSize, slidingWindowSize }
     */
    public static int[] parameterSearch(Instances data) throws Exception {
        double bestAcc = -1.0;
        int bestAlpha = 0, bestWord = 0, bestWindowSize = 0;

        //BoP paper window search range suggestion
        int minWinSize = (int)((data.numAttributes()-1) * (15.0/100.0));
        int maxWinSize = (int)((data.numAttributes()-1) * (36.0/100.0));
//        int winInc = 1; //check every size in range
        int winInc = (int)((maxWinSize - minWinSize) / 10.0); //check 10 sizes within that range
        if (winInc < 1) winInc = 1;

        for (int alphaSize = 2; alphaSize <= 8; alphaSize+=2) {
            for (int winSize = minWinSize; winSize <= maxWinSize; winSize+=winInc) {
                for (int wordSize = 2; wordSize <= 8 && wordSize < winSize; wordSize+=1) {
                    SAXVSM vsm = new SAXVSM(wordSize,alphaSize,winSize);
                    double acc = vsm.crossValidate(data);

                    if (acc > bestAcc) {
                        bestAcc = acc;
                        bestAlpha = alphaSize;
                        bestWord = wordSize;
                        bestWindowSize = winSize;
                    }
                }
            }
        }

        return new int[] { bestWord, bestAlpha, bestWindowSize};
    }
    /**
     * Leave-one-out CV without re-doing bop transformation every fold (still re-applying tfxidf)
     *
     * @return cv accuracy
     */
    private double crossValidate(Instances data) throws Exception {
        transformedData = bop.fitTransform(data);

        double correct = 0;
        for (int i = 0; i < data.numInstances(); ++i) {
            corpus = tfxidf(transformedData, i); //apply tfxidf while ignoring BOP bag i

            if (classifyInstance(data.get(i)) == data.get(i).classValue())
                ++correct;
        }

        return correct / data.numInstances();
    }
    /**
     * Trains the classifier: optionally searches for parameters, validates
     * them, transforms the data to bags of patterns and builds the tf-idf
     * weighted class corpus.
     *
     * @throws Exception if the class attribute is not last, or any parameter is invalid
     */
    @Override
    public void buildClassifier(Instances data) throws Exception {
        long startTime=System.nanoTime();

        if (data.classIndex() != data.numAttributes()-1)
            throw new Exception("SAXVSM_BuildClassifier: Class attribute not set as last attribute in dataset");

        if (useParamSearch) {
            int[] params = parameterSearch(data);

            this.PAA_intervalsPerWindow = params[0];
            this.SAX_alphabetSize = params[1];
            this.windowSize = params[2];

            bop = new BagOfPatterns(PAA_intervalsPerWindow, SAX_alphabetSize, windowSize);
        }

        if (PAA_intervalsPerWindow<1)
            throw new Exception("SAXVSM_BuildClassifier: Invalid PAA word size: " + PAA_intervalsPerWindow);
        if (PAA_intervalsPerWindow>windowSize)
            throw new Exception("SAXVSM_BuildClassifier: Invalid PAA word size, bigger than sliding window size: "
                    + PAA_intervalsPerWindow + "," + windowSize);
        if (SAX_alphabetSize<2 || SAX_alphabetSize>10)
            throw new Exception("SAXVSM_BuildClassifier: Invalid SAX alphabet size (valid=2-10): " + SAX_alphabetSize);
        if (windowSize<1 || windowSize>data.numAttributes()-1)
            throw new Exception("SAXVSM_BuildClassifier: Invalid sliding window size: "
                    + windowSize + " (series length "+ (data.numAttributes()-1) + ")");

        transformedData = bop.fitTransform(data);
        corpus = tfxidf(transformedData);
        trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
        trainResults.setBuildTime(System.nanoTime()-startTime);
    }
/**
* Given a set of *individual* series transformed into bop form, will return a corpus
* containing *class* bags made from that data with tfxidf weighting applied
*/
public Instances tfxidf(Instances bopData) {
return tfxidf(bopData, -1); //include all instances into corpus
}
/**
* If skip = one of <0 ... numInstances-1>, will not include instance at that index into the corpus
* Part of leave one out cv, while avoiding unnecessary repeats of the BoP transformation
*/
private Instances tfxidf(Instances bopData, int skip) {
int numClasses = bopData.numClasses();
int numInstances = bopData.numInstances();
int numTerms = bopData.numAttributes()-1; //minus class attribute
//initialise class weights
double[][] classWeights = new double[numClasses][numTerms];
//build class bags
int inst = 0;
for (Instance in : bopData) {
if (inst++ == skip) //skip 'this' one, for leave-one-out cv
continue;
int classVal = (int)in.classValue();
for (int j = 0; j < numTerms; ++j) {
classWeights[classVal][j] += in.value(j);
}
}
//apply tf x idf
for (int i = 0; i < numTerms; ++i) { //for each term
double df = 0; //document frequency
for (int j = 0; j < numClasses; ++j) //find how many classes (documents) this term appears in
if (classWeights[j][i] != 0)
++df;
if (df != 0) { //if it appears
if (df != numClasses) { //but not in all, apply weighting
for (int j = 0; j < numClasses; ++j)
if (classWeights[j][i] != 0)
classWeights[j][i] = Math.log(1 + classWeights[j][i]) * Math.log(numClasses / df);
}
else { //appears in all
//avoid log calculations
//if df == num classes -> idf = log(N/df) = log(1) = 0
for (int j = 0; j < numClasses; ++j)
classWeights[j][i] = 0;
}
}
}
Instances tfxidfCorpus = new Instances(bopData, numClasses);
for (int i = 0; i < numClasses; ++i)
tfxidfCorpus.add(new SparseInstance(1.0, classWeights[i]));
return tfxidfCorpus;
}
    /**
     * Takes two vectors of equal length, and computes the cosine similarity between them.
     *
     * @return a.b / ( |a|*|b| )
     * @throws java.lang.Exception if a.length != b.length
     */
    public double cosineSimilarity(double[] a, double[] b) throws Exception {
        if (a.length != b.length)
            throw new Exception("Cannot calculate cosine similarity between vectors of different lengths "
                    + "(" + a.length + ", " + b.length + ")");
        //delegate to the length-bounded version over the full vectors
        return cosineSimilarity(a,b,a.length);
    }
/**
* Takes two vectors, and computes the cosine similarity between them using the first n values in each vector.
*
* To be used when e.g one or both vectors have class values as the last element, only compute
* similarity up to n-1
*
* @param n Elements 0 to n-1 will be computed for similarity, elements n to size-1 ignored
* @return a.b / ( |a|*|b| )
* @throws java.lang.Exception if n > (a.length or b.length)
*/
public double cosineSimilarity(double[] a, double[] b, int n) throws Exception {
if (n > a.length || n > b.length)
throw new IllegalArgumentException("SAXVSM_CosineSimilarity n greater than vector lengths "
+ "(a:" + a.length + ", b:" + b.length + " n:" + n + ")");
double dotProd = 0.0, aMag = 0.0, bMag = 0.0;
for (int i = 0; i < n; ++i) {
dotProd += a[i]*b[i];
aMag += a[i]*a[i];
bMag += b[i]*b[i];
}
if (aMag == 0 || bMag == 0 || dotProd == 0)
return 0;
if (aMag == bMag) //root(n) * root(n) just = n^(1/2)^2 = n, save the root operation
return dotProd / aMag;
return dotProd / (Math.sqrt(aMag) * Math.sqrt(bMag));
}
@Override
public double classifyInstance(Instance instance) throws Exception {
double[] distribution = distributionForInstance(instance); //outputs 0's during cv sometimes
return findIndexOfMax(distribution, rand);
}
@Override
public double[] distributionForInstance(Instance instance) throws Exception {
int numClasses = corpus.numInstances();
double[] termFreqs = bop.bagToArray(bop.buildBag(instance));
//find similarity to each class
double[] similarities = new double[numClasses];
double sum = 0.0;
for (int i = 0; i < numClasses; ++i) {
similarities[i] = cosineSimilarity(corpus.get(i).toDoubleArray(), termFreqs, termFreqs.length);
sum+=similarities[i];
}
//return as a set of probabilities
if (sum != 0)
for (int i = 0; i < numClasses; ++i)
similarities[i] /= sum;
return similarities;
}
@Override
public Capabilities getCapabilities() {
throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
}
    /** Entry point: runs a simple build/test demo on a locally held dataset. */
    public static void main(String[] args) throws Exception{
//        System.out.println(ClassifierTools.testUtils_getIPDAcc(new SAXVSM()));
//        System.out.println(ClassifierTools.testUtils_confirmIPDReproduction(new SAXVSM(), 0.7580174927113703, "2019/09/26"));
        basicTest();
    }
    /**
     * Smoke test: trains and tests on a hard-coded local copy of the Car dataset,
     * printing the selected parameters, timings and test accuracy.
     * NOTE(review): depends on local file paths — not portable; verify paths before running.
     */
    public static void basicTest() {
        System.out.println("SAXVSMBasicTest\n");
        try {
            Instances train = DatasetLoading.loadDataNullable("C:\\tempbakeoff\\TSC Problems\\Car\\Car_TRAIN.arff");
            Instances test = DatasetLoading.loadDataNullable("C:\\tempbakeoff\\TSC Problems\\Car\\Car_TEST.arff");
//            Instances train = ClassifierTools.loadDataThrowable("C:\\tempbakeoff\\TSC Problems\\BeetleFly\\BeetleFly_TRAIN.arff");
//            Instances test = ClassifierTools.loadDataThrowable("C:\\tempbakeoff\\TSC Problems\\BeetleFly\\BeetleFly_TEST.arff");
            System.out.println(train.relationName());
            SAXVSM vsm = new SAXVSM();
            //time the build in seconds
            System.out.println("Training starting");
            long start = System.nanoTime();
            vsm.buildClassifier(train);
            double trainTime = (System.nanoTime() - start) / 1000000000.0; //seconds
            System.out.println("Training done (" + trainTime + "s)");
            //report the parameters chosen by the search
            System.out.print("Params: ");
            for (int p : vsm.getParametersArray())
                System.out.print(p + " ");
            System.out.println("");
            //time the testing and report accuracy
            System.out.println("\nTesting starting");
            start = System.nanoTime();
            double acc = ClassifierTools.accuracy(test, vsm);
            double testTime = (System.nanoTime() - start) / 1000000000.0; //seconds
            System.out.println("Testing done (" + testTime + "s)");
            System.out.println("\nACC: " + acc);
        }
        catch (Exception e) {
            System.out.println(e);
            e.printStackTrace();
        }
    }
    /** Short name used when reporting results. */
    @Override
    public String toString() {
        return "SAXVSM";
    }
}
| 15,755 | 37.807882 | 146 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/dictionary_based/SAX_1NN.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.dictionary_based;
import experiments.data.DatasetLoading;
import tsml.classifiers.EnhancedAbstractClassifier;
import utilities.ClassifierTools;
import machine_learning.classifiers.kNN;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import tsml.transformers.SAX;
import weka.filters.unsupervised.instance.Randomize;
import java.util.concurrent.TimeUnit;
/**
*
* @author James
*/
public class SAX_1NN extends EnhancedAbstractClassifier {

    /** Training data after the SAX transform has been applied, kept public for inspection. */
    public Instances SAXdata;

    private kNN knn;
    private SAX sax;

    private final int PAA_intervalsPerWindow;
    private final int SAX_alphabetSize;

    /**
     * Nearest-neighbour classifier over SAX-discretised series.
     *
     * @param PAA_intervalsPerWindow word length (number of PAA intervals)
     * @param SAX_alphabetSize number of symbols in the SAX alphabet
     */
    public SAX_1NN(int PAA_intervalsPerWindow, int SAX_alphabetSize) {
        super(CANNOT_ESTIMATE_OWN_PERFORMANCE);

        this.PAA_intervalsPerWindow = PAA_intervalsPerWindow;
        this.SAX_alphabetSize = SAX_alphabetSize;

        //configure the SAX transform with symbolic (non real-valued) output
        sax = new SAX();
        sax.setNumIntervals(PAA_intervalsPerWindow);
        sax.setAlphabetSize(SAX_alphabetSize);
        sax.useRealValuedAttributes(false);

        knn = new kNN(); //default to 1NN, Euclidean distance
    }

    /** Appends this classifier's settings to the superclass parameter string. */
    @Override
    public String getParameters() {
        StringBuilder sb = new StringBuilder(super.getParameters());
        sb.append(",PAAIntervalsPerWindow,").append(PAA_intervalsPerWindow);
        sb.append(",alphabetSize,").append(SAX_alphabetSize);
        return sb.toString();
    }

    /** Transforms the training data with SAX and builds the underlying kNN on the result. */
    @Override
    public void buildClassifier(Instances data) throws Exception {
        long buildStart = System.nanoTime();

        SAXdata = sax.transform(data);
        knn.buildClassifier(SAXdata);

        trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
        trainResults.setBuildTime(System.nanoTime() - buildStart);
    }

    @Override
    public double classifyInstance(Instance instance) throws Exception {
        //transform the query into SAX space before handing it to the kNN
        return knn.classifyInstance(sax.transform(instance));
    }

    @Override
    public double[] distributionForInstance(Instance instance) throws Exception {
        return knn.distributionForInstance(sax.transform(instance));
    }

    /** Capabilities reporting is not implemented for this classifier. */
    @Override
    public Capabilities getCapabilities() {
        throw new UnsupportedOperationException("Not supported yet.");
    }

    /**
     * Demo: shuffles a local dataset, does a 70/30 train/test split and reports accuracy.
     * NOTE(review): depends on a local file path — not portable.
     */
    public static void main(String[] args) throws Exception{
        System.out.println("BagofPatternsTest\n\n");

        try {
            Instances all = DatasetLoading.loadDataNullable("C:\\\\Temp\\\\TESTDATA\\\\FiveClassV1.arff");
            all.deleteAttributeAt(0); //just name of bottle

            //shuffle the data
            Randomize shuffler = new Randomize();
            shuffler.setInputFormat(all);
            for (int i = 0; i < all.numInstances(); ++i)
                shuffler.input(all.get(i));
            shuffler.batchFinished();

            //70/30 train/test split of the shuffled data
            int trainNum = (int) (all.numInstances() * 0.7);
            int testNum = all.numInstances() - trainNum;

            Instances train = new Instances(all, trainNum);
            for (int i = 0; i < trainNum; ++i)
                train.add(shuffler.output());

            Instances test = new Instances(all, testNum);
            for (int i = 0; i < testNum; ++i)
                test.add(shuffler.output());

            SAX_1NN saxc = new SAX_1NN(6,3);
            saxc.buildClassifier(train);
            System.out.println(saxc.SAXdata);

            System.out.println("\nACCURACY TEST");
            System.out.println(ClassifierTools.accuracy(test, saxc));
        }
        catch (Exception e) {
            System.out.println(e);
            e.printStackTrace();
        }
    }

    @Override
    public String toString() {
        return "SAX";
    }
}
| 4,746 | 32.907143 | 135 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/dictionary_based/SpatialBOSS.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.dictionary_based;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import tsml.classifiers.dictionary_based.bitword.BitWord;
import tsml.classifiers.dictionary_based.bitword.BitWordInt;
import utilities.InstanceTools;
import tsml.classifiers.SaveParameterInfo;
import weka.core.TechnicalInformation;
import utilities.generic_storage.ComparablePair;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map.Entry;
import utilities.ClassifierTools;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.classifiers.Classifier;
import experiments.data.DatasetLoading;
import java.util.concurrent.TimeUnit;
import tsml.classifiers.EnhancedAbstractClassifier;
/**
* SpatialBOSS classifier with parameter search and ensembling, if parameters are known,
* use nested 'BOSSSpatialPyramidsIndividual' classifier and directly provide them.
*
* Intended use is with the default constructor, however can force the normalisation
* parameter to true/false by passing a boolean, e.g c = new BOSSSpatialPyramids(true)
*
* Params: normalise? (i.e should first fourier coefficient(mean value) be discarded)
* Alphabetsize fixed to four
*
* @author James Large
*
* Base algorithm information found in BOSS.java
* Spatial Pyramids based on the algorithm described in getTechnicalInformation()
*/
public class SpatialBOSS extends EnhancedAbstractClassifier implements SaveParameterInfo {
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
result = new TechnicalInformation(TechnicalInformation.Type.ARTICLE);
result.setValue(TechnicalInformation.Field.AUTHOR, "Lazebnik, Svetlana and Schmid, Cordelia and Ponce, Jean");
result.setValue(TechnicalInformation.Field.TITLE, "Beyond bags of features: Spatial pyramid matching for recognizing natural scene categories");
result.setValue(TechnicalInformation.Field.BOOKTITLE, "Computer Vision and Pattern Recognition, 2006 IEEE Computer Society Conference on");
result.setValue(TechnicalInformation.Field.VOLUME, "2");
result.setValue(TechnicalInformation.Field.PAGES, "2169--2178");
result.setValue(TechnicalInformation.Field.YEAR, "2006");
return result;
}
private List<BOSSWindow> classifiers;
private final double correctThreshold = 0.92;
// private int maxEnsembleSize = Integer.MAX_VALUE;
private int maxEnsembleSize = 100;
private final Integer[] wordLengths = { 16, 14, 12, 10, 8 };
private final Integer[] levels = { 1, 2, 3 };
private final int alphabetSize = 4;
public enum SerialiseOptions {
//dont do any seriealising, run as normal
NONE,
//serialise the final boss classifiers which made it into ensemble (does not serialise the entire BOSSEnsembleSP_Redo object)
//slight runtime cost
STORE,
//serialise the final boss classifiers, and delete from main memory. reload each from ser file when needed in classification.
//the most memory used at any one time is therefore ~2 individual boss classifiers during training.
//massive runtime cost, order of magnitude
STORE_LOAD
};
private SerialiseOptions serOption = SerialiseOptions.NONE;
private static String serFileLoc = "BOSSWindowSers\\";
private boolean[] normOptions;
    /**
     * Providing a particular value for normalisation will force that option, if
     * whether to normalise should be a parameter to be searched, use default constructor
     *
     * @param normalise whether or not to normalise by dropping the first Fourier coefficient
     */
    public SpatialBOSS(boolean normalise) {
        super(CAN_ESTIMATE_OWN_PERFORMANCE);
        //only this single normalisation setting will be searched over in buildClassifier
        normOptions = new boolean[] { normalise };
    }
    /**
     * During buildClassifier(...), will search through normalisation as well as
     * window size and word length if no particular normalisation option is provided
     */
    public SpatialBOSS() {
        super(CAN_ESTIMATE_OWN_PERFORMANCE);
        //both normalisation settings will be searched over in buildClassifier
        normOptions = new boolean[] { true, false };
    }
public static class BOSSWindow implements Comparable<BOSSWindow>, Serializable {
private BOSSSpatialPyramidsIndividual classifier;
public double accuracy;
public String filename;
private static final long serialVersionUID = 2L;
public BOSSWindow(String filename) {
this.filename = filename;
}
public BOSSWindow(BOSSSpatialPyramidsIndividual classifer, double accuracy, String dataset) {
this.classifier = classifer;
this.accuracy = accuracy;
buildFileName(dataset);
}
public double classifyInstance(Instance inst) throws Exception {
return classifier.classifyInstance(inst);
}
public double classifyInstance(int test) throws Exception {
return classifier.classifyInstance(test);
}
private void buildFileName(String dataset) {
filename = serFileLoc + dataset + "_" + classifier.windowSize + "_" + classifier.wordLength + "_" + classifier.alphabetSize + "_" + classifier.norm + ".ser";
}
public boolean storeAndClearClassifier() {
try {
ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(filename));
out.writeObject(this);
out.close();
clearClassifier();
return true;
}catch(IOException e) {
System.out.print("Error serialiszing to " + filename);
e.printStackTrace();
return false;
}
}
public boolean store() {
try {
ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(filename));
out.writeObject(this);
out.close();
return true;
}catch(IOException e) {
System.out.print("Error serialiszing to " + filename);
e.printStackTrace();
return false;
}
}
public void clearClassifier() {
classifier = null;
}
public boolean load() {
BOSSWindow bw = null;
try {
ObjectInputStream in = new ObjectInputStream(new FileInputStream(filename));
bw = (BOSSWindow) in.readObject();
in.close();
this.accuracy = bw.accuracy;
this.classifier = bw.classifier;
return true;
}catch(IOException i) {
System.out.print("Error deserialiszing from " + filename);
i.printStackTrace();
return false;
}catch(ClassNotFoundException c) {
System.out.println("BOSSWindow class not found");
c.printStackTrace();
return false;
}
}
public boolean deleteSerFile() {
try {
File f = new File(filename);
return f.delete();
} catch(SecurityException s) {
System.out.println("Unable to delete, access denied: " + filename);
s.printStackTrace();
return false;
}
}
/**
* @return { numIntervals(word length), alphabetSize, slidingWindowSize }
*/
public int[] getParameters() { return classifier.getParameters(); }
public int getWindowSize() { return classifier.getWindowSize(); }
public int getWordLength() { return classifier.getWordLength(); }
public int getAlphabetSize() { return classifier.getAlphabetSize(); }
public boolean isNorm() { return classifier.isNorm(); }
public double getLevelWeighting() { return classifier.getLevelWeighting(); }
public int getLevels() { return classifier.getLevels(); }
@Override
public int compareTo(BOSSWindow other) {
if (this.accuracy > other.accuracy)
return 1;
if (this.accuracy == other.accuracy)
return 0;
return -1;
}
}
@Override
public String getParameters() {
StringBuilder sb = new StringBuilder();
BOSSWindow first = classifiers.get(0);
sb.append("windowSize=").append(first.getWindowSize()).append("/wordLength=").append(first.getWordLength());
sb.append("/alphabetSize=").append(first.getAlphabetSize()).append("/norm=").append(first.isNorm());
for (int i = 1; i < classifiers.size(); ++i) {
BOSSWindow boss = classifiers.get(i);
sb.append(",windowSize=").append(boss.getWindowSize()).append("/wordLength=").append(boss.getWordLength());
sb.append("/alphabetSize=").append(boss.getAlphabetSize()).append("/norm=").append(boss.isNorm());
}
return sb.toString();
}
    /** Leave-one-out: the number of cv folds equals the number of instances. */
    @Override
    public int setNumberOfFolds(Instances data){
        return data.numInstances();
    }
/**
* @return { numIntervals(word length), alphabetSize, slidingWindowSize } for each BOSSWindow in this *built* classifier
*/
public int[][] getParametersValues() {
int[][] params = new int[classifiers.size()][];
int i = 0;
for (BOSSWindow boss : classifiers)
params[i++] = boss.getParameters();
return params;
}
    /** Sets how (if at all) ensemble members are serialised during the build. */
    public void setSerOption(SerialiseOptions option) {
        serOption = option;
    }
    /** Sets the directory used for serialised ensemble members. */
    public void setSerFileLoc(String path) {
        serFileLoc = path;
    }
    /**
     * Builds the SpatialBOSS ensemble. For each normalisation option and candidate
     * window size: finds the best word length, then the best number of pyramid levels,
     * using leave-one-out accuracy estimates; the resulting member is admitted if its
     * accuracy is within correctThreshold of the best member seen so far, with the
     * ensemble capped at maxEnsembleSize.
     *
     * @throws Exception if the class attribute is not set as the last attribute
     */
    @Override
    public void buildClassifier(final Instances data) throws Exception {
        if (data.classIndex() != data.numAttributes()-1)
            throw new Exception("BOSSEnsembleSP_BuildClassifier: Class attribute not set as last attribute in dataset");

        //create a unique, timestamped directory for serialised members if requested
        if (serOption == SerialiseOptions.STORE || serOption == SerialiseOptions.STORE_LOAD) {
            DateFormat dateFormat = new SimpleDateFormat("yyyyMMddHHmmss");
            Date date = new Date();
            serFileLoc += data.relationName() + "_" + dateFormat.format(date) + "\\";
            File f = new File(serFileLoc);
            if (!f.isDirectory())
                f.mkdirs();
        }
        long t1=System.nanoTime();
        classifiers = new LinkedList<BOSSWindow>();
        int numSeries = data.numInstances();
        int seriesLength = data.numAttributes()-1; //minus class attribute
        int minWindow = 10;
        int maxWindow = seriesLength;
        //int winInc = 1; //check every window size in range
//        //whats the max number of window sizes that should be searched through
        //double maxWindowSearches = Math.min(200, Math.sqrt(seriesLength));
        double maxWindowSearches = seriesLength/4.0;
        int winInc = (int)((maxWindow - minWindow) / maxWindowSearches);
        if (winInc < 1) winInc = 1;
        //keep track of current max window size accuracy, constantly check for correctthreshold to discard to save space
        double maxAcc = -1.0;
        //the acc of the worst member to make it into the final ensemble as it stands
        double minMaxAcc = -1.0;
        for (boolean normalise : normOptions) {
            for (int winSize = minWindow; winSize <= maxWindow; winSize += winInc) {
                BOSSSpatialPyramidsIndividual boss = new BOSSSpatialPyramidsIndividual(wordLengths[0], alphabetSize, winSize, normalise, levels[0]); //1 level, find best 'normal' boss classifier
                boss.buildClassifier(data); //initial setup for this windowsize, with max word length
                BOSSSpatialPyramidsIndividual bestClassifierForWinSize = null;
                double bestAccForWinSize = -1.0;
                //find best word length for this window size
                for (Integer wordLen : wordLengths) {
                    boss = boss.buildShortenedSPBags(wordLen); //in first iteration, same lengths (wordLengths[0]), will do nothing
                    //leave-one-out accuracy estimate for this word length
                    int correct = 0;
                    for (int i = 0; i < numSeries; ++i) {
                        double c = boss.classifyInstance(i); //classify series i, while ignoring its corresponding histogram i
                        if (c == data.get(i).classValue())
                            ++correct;
                    }
                    double acc = (double)correct/(double)numSeries;
                    if (acc >= bestAccForWinSize) {
                        bestAccForWinSize = acc;
                        bestClassifierForWinSize = boss;
                    }
                }
                //best 'normal' boss classifier found, now find the best number of levels
                //effectively determining whether the feature this member/classifier specialises in is
                //local or global
                int bestLevels = bestClassifierForWinSize.getLevels();
                for (int l = 1; l < levels.length; ++l) { //skip first, already found acc for it before
                    bestClassifierForWinSize.changeNumLevels(levels[l]);
                    //leave-one-out accuracy estimate at this pyramid depth
                    int correct = 0;
                    for (int i = 0; i < numSeries; ++i) {
                        double c = bestClassifierForWinSize.classifyInstance(i); //classify series i, while ignoring its corresponding histogram i
                        if (c == data.get(i).classValue())
                            ++correct;
                    }
                    double acc = (double)correct/(double)numSeries;
                    if (acc > bestAccForWinSize) { //only store if >, not >= (favours lower levels = less space)
                        bestAccForWinSize = acc;
                        bestLevels = levels[l];
                    }
                }
                if (makesItIntoEnsemble(bestAccForWinSize, maxAcc, minMaxAcc, classifiers.size())) {
                    bestClassifierForWinSize.changeNumLevels(bestLevels);
                    BOSSWindow bw = new BOSSWindow(bestClassifierForWinSize, bestAccForWinSize, data.relationName());
                    bw.classifier.clean();
                    if (serOption == SerialiseOptions.STORE)
                        bw.store();
                    else if (serOption == SerialiseOptions.STORE_LOAD)
                        bw.storeAndClearClassifier();
                    classifiers.add(bw);
                    if (bestAccForWinSize > maxAcc) {
                        maxAcc = bestAccForWinSize;
                        //get rid of any extras that dont fall within the final max threshold
                        Iterator<BOSSWindow> it = classifiers.iterator();
                        while (it.hasNext()) {
                            BOSSWindow b = it.next();
                            if (b.accuracy < maxAcc * correctThreshold) {
                                if (serOption == SerialiseOptions.STORE || serOption == SerialiseOptions.STORE_LOAD)
                                    b.deleteSerFile();
                                it.remove();
                            }
                        }
                    }
                    while (classifiers.size() > maxEnsembleSize) {
                        //cull the 'worst of the best' until back under the max size
                        int minAccInd = (int)findMinEnsembleAcc()[0];
                        if (serOption == SerialiseOptions.STORE || serOption == SerialiseOptions.STORE_LOAD)
                            classifiers.get(minAccInd).deleteSerFile();
                        classifiers.remove(minAccInd);
                    }
                    minMaxAcc = findMinEnsembleAcc()[1]; //new 'worst of the best' acc
                }
            }
        }
        //end train time in nanoseconds
        trainResults.setBuildTime(System.nanoTime() - t1);
        if (getEstimateOwnPerformance())
            findEnsembleTrainAcc(data);
        trainResults.setParas(getParameters());
    }
//[0] = index, [1] = acc
private double[] findMinEnsembleAcc() {
double minAcc = Double.MIN_VALUE;
int minAccInd = 0;
for (int i = 0; i < classifiers.size(); ++i) {
double curacc = classifiers.get(i).accuracy;
if (curacc < minAcc) {
minAcc = curacc;
minAccInd = i;
}
}
return new double[] { minAccInd, minAcc };
}
private boolean makesItIntoEnsemble(double acc, double maxAcc, double minMaxAcc, int curEnsembleSize) {
if (acc >= maxAcc * correctThreshold) {
if (curEnsembleSize >= maxEnsembleSize)
return acc > minMaxAcc;
else
return true;
}
return false;
}
    /**
     * Estimates train accuracy via leave-one-out over the ensemble members'
     * precomputed histograms, recording each prediction into trainResults.
     *
     * @return leave-one-out train accuracy
     */
    private double findEnsembleTrainAcc(Instances data) throws Exception {
        trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
        trainResults.setEstimatorName(getClassifierName());
        trainResults.setDatasetName(data.relationName());
        trainResults.setFoldID(seed);
        trainResults.setSplit("train");
        trainResults.setParas(getParameters());
        double correct = 0;
        for (int i = 0; i < data.numInstances(); ++i) {
            long predTime = System.nanoTime();
            //classify series i, while ignoring its corresponding histogram i
            double[] probs = distributionForInstance(i, data.numClasses());
            predTime = System.nanoTime() - predTime;
            //randomised tie-break over the max-probability classes
            int maxClass = findIndexOfMax(probs, rand);
            if (maxClass == data.get(i).classValue())
                ++correct;
            trainResults.addPrediction(data.get(i).classValue(), probs, maxClass, predTime, "");
        }
        trainResults.finaliseResults();
        double result = correct / data.numInstances();
        return result;
    }
/**
* Classify the train instance at index 'test', whilst ignoring the corresponding bags
* in each of the members of the ensemble, for use in CV of BOSSEnsembleSP_Redo
*/
public double classifyInstance(int test, int numclasses) throws Exception {
double[] dist = distributionForInstance(test, numclasses);
return findIndexOfMax(dist, rand);
}
public double[] distributionForInstance(int test, int numclasses) throws Exception {
double[] classHist = new double[numclasses];
//get votes from all windows
double sum = 0;
for (BOSSWindow classifier : classifiers) {
if (serOption == SerialiseOptions.STORE_LOAD)
classifier.load();
double classification = classifier.classifyInstance(test);
if (serOption == SerialiseOptions.STORE_LOAD)
classifier.clearClassifier();
classHist[(int)classification]++;
sum++;
}
if (sum != 0)
for (int i = 0; i < classHist.length; ++i)
classHist[i] /= sum;
return classHist;
}
@Override
public double classifyInstance(Instance instance) throws Exception {
double[] dist = distributionForInstance(instance);
return findIndexOfMax(dist, rand);
}
@Override
public double[] distributionForInstance(Instance instance) throws Exception {
double[] classHist = new double[instance.numClasses()];
double sum = 0;
//get votes from all windows
for (BOSSWindow classifier : classifiers) {
if (serOption == SerialiseOptions.STORE_LOAD)
classifier.load();
double classification = classifier.classifyInstance(instance);
if (serOption == SerialiseOptions.STORE_LOAD)
classifier.clearClassifier();
classHist[(int)classification]++;
sum++;
}
if (sum != 0)
for (int i = 0; i < classHist.length; ++i)
classHist[i] /= sum;
return classHist;
}
@Override
public Capabilities getCapabilities() {
throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
}
    /**
     * Minimum working example: trains on a hard-coded local copy of ItalyPowerDemand
     * and reports test accuracy.
     * NOTE(review): depends on local file paths — not portable; verify before running.
     */
    public static void main(String[] args) throws Exception{
//        ClassifierExperiments.ExperimentalArguments exp = new ClassifierExperiments.ExperimentalArguments();
//        exp.dataReadLocation = "C:/TSCProblems2018/";
//        exp.resultsWriteLocation = "C:/Temp/spatialboss/";
//        exp.classifierName = "SpatialBOSS";
//        exp.datasetName = "Arrowhead";
//        exp.generateErrorEstimateOnTrainSet = true;
//        exp.forceEvaluation = true;
//        exp.foldId = 1;
//
//        ClassifierExperiments.setupAndRunExperiment(exp);
        //Minimum working example
        String dataset = "ItalyPowerDemand";
        Instances train = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dataset+"\\"+dataset+"_TRAIN.arff");
        Instances test = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dataset+"\\"+dataset+"_TEST.arff");
        Classifier c = new SpatialBOSS();
        c.buildClassifier(train);
        double accuracy = ClassifierTools.accuracy(test, c);
        System.out.println("BOSSEnsembleSP accuracy on " + dataset + " fold 0 = " + accuracy);
        //Other examples/tests
//        detailedFold0Test(dataset);
//        resampleTest(dataset, 25);
    }
    /**
     * Verbose single-fold test: trains on the named dataset, prints the ensemble
     * size and each member's parameters, then reports test accuracy and timings.
     * NOTE(review): depends on local file paths — not portable.
     */
    public static void detailedFold0Test(String dset) {
        System.out.println("BOSSEnsembleSPDetailedTest\n");
        try {
            Instances train = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dset+"\\"+dset+"_TRAIN.arff");
            Instances test = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dset+"\\"+dset+"_TEST.arff");
            System.out.println(train.relationName());
            SpatialBOSS boss = new SpatialBOSS();
            //TRAINING
            System.out.println("Training starting");
            long start = System.nanoTime();
            boss.buildClassifier(train);
            double trainTime = (System.nanoTime() - start) / 1000000000.0; //seconds
            System.out.println("Training done (" + trainTime + "s)");
            //RESULTS OF TRAINING: one line per ensemble member
            System.out.println("Ensemble Size: " + boss.classifiers.size());
            System.out.println("Param sets: ");
            int[][] params = boss.getParametersValues();
            for (int i = 0; i < params.length; ++i)
                System.out.println(i + ": " + params[i][0] + " " + params[i][1] + " " + params[i][2] + " " + boss.classifiers.get(i).isNorm() + " " + boss.classifiers.get(i).getLevels() + " " + boss.classifiers.get(i).accuracy);
            //TESTING
            System.out.println("\nTesting starting");
            start = System.nanoTime();
            double acc = ClassifierTools.accuracy(test, boss);
            double testTime = (System.nanoTime() - start) / 1000000000.0; //seconds
            System.out.println("Testing done (" + testTime + "s)");
            System.out.println("\nACC: " + acc);
        }
        catch (Exception e) {
            System.out.println(e);
            e.printStackTrace();
        }
    }
public static void resampleTest(String dset, int resamples) throws Exception {
Instances train = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dset+"\\"+dset+"_TRAIN.arff");
Instances test = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dset+"\\"+dset+"_TEST.arff");
Classifier c = new SpatialBOSS();
//c.setCVPath("C:\\tempproject\\BOSSEnsembleCVtest.csv");
double [] accs = new double[resamples];
for(int i=0;i<resamples;i++){
Instances[] data=InstanceTools.resampleTrainAndTestInstances(train, test, i);
c.buildClassifier(data[0]);
accs[i]= ClassifierTools.accuracy(data[1], c);
if (i==0)
System.out.print(accs[i]);
else
System.out.print("," + accs[i]);
}
double mean = 0;
for(int i=0;i<resamples;i++)
mean += accs[i];
mean/=resamples;
System.out.println("\n\nBOSSEnsembleSP mean acc over " + resamples + " resamples: " + mean);
}
/**
* BOSSSpatialPyramidsIndividual classifier to be used with known parameters,
* for boss with parameter search, use BOSSSpatialPyramids.
*
* Params: wordLength, alphabetSize, windowLength, normalise?
*
* @author James Large.
*/
public static class BOSSSpatialPyramidsIndividual implements Classifier, Serializable {
        protected BitWordInt[][] SFAwords; //all sfa words found in original buildClassifier(), no numerosity reduction/shortening applied
        public ArrayList<SPBag> bags; //histograms of words of the current wordlength with numerosity reduction applied (if selected)
        protected double[/*letterindex*/][/*breakpointsforletter*/] breakpoints; // MCB bin boundaries learned in buildClassifier()
        protected double inverseSqrtWindowSize; // cached 1/sqrt(windowSize), used to normalise Fourier coefficients
        protected int windowSize; // sliding window length
        protected int wordLength; // number of letters (kept DFT coefficients) per SFA word
        protected int alphabetSize; // number of discretisation bins per letter
        protected boolean norm; // if true, the first (DC) Fourier coefficient is dropped
        protected int levels = 0; // number of spatial pyramid levels
        protected double levelWeighting = 0.5; // per-level weight base applied in applyPyramidWeights()
        protected int seriesLength; // train series length, set in buildClassifier(); sizes pyramid quadrants
        protected boolean numerosityReduction = true; // collapse runs of identical consecutive words into one
        protected static final long serialVersionUID = 1L;
        /**
         * Standalone constructor with fully specified parameters.
         *
         * @param wordLength number of letters per SFA word
         * @param alphabetSize number of discretisation bins per letter
         * @param windowSize sliding window length
         * @param normalise whether to drop the first (DC) Fourier coefficient
         * @param levels number of spatial pyramid levels
         */
        public BOSSSpatialPyramidsIndividual(int wordLength, int alphabetSize, int windowSize, boolean normalise, int levels) {
            this.wordLength = wordLength;
            this.alphabetSize = alphabetSize;
            this.windowSize = windowSize;
            this.inverseSqrtWindowSize = 1.0 / Math.sqrt(windowSize); // cached once, reused by every DFT normalisation
            this.norm = normalise;
            this.levels = levels;
        }
        /**
         * Used when shortening histograms: copies 'meta' data over, but with a shorter
         * word length; the actual histogram shortening happens separately.
         * Note: SFAwords and breakpoints are shared by reference with the source
         * classifier (not deep-copied), and bags starts empty with capacity for
         * the source's bag count.
         *
         * @param boss source classifier whose settings/transform data are reused
         * @param wordLength the (shorter) word length for this copy
         */
        public BOSSSpatialPyramidsIndividual(BOSSSpatialPyramidsIndividual boss, int wordLength) {
            this.wordLength = wordLength;

            this.windowSize = boss.windowSize;
            this.inverseSqrtWindowSize = boss.inverseSqrtWindowSize;
            this.alphabetSize = boss.alphabetSize;
            this.norm = boss.norm;
            this.numerosityReduction = boss.numerosityReduction;
            //this.alphabet = boss.alphabet;

            this.SFAwords = boss.SFAwords; // shared, not copied
            this.breakpoints = boss.breakpoints; // shared, not copied
            this.levelWeighting = boss.levelWeighting;
            this.levels = boss.levels;
            this.seriesLength = boss.seriesLength;

            bags = new ArrayList<>(boss.bags.size());
        }
        //map of <word, quadrant index> => (weighted) count
        public static class SPBag extends HashMap<ComparablePair<BitWord, Integer>, Double> {
            double classVal; // class value of the series this histogram was built from

            /** Creates a bag with no class value set (e.g. for test instances). */
            public SPBag() {
                super();
            }

            /**
             * Creates a bag for a train series of the given class.
             * @param classValue class value of the originating series
             */
            public SPBag(int classValue) {
                super();
                classVal = classValue;
            }

            public double getClassVal() { return classVal; }
            public void setClassVal(double classVal) { this.classVal = classVal; }
        }
        // Simple read-only accessors for the transform parameters.
        public int getWindowSize() { return windowSize; }
        public int getWordLength() { return wordLength; }
        public int getAlphabetSize() { return alphabetSize; }
        public boolean isNorm() { return norm; }
        public int getLevels() { return levels; }
        public double getLevelWeighting() { return levelWeighting; }
        /**
         * @return { wordLength, alphabetSize, windowSize } - note the normalise
         * flag is NOT included in the returned array (the old comment was stale)
         */
        public int[] getParameters() {
            return new int[] { wordLength, alphabetSize, windowSize };
        }

        /** Frees the stored per-series SFA words once no further word-length shortening is needed. */
        public void clean() {
            SFAwords = null;
        }
protected double[][] slidingWindow(double[] data) {
int numWindows = data.length-windowSize+1;
double[][] subSequences = new double[numWindows][windowSize];
for (int windowStart = 0; windowStart < numWindows; ++windowStart) {
//copy the elements windowStart to windowStart+windowSize from data into
//the subsequence matrix at row windowStart
System.arraycopy(data,windowStart,subSequences[windowStart],0,windowSize);
}
return subSequences;
}
protected double[][] performDFT(double[][] windows) {
double[][] dfts = new double[windows.length][wordLength];
for (int i = 0; i < windows.length; ++i) {
dfts[i] = DFT(windows[i]);
}
return dfts;
}
protected double stdDev(double[] series) {
double sum = 0.0;
double squareSum = 0.0;
for (int i = 0; i < windowSize; i++) {
sum += series[i];
squareSum += series[i]*series[i];
}
double mean = sum / series.length;
double variance = squareSum / series.length - mean*mean;
return variance > 0 ? Math.sqrt(variance) : 1.0;
}
        /**
         * Performs DFT but calculates only wordLength/2 coefficients instead of the
         * full transform, and skips the first coefficient if it is to be normalised
         *
         * @return double[] size wordLength, { real1, imag1, ... realwl/2, imagwl/2 }
         */
        protected double[] DFT(double[] series) {
            //taken from FFT.java but
            //return just a double[] size n, { real1, imag1, ... realn/2, imagn/2 }
            //instead of Complex[] size n/2
            int n=series.length;
            int outputLength = wordLength/2;
            int start = (norm ? 1 : 0); // skip the DC coefficient when normalising
            //normalize the disjoint windows and sliding windows by dividing them by their standard deviation
            //all Fourier coefficients are divided by sqrt(windowSize)
            double normalisingFactor = inverseSqrtWindowSize / stdDev(series);
            double[] dft=new double[outputLength*2];
            for (int k = start; k < start + outputLength; k++) { // For each output element
                // NOTE(review): float accumulators lose precision vs double;
                // presumably kept to match the reference BOSS implementation - confirm before changing
                float sumreal = 0;
                float sumimag = 0;
                for (int t = 0; t < n; t++) { // For each input element
                    sumreal += series[t]*Math.cos(2*Math.PI * t * k / n);
                    sumimag += -series[t]*Math.sin(2*Math.PI * t * k / n);
                }
                // interleave real/imaginary parts in the output array
                dft[(k-start)*2] = sumreal * normalisingFactor;
                dft[(k-start)*2+1] = sumimag * normalisingFactor;
            }
            return dft;
        }
        /**
         * Same truncated DFT as DFT(), but WITHOUT any normalisation applied -
         * scaling by the window's standard deviation happens later in
         * normalizeDFT(). Used to seed the MFT recurrence in performMFT().
         */
        private double[] DFTunnormed(double[] series) {
            //taken from FFT.java but
            //return just a double[] size n, { real1, imag1, ... realn/2, imagn/2 }
            //instead of Complex[] size n/2
            int n=series.length;
            int outputLength = wordLength/2;
            int start = (norm ? 1 : 0); // skip the DC coefficient when normalising
            double[] dft = new double[outputLength*2];
            double twoPi = 2*Math.PI / n; // hoisted loop-invariant angle step
            for (int k = start; k < start + outputLength; k++) { // For each output element
                // NOTE(review): float accumulators match DFT() above - confirm before changing
                float sumreal = 0;
                float sumimag = 0;
                for (int t = 0; t < n; t++) { // For each input element
                    sumreal += series[t]*Math.cos(twoPi * t * k);
                    sumimag += -series[t]*Math.sin(twoPi * t * k);
                }
                dft[(k-start)*2] = sumreal;
                dft[(k-start)*2+1] = sumimag;
            }
            return dft;
        }
private double[] normalizeDFT(double[] dft, double std) {
double normalisingFactor = (std > 0? 1.0 / std : 1.0) * inverseSqrtWindowSize;
for (int i = 0; i < dft.length; i++) {
dft[i] *= normalisingFactor;
}
return dft;
}
        /**
         * Momentary Fourier Transform: computes the truncated DFT of every sliding
         * window of the series. The first window's transform is computed directly
         * (DFTunnormed); each subsequent window is derived incrementally from the
         * previous one via the MFT update, then normalised per-window for lower
         * bounding.
         *
         * @param series the raw series (class value removed)
         * @return one normalised transform (length wordLength rounded up to even) per window
         */
        private double[][] performMFT(double[] series) {
            // ignore DC value?
            int startOffset = norm ? 2 : 0; // 2 = one complex (real,imag) pair skipped
            int l = wordLength;
            l = l + l % 2; // make it even
            double[] phis = new double[l]; // interleaved (real,imag) twiddle factors per coefficient
            for (int u = 0; u < phis.length; u += 2) {
                double uHalve = -(u + startOffset) / 2;
                phis[u] = realephi(uHalve, windowSize);
                phis[u + 1] = complexephi(uHalve, windowSize);
            }
            // means and stddev for each sliding window
            int end = Math.max(1, series.length - windowSize + 1);
            double[] means = new double[end];
            double[] stds = new double[end];
            calcIncreamentalMeanStddev(windowSize, series, means, stds);
            // holds the DFT of each sliding window
            double[][] transformed = new double[end][];
            double[] mftData = null; // running (unnormalised) transform of the current window
            for (int t = 0; t < end; t++) {
                // use the MFT
                if (t > 0) {
                    for (int k = 0; k < l; k += 2) {
                        // slide the window: remove the sample leaving, add the one entering,
                        // then rotate by the coefficient's twiddle factor
                        double real1 = (mftData[k] + series[t + windowSize - 1] - series[t - 1]);
                        double imag1 = (mftData[k + 1]);
                        double real = complexMulReal(real1, imag1, phis[k], phis[k + 1]);
                        double imag = complexMulImag(real1, imag1, phis[k], phis[k + 1]);
                        mftData[k] = real;
                        mftData[k + 1] = imag;
                    }
                } // use the DFT for the first offset
                else {
                    mftData = Arrays.copyOf(series, windowSize);
                    mftData = DFTunnormed(mftData);
                }
                // normalization for lower bounding
                transformed[t] = normalizeDFT(Arrays.copyOf(mftData, l), stds[t]);
            }
            return transformed;
        }
        /**
         * Fills means/stds with the mean and standard deviation of every sliding
         * window of the given length, computed incrementally (each window's sums
         * are derived from the previous window's in O(1)).
         * Name typo ("Increamental") kept: renaming would break its caller.
         *
         * @param windowLength sliding window length
         * @param series input series
         * @param means output array, one mean per window (pre-sized by caller)
         * @param stds output array, one std per window (pre-sized by caller)
         */
        private void calcIncreamentalMeanStddev(int windowLength, double[] series, double[] means, double[] stds) {
            double sum = 0;
            double squareSum = 0;
            // it is faster to multiply than to divide
            double rWindowLength = 1.0 / (double) windowLength;
            double[] tsData = series;
            // full sums for the first window
            for (int ww = 0; ww < windowLength; ww++) {
                sum += tsData[ww];
                squareSum += tsData[ww] * tsData[ww];
            }
            means[0] = sum * rWindowLength;
            double buf = squareSum * rWindowLength - means[0] * means[0];
            stds[0] = buf > 0 ? Math.sqrt(buf) : 0;
            // slide: add entering sample, remove leaving sample
            for (int w = 1, end = tsData.length - windowLength + 1; w < end; w++) {
                sum += tsData[w + windowLength - 1] - tsData[w - 1];
                means[w] = sum * rWindowLength;
                squareSum += tsData[w + windowLength - 1] * tsData[w + windowLength - 1] - tsData[w - 1] * tsData[w - 1];
                buf = squareSum * rWindowLength - means[w] * means[w];
                stds[w] = buf > 0 ? Math.sqrt(buf) : 0;
            }
        }
        /** Real part of the complex product (r1 + i*im1)(r2 + i*im2). */
        private static double complexMulReal(double r1, double im1, double r2, double im2) {
            return r1 * r2 - im1 * im2;
        }

        /** Imaginary part of the complex product (r1 + i*im1)(r2 + i*im2). */
        private static double complexMulImag(double r1, double im1, double r2, double im2) {
            return r1 * im2 + r2 * im1;
        }

        /** cos(2*pi*u/M): real part of e^(2*pi*i*u/M). */
        private static double realephi(double u, double M) {
            return Math.cos(2 * Math.PI * u / M);
        }

        /** -sin(2*pi*u/M): imaginary part of e^(-2*pi*i*u/M). */
        private static double complexephi(double u, double M) {
            return -Math.sin(2 * Math.PI * u / M);
        }
protected double[][] disjointWindows(double [] data) {
int amount = (int)Math.ceil(data.length/(double)windowSize);
double[][] subSequences = new double[amount][windowSize];
for (int win = 0; win < amount; ++win) {
int offset = Math.min(win*windowSize, data.length-windowSize);
//copy the elements windowStart to windowStart+windowSize from data into
//the subsequence matrix at position windowStart
System.arraycopy(data,offset,subSequences[win],0,windowSize);
}
return subSequences;
}
        /**
         * Multiple Coefficient Binning: learns, for each of the wordLength DFT
         * coefficients, alphabetSize equi-depth bin boundaries over the
         * coefficients of all disjoint windows of all train instances. The last
         * boundary is always Double.MAX_VALUE. Sets and returns the breakpoints
         * field used for all later word creation (train and test).
         *
         * @param data training instances
         * @return the learned breakpoints, [letter][bin]
         */
        protected double[][] MCB(Instances data) {
            double[][][] dfts = new double[data.numInstances()][][];

            int sample = 0;
            for (Instance inst : data) {
                dfts[sample++] = performDFT(disjointWindows(toArrayNoClass(inst))); //approximation
            }

            int numInsts = dfts.length;
            int numWindowsPerInst = dfts[0].length;
            int totalNumWindows = numInsts*numWindowsPerInst;

            breakpoints = new double[wordLength][alphabetSize];

            for (int letter = 0; letter < wordLength; ++letter) { //for each dft coeff

                //extract this column from all windows in all instances
                double[] column = new double[totalNumWindows];
                for (int inst = 0; inst < numInsts; ++inst)
                    for (int window = 0; window < numWindowsPerInst; ++window) {
                        //rounding dft coefficients to reduce noise
                        column[(inst * numWindowsPerInst) + window] = Math.round(dfts[inst][window][letter]*100.0)/100.0;
                    }

                //sort, and run through to find breakpoints for equi-depth bins
                Arrays.sort(column);

                double binIndex = 0;
                double targetBinDepth = (double)totalNumWindows / (double)alphabetSize;

                for (int bp = 0; bp < alphabetSize-1; ++bp) {
                    binIndex += targetBinDepth;
                    breakpoints[letter][bp] = column[(int)binIndex];
                }

                breakpoints[letter][alphabetSize-1] = Double.MAX_VALUE; //last one can always = infinity
            }

            return breakpoints;
        }
        /**
         * Builds a brand new boss bag from the passed fourier transformed data, rather than from
         * looking up existing transforms from earlier builds.
         *
         * to be used e.g to transform new test instances
         *
         * @param dfts one (already normalised) transform per sliding window
         * @return the spatial-pyramid histogram, pyramid weights already applied
         */
        protected SPBag createSPBagSingle(double[][] dfts) {
            SPBag bag = new SPBag();
            BitWordInt lastWord = new BitWordInt();

            int wInd = 0; // index of the current window
            int trivialMatchCount = 0; // length of the current run of identical words

            for (double[] d : dfts) {
                BitWordInt word = createWord(d);
                //add to bag, unless num reduction applies
                if (numerosityReduction && word.equals(lastWord)) {
                    ++trivialMatchCount;
                    ++wInd;
                }
                else {
                    //if a run of equivalent words, those words essentially representing the same
                    //elongated pattern. still apply numerosity reduction, however use the central
                    //time position of the elongated pattern to represent its position
                    addWordToPyramid(word, wInd - (trivialMatchCount/2), bag);

                    lastWord = word;
                    trivialMatchCount = 0;
                    ++wInd;
                }
            }

            applyPyramidWeights(bag);

            return bag;
        }
protected BitWordInt createWord(double[] dft) {
BitWordInt word = new BitWordInt();
for (int l = 0; l < wordLength; ++l) {//for each letter
for (int bp = 0; bp < alphabetSize; ++bp) {//run through breakpoints until right one found
if (dft[l] <= breakpoints[l][bp]) {
word.push(bp); //add corresponding letter to word
break;
}
}
}
return word;
}
/**
* Assumes class index, if present, is last
* @return data of passed instance in a double array with the class value removed if present
*/
protected static double[] toArrayNoClass(Instance inst) {
int length = inst.numAttributes();
if (inst.classIndex() >= 0)
--length;
double[] data = new double[length];
for (int i=0, j=0; i < inst.numAttributes(); ++i)
if (inst.classIndex() != i)
data[j++] = inst.value(i);
return data;
}
/**
* @return BOSSSpatialPyramidsTransform-ed bag, built using current parameters
*/
public SPBag BOSSSpatialPyramidsTransform(Instance inst) {
double[][] mfts = performMFT(toArrayNoClass(inst)); //approximation
SPBag bag2 = createSPBagSingle(mfts); //discretisation/bagging
bag2.setClassVal(inst.classValue());
return bag2;
}
        /**
         * Shortens all bags in this BOSSSpatialPyramids_Redo instance (histograms) to the newWordLength, if wordlengths
         * are same, instance is UNCHANGED
         *
         * @param newWordLength wordLength to shorten it to
         * @return new boss classifier with newWordLength, or passed in classifier if wordlengths are same
         * @throws Exception if newWordLength is larger than the current length or below 2
         */
        public BOSSSpatialPyramidsIndividual buildShortenedSPBags(int newWordLength) throws Exception {
            if (newWordLength == wordLength) //case of first iteration of word length search in ensemble
                return this;
            if (newWordLength > wordLength)
                throw new Exception("Cannot incrementally INCREASE word length, current:"+wordLength+", requested:"+newWordLength);
            if (newWordLength < 2)
                throw new Exception("Invalid wordlength requested, current:"+wordLength+", requested:"+newWordLength);

            // copies meta data and shares SFAwords/breakpoints by reference
            BOSSSpatialPyramidsIndividual newBoss = new BOSSSpatialPyramidsIndividual(this, newWordLength);

            //build hists with new word length from SFA words, and copy over the class values of original insts
            for (int i = 0; i < bags.size(); ++i) {
                SPBag newSPBag = createSPBagFromWords(newWordLength, SFAwords[i], true);
                newSPBag.setClassVal(bags.get(i).getClassVal());
                newBoss.bags.add(newSPBag);
            }

            return newBoss;
        }
protected SPBag shortenSPBag(int newWordLength, int bagIndex) {
SPBag newSPBag = new SPBag();
for (BitWordInt word : SFAwords[bagIndex]) {
BitWordInt shortWord = new BitWordInt(word);
shortWord.shortenByFourierCoefficient();
Double val = newSPBag.get(shortWord);
if (val == null)
val = 0.0;
newSPBag.put(new ComparablePair<BitWord, Integer>(shortWord, 0), val + 1.0);
}
return newSPBag;
}
        /**
         * Builds a bag from the set of words for a pre-transformed series of a given wordlength.
         * @param thisWordLength word length the resulting bag should use
         * @param words the full-length SFA words of one series
         * @param wordLengthSearching if true, length of each SFAwords word assumed to be 16,
         * and need to shorten it to whatever actual value needed in this particular version of the
         * classifier. if false, this is a standalone classifier with pre-defined wordlength (etc),
         * and therefore sfawords are that particular length already, no need to shorten
         * @return the spatial-pyramid histogram, pyramid weights already applied
         */
        protected SPBag createSPBagFromWords(int thisWordLength, BitWordInt[] words, boolean wordLengthSearching) {
            SPBag bag = new SPBag();
            BitWordInt lastWord = new BitWordInt();

            int wInd = 0; // index of the current window
            int trivialMatchCount = 0; //keeps track of how many words have been the same so far

            for (BitWordInt w : words) {
                BitWordInt word = new BitWordInt(w);
                if (wordLengthSearching)
                    word.shorten(16-thisWordLength); //TODO hack, word.length=16=maxwordlength, wordLength of 'this' BOSSSpatialPyramids instance unreliable, length of SFAwords = maxlength

                //add to bag, unless num reduction applies
                if (numerosityReduction && word.equals(lastWord)) {
                    ++trivialMatchCount;
                    ++wInd;
                }
                else {
                    //if a run of equivalent words, those words essentially representing the same
                    //elongated pattern. still apply numerosity reduction, however use the central
                    //time position to represent its position
                    addWordToPyramid(word, wInd - (trivialMatchCount/2), bag);

                    lastWord = word;
                    trivialMatchCount = 0;
                    ++wInd;
                }
            }

            applyPyramidWeights(bag);

            return bag;
        }
        /**
         * Changes the number of pyramid levels and rebuilds every train bag from
         * the stored SFA words accordingly. No-op if the level count is unchanged.
         *
         * @param newLevels new number of pyramid levels
         */
        protected void changeNumLevels(int newLevels) {
            //currently, simply remaking bags from words
            //alternatively: un-weight all bags, add(run through SFAwords again)/remove levels, re-weight all
            if (newLevels == this.levels)
                return;

            this.levels = newLevels;

            for (int inst = 0; inst < bags.size(); ++inst) {
                SPBag bag = createSPBagFromWords(wordLength, SFAwords[inst], true); //rebuild bag
                bag.setClassVal(bags.get(inst).classVal);
                bags.set(inst, bag); //overwrite old
            }
        }
        /**
         * Weights each (word, quadrant) count in place by
         * levelWeighting^(levels-level-1), where level is the pyramid level the
         * quadrant index belongs to (level l occupies the next 2^l quadrant
         * indices after the levels before it).
         *
         * @param bag histogram whose counts are reweighted in place
         */
        protected void applyPyramidWeights(SPBag bag) {
            for (Entry<ComparablePair<BitWord, Integer>, Double> ent : bag.entrySet()) {
                //find level that this quadrant is on
                int quadrant = ent.getKey().var2;
                int qEnd = 0;
                int level = 0;
                while (qEnd < quadrant) {
                    int numQuadrants = (int)Math.pow(2, ++level);
                    qEnd+=numQuadrants;
                }

                double val = ent.getValue() * (Math.pow(levelWeighting, levels-level-1)); //levelWeighting ^ (levels - level - 1)
                bag.put(ent.getKey(), val);
            }
        }
        /**
         * Adds one occurrence of the word to the bag at every pyramid level: for
         * level l the series is split into 2^l equal quadrants, and the count for
         * (word, quadrant containing the window's centre) is incremented.
         *
         * @param word SFA word to record
         * @param wInd window start index of the word within the series
         * @param bag histogram to update (weights applied later, in applyPyramidWeights)
         */
        protected void addWordToPyramid(BitWordInt word, int wInd, SPBag bag) {
            int qStart = 0; //for this level, whats the start index for quadrants
            //e.g level 0 = 0
            //    level 1 = 1
            //    level 2 = 3
            for (int l = 0; l < levels; ++l) {
                //need to do the cell finding thing in the regular grid
                int numQuadrants = (int)Math.pow(2, l);
                int quadrantSize = seriesLength / numQuadrants;
                int pos = wInd + (windowSize/2); //use the middle of the window as its position
                int quadrant = qStart + (pos/quadrantSize);

                ComparablePair<BitWord, Integer> key = new ComparablePair<>(word, quadrant);
                Double val = bag.get(key);

                if (val == null)
                    val = 0.0;
                bag.put(key, ++val);

                qStart += numQuadrants;
            }
        }
protected BitWordInt[] createSFAwords(Instance inst) throws Exception {
double[][] dfts2 = performMFT(toArrayNoClass(inst)); //approximation
BitWordInt[] words2 = new BitWordInt[dfts2.length];
for (int window = 0; window < dfts2.length; ++window)
words2[window] = createWord(dfts2[window]);//discretisation
return words2;
}
        /**
         * Learns the MCB breakpoints from the training data, then computes and
         * stores the SFA words and the (weighted) spatial-pyramid histogram of
         * every training instance. Requires the class attribute to be last.
         */
        @Override
        public void buildClassifier(Instances data) throws Exception {
            if (data.classIndex() != data.numAttributes()-1)
                throw new Exception("BOSSSpatialPyramids_BuildClassifier: Class attribute not set as last attribute in dataset");

            seriesLength = data.numAttributes()-1; // used for pyramid quadrant sizing

            breakpoints = MCB(data); //breakpoints to be used for making sfa words for train AND test data
            SFAwords = new BitWordInt[data.numInstances()][];
            bags = new ArrayList<>(data.numInstances());

            for (int inst = 0; inst < data.numInstances(); ++inst) {
                SFAwords[inst] = createSFAwords(data.get(inst));
                SPBag bag = createSPBagFromWords(wordLength, SFAwords[inst], false);
                bag.setClassVal(data.get(inst).classValue());
                bags.add(bag);
            }
        }
/**
* Computes BOSSSpatialPyramids distance between two bags d(test, train), is NON-SYMETRIC operation, ie d(a,b) != d(b,a)
* @return distance FROM instA TO instB
*/
public double BOSSSpatialPyramidsDistance(SPBag instA, SPBag instB) {
double dist = 0.0;
//find dist only from values in instA
for (Entry<ComparablePair<BitWord, Integer>, Double> entry : instA.entrySet()) {
Double valA = entry.getValue();
Double valB = instB.get(entry.getKey());
if (valB == null)
valB = 0.0;
dist += (valA-valB)*(valA-valB);
}
return dist;
}
/**
* Computes BOSSSpatialPyramids distance between two bags d(test, train), is NON-SYMETRIC operation, ie d(a,b) != d(b,a).
*
* Quits early if the dist-so-far is greater than bestDist (assumed is in fact the dist still squared), and returns Double.MAX_VALUE
*
* @return distance FROM instA TO instB, or Double.MAX_VALUE if it would be greater than bestDist
*/
public double BOSSSpatialPyramidsDistance(SPBag instA, SPBag instB, double bestDist) {
double dist = 0.0;
//find dist only from values in instA
for (Entry<ComparablePair<BitWord, Integer>, Double> entry : instA.entrySet()) {
Double valA = entry.getValue();
Double valB = instB.get(entry.getKey());
if (valB == null)
valB = 0.0;
dist += (valA-valB)*(valA-valB);
if (dist > bestDist)
return Double.MAX_VALUE;
}
return dist;
}
public double histogramIntersection(SPBag instA, SPBag instB) {
//min vals of keys that exist in only one of the bags will always be 0
//therefore want to only bother looking at counts of words in both bags
//therefore will simply loop over words in a, skipping those that dont appear in b
//no need to loop over b, since only words missed will be those not in a anyway
double sim = 0.0;
for (Entry<ComparablePair<BitWord, Integer>, Double> entry : instA.entrySet()) {
Double valA = entry.getValue();
Double valB = instB.get(entry.getKey());
if (valB == null)
continue;
sim += Math.min(valA,valB);
}
return sim;
}
        /**
         * 1-NN classification under histogram-intersection similarity against all
         * train bags. Ties keep the earlier training instance; if no train bag
         * shares any word with the test bag, class 0 is returned.
         */
        @Override
        public double classifyInstance(Instance instance) throws Exception {
            SPBag testSPBag = BOSSSpatialPyramidsTransform(instance);

            double bestSimilarity = 0.0;
            double nn = -1.0;

            for (int i = 0; i < bags.size(); ++i) {
                double similarity = histogramIntersection(testSPBag, bags.get(i));

                if (similarity > bestSimilarity) {
                    bestSimilarity = similarity;
                    nn = bags.get(i).getClassVal();
                }
            }

            //if no bags had ANY similarity, just randomly guess 0
            //found that this occurs in <1% of test cases for certain parameter sets
            //in the ensemble
            if (nn == -1.0)
                nn = 0.0;

            return nn;
        }
        /**
         * Used within BOSSSpatialPyramidsEnsemble as part of a leave-one-out crossvalidation, to skip having to rebuild
         * the classifier every time (since the n histograms would be identical each time anyway), therefore this classifies
         * the instance at the index passed while ignoring its own corresponding histogram.
         * Unlike the Instance overload, bestSimilarity starts at -1 so some
         * neighbour is always chosen, even with zero similarity.
         *
         * @param test index of instance to classify
         * @return classification
         */
        public double classifyInstance(int test) {
            double bestSimilarity = -1.0;
            double nn = -1.0;

            SPBag testSPBag = bags.get(test);

            for (int i = 0; i < bags.size(); ++i) {
                if (i == test) //skip 'this' one, leave-one-out
                    continue;

                double similarity = histogramIntersection(testSPBag, bags.get(i));

                if (similarity > bestSimilarity) {
                    bestSimilarity = similarity;
                    nn = bags.get(i).getClassVal();
                }
            }

            return nn;
        }
        /** Not implemented: this classifier only produces hard 1-NN predictions. */
        @Override
        public double[] distributionForInstance(Instance instance) throws Exception {
            throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
        }

        /** Not implemented: capability checking is not provided by this inner classifier. */
        @Override
        public Capabilities getCapabilities() {
            throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
        }
public static void detailedFold0Test(String dset) {
System.out.println("BOSSSpatialPyramidsIndividual DetailedTest\n");
try {
Instances train = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dset+"\\"+dset+"_TRAIN.arff");
Instances test = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dset+"\\"+dset+"_TEST.arff");
System.out.println(train.relationName());
int windowSize = 10;
int alphabetSize = 4;
int wordLength = 58;
int levels = 2;
boolean norm = true;
BOSSSpatialPyramidsIndividual boss = new BOSSSpatialPyramidsIndividual(windowSize, alphabetSize, wordLength, norm, levels);
System.out.println(boss.getWordLength() + " " + boss.getAlphabetSize() + " " + boss.getWindowSize() + " " + boss.isNorm());
System.out.println("Training starting");
long start = System.nanoTime();
boss.buildClassifier(train);
double trainTime = (System.nanoTime() - start) / 1000000000.0; //seconds
System.out.println("Training done (" + trainTime + "s)");
System.out.println("Breakpoints: ");
for (int i = 0; i < boss.breakpoints.length; i++) {
System.out.print("Letter " + i + ": ");
for (int j = 0; j < boss.breakpoints[i].length; j++) {
System.out.print(boss.breakpoints[i][j] + " ");
}
System.out.println("");
}
System.out.println("\nTesting starting");
start = System.nanoTime();
double acc = ClassifierTools.accuracy(test, boss);
double testTime = (System.nanoTime() - start) / 1000000000.0; //seconds
System.out.println("Testing done (" + testTime + "s)");
System.out.println("\nACC: " + acc);
}
catch (Exception e) {
System.out.println(e);
e.printStackTrace();
}
}
}
}
/*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.dictionary_based;
import evaluation.storage.ClassifierResults;
import experiments.data.DatasetLoading;
import fileIO.OutFile;
import tsml.classifiers.*;
import tsml.classifiers.dictionary_based.bitword.BitWord;
import tsml.classifiers.dictionary_based.bitword.BitWordInt;
import tsml.classifiers.dictionary_based.bitword.BitWordLong;
import tsml.data_containers.TSCapabilities;
import tsml.data_containers.TimeSeriesInstance;
import tsml.data_containers.TimeSeriesInstances;
import tsml.data_containers.utilities.Converter;
import utilities.ClassifierTools;
import utilities.generic_storage.SerialisableComparablePair;
import weka.classifiers.functions.GaussianProcesses;
import weka.core.*;
import java.io.*;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static utilities.Utilities.argMax;
/**
* TDE classifier with parameter search and ensembling for univariate and
* multivariate time series classification.
* If parameters are known, use the class IndividualTDE and directly provide them.
* <p>
* Has the capability to contract train time and checkpoint.
* <p>
 * Alphabet size fixed to four and maximum word length of 16.
* <p>
* Implementation based on the algorithm described in getTechnicalInformation()
*
* @author Matthew Middlehurst
*/
public class TDE extends EnhancedAbstractClassifier implements TrainTimeContractable,
Checkpointable, TechnicalInformationHandler, MultiThreadable, Visualisable, Interpretable {
/**
* Paper defining TDE.
*
* @return TechnicalInformation for TDE
*/
@Override //TechnicalInformationHandler
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
result = new TechnicalInformation(TechnicalInformation.Type.ARTICLE);
result.setValue(TechnicalInformation.Field.AUTHOR, "M. Middlehurst, J. Large, G. Cawley and A. Bagnall");
result.setValue(TechnicalInformation.Field.TITLE, "The Temporal Dictionary Ensemble (TDE) Classifier for " +
"Time Series Classification");
result.setValue(TechnicalInformation.Field.JOURNAL, "The European Conference on Machine Learning and " +
"Principles and Practice of Knowledge Discovery in Databases");
result.setValue(TechnicalInformation.Field.YEAR, "2020");
return result;
}
    // --- Ensemble construction settings ---
    private int parametersConsidered = 250; // number of parameter sets evaluated
    private int maxEnsembleSize = 50; // max members kept in the ensemble
    private boolean histogramIntersection = true; // distance measure toggle for members
    private Boolean useBigrams; //defaults to true if univariate, false if multivariate
    private boolean useFeatureSelection = false;
    private double trainProportion = 0.7; // per-member train subsample proportion
    private double dimensionCutoffThreshold = 0.85; // multivariate dimension accuracy cutoff
    private int maxNoDimensions = 20; // multivariate dimension cap

    // --- Parameter search state (GP-guided when bayesianParameterSelection) ---
    private boolean bayesianParameterSelection = true;
    private int initialRandomParameters = 50; // random sets evaluated before the GP takes over
    private int initialParameterCount;
    private Instances parameterPool; // candidate parameter sets not yet tried
    private Instances prevParameters; // parameter sets already evaluated
    private int parametersRemaining;

    // --- Parameter value spaces ---
    private final int[] wordLengths = {16, 14, 12, 10, 8};
    private final int[] alphabetSize = {4};
    private final boolean[] normOptions = {true, false};
    private final Integer[] levels = {1, 2, 3};
    private final boolean[] useIGB = {true, false};
    private double maxWinLenProportion = 1; // max window length as proportion of series length
    private double maxWinSearchProportion = 0.25; // proportion of window lengths searched

    // --- Ensemble cutoff ---
    private boolean cutoff = false;
    private double cutoffThreshold = 0.7; // members below this proportion of the best accuracy are dropped

    private transient LinkedList<IndividualTDE> classifiers; // the ensemble members

    // --- Checkpointing state ---
    private String checkpointPath;
    private boolean checkpoint = false;
    private long checkpointTime = 0;
    private long checkpointTimeDiff = 0;
    private ArrayList<Integer> checkpointIDs;
    private boolean internalContractCheckpointHandling = true;
    private boolean cleanupCheckpointFiles = false;
    private boolean loadAndFinish = false;

    // --- Train-time contracting state ---
    private long trainContractTimeNanos = 0;
    private boolean trainTimeContract = false;
    private boolean underContractTime = true;

    private ArrayList<Double> paramAccuracy; // accuracy per evaluated parameter set
    private ArrayList<Double> paramTime; // build time per evaluated parameter set

    private transient TimeSeriesInstances train;

    // --- Multi-threading ---
    private int numThreads = 1;
    private boolean multiThread = false;
    private ExecutorService ex;

    //Classifier build data, stored as field for checkpointing.
    private int classifiersBuilt;
    private int lowestAccIdx;
    private double lowestAcc;
    private double maxAcc;

    // --- Visualisation / interpretability output state ---
    private String visSavePath;
    private String interpSavePath;
    private ArrayList<Integer> interpData;
    private ArrayList<Integer> interpPreds;
    private int interpCount = 0;
    private double[] interpSeries;
    private int interpPred;

    protected static final long serialVersionUID = 1L;
    /**
     * Default constructor for TDE. Can estimate own performance.
     */
    public TDE() {
        super(CAN_ESTIMATE_OWN_PERFORMANCE); // flags that TDE can produce its own train estimates
    }
    /**
     * Set the amount of parameter sets considered for the ensemble.
     *
     * @param size number of parameters considered
     */
    public void setParametersConsidered(int size) {
        parametersConsidered = size;
    }

    /**
     * Max number of classifiers for the ensemble.
     *
     * @param size max ensemble size
     */
    public void setMaxEnsembleSize(int size) {
        maxEnsembleSize = size;
    }

    /**
     * Proportion of train set to be randomly subsampled for each classifier.
     *
     * @param d train subsample proportion
     */
    public void setTrainProportion(double d) {
        trainProportion = d;
    }

    /**
     * Dimension accuracy cutoff threshold for multivariate time series.
     *
     * @param d dimension cutoff threshold
     */
    public void setDimensionCutoffThreshold(double d) {
        dimensionCutoffThreshold = d;
    }

    /**
     * Maximum number of dimensions kept for multivariate time series.
     *
     * @param d Max number of dimensions
     */
    public void setMaxNoDimensions(int d) {
        maxNoDimensions = d;
    }

    /**
     * Whether to delete checkpoint files after building has finished.
     *
     * @param b clean up checkpoint files
     */
    public void setCleanupCheckpointFiles(boolean b) {
        cleanupCheckpointFiles = b;
    }

    /**
     * Whether to load checkpoint files and finish building from the loaded state.
     *
     * @param b load ser files and finish building
     */
    public void loadAndFinish(boolean b) {
        loadAndFinish = b;
    }

    /**
     * Max window length as proportion of the series length.
     *
     * @param d max window length proportion
     */
    public void setMaxWinLenProportion(double d) {
        maxWinLenProportion = d;
    }

    /**
     * Max number of window lengths to search through as proportion of the series length.
     *
     * @param d window length search proportion
     */
    public void setMaxWinSearchProportion(double d) {
        maxWinSearchProportion = d;
    }

    /**
     * Whether to use GP parameter selection for IndividualTDE classifiers.
     *
     * @param b use GP parameter selection
     */
    public void setBayesianParameterSelection(boolean b) {
        bayesianParameterSelection = b;
    }

    /**
     * Whether to use bigrams in IndividualTDE classifiers.
     *
     * @param b use bigrams
     */
    public void setUseBigrams(Boolean b) {
        useBigrams = b;
    }

    /**
     * Whether to use feature selection in IndividualTDE classifiers.
     *
     * @param b use feature selection
     */
    public void setUseFeatureSelection(boolean b) {
        useFeatureSelection = b;
    }

    /**
     * Whether to remove ensemble members below a proportion of the highest accuracy.
     *
     * @param b use ensemble cutoff
     */
    public void setCutoff(boolean b) {
        cutoff = b;
    }

    /**
     * Ensemble accuracy cutoff as proportion of highest ensemble member accuracy.
     *
     * @param d cutoff proportion
     */
    public void setCutoffThreshold(double d) {
        cutoffThreshold = d;
    }
    /**
     * Outputs TDE and IndividualTDE parameters as a String.
     *
     * @return String written to results files
     */
    @Override //SaveParameterInfo
    public String getParameters() {
        StringBuilder sb = new StringBuilder();
        sb.append(super.getParameters());

        sb.append(",numClassifiers,").append(classifiers.size()).append(",contractTime,")
                .append(trainContractTimeNanos);

        // one parameter group per ensemble member
        for (int i = 0; i < classifiers.size(); ++i) {
            IndividualTDE indiv = classifiers.get(i);
            sb.append(",windowSize,").append(indiv.getWindowSize()).append(",wordLength,");
            sb.append(indiv.getWordLength()).append(",alphabetSize,").append(indiv.getAlphabetSize());
            sb.append(",norm,").append(indiv.getNorm()).append(",levels,").append(indiv.getLevels());
            sb.append(",IGB,").append(indiv.getIGB());
        }

        return sb.toString();
    }
    /**
     * Returns the capabilities for TDE. These are that the
     * data must be numeric or relational, with no missing and a nominal class
     *
     * @return the capabilities of TDE
     */
    @Override //AbstractClassifier
    public Capabilities getCapabilities() {
        Capabilities result = super.getCapabilities();
        result.disableAll();

        result.setMinimumNumberInstances(2);

        // attributes
        result.enable(Capabilities.Capability.RELATIONAL_ATTRIBUTES);
        result.enable(Capabilities.Capability.NUMERIC_ATTRIBUTES);

        // class
        result.enable(Capabilities.Capability.NOMINAL_CLASS);

        return result;
    }
/**
 * Returns the time series capabilities for TDE. These are that the data must be equal
 * length, with no missing values.
 *
 * @return the time series capabilities of TDE
 */
public TSCapabilities getTSCapabilities() {
    TSCapabilities caps = new TSCapabilities();
    caps.enable(TSCapabilities.EQUAL_LENGTH);
    caps.enable(TSCapabilities.MULTI_OR_UNIVARIATE);
    caps.enable(TSCapabilities.NO_MISSING_VALUES);
    return caps;
}
/**
 * Build the TDE classifier: validate the data, set up window search parameters,
 * optionally resume from a checkpoint, build the ensemble and (optionally) estimate
 * train accuracy.
 *
 * @param data TimeSeriesInstances object
 * @throws Exception unable to train model
 */
@Override //TSClassifier
public void buildClassifier(final TimeSeriesInstances data) throws Exception {
    trainResults = new ClassifierResults();
    rand.setSeed(seed);
    numClasses = data.numClasses();
    trainResults.setEstimatorName(getClassifierName());
    trainResults.setBuildTime(System.nanoTime());

    // can classifier handle the data?
    getTSCapabilities().test(data);

    train = data;

    //Window length settings
    int minWindow = 10;
    int maxWindow = (int) (data.getMaxLength() * maxWinLenProportion);
    if (maxWindow < minWindow) minWindow = maxWindow / 2;
    //whats the max number of window sizes that should be searched through
    double maxWindowSearches = data.getMaxLength() * maxWinSearchProportion;
    int winInc = (int) ((maxWindow - minWindow) / maxWindowSearches);
    if (winInc < 1) winInc = 1;

    printLineDebug("TDE Classifier: Total contract time limit = " + trainContractTimeNanos + " nanos");
    printLineDebug("maxWindow = " + maxWindow + " window increment = " + winInc);

    //path checkpoint files will be saved to
    checkpointPath = checkpointPath + "/" + checkpointName(data.getProblemName()) + "/";
    File f = new File(checkpointPath + "TDE.ser");

    //if checkpointing and serialised files exist load said files
    if (checkpoint && f.exists()) {
        if (debug)
            System.out.println("Loading from checkpoint file");
        long time = System.nanoTime();
        loadFromFile(checkpointPath + "TDE.ser");
        printLineDebug("Spent " + (System.nanoTime() - time) + "nanoseconds loading ser files");
    }
    //initialise variables
    else {
        classifiers = new LinkedList<>();

        if (checkpoint) {
            checkpointIDs = new ArrayList<>();
            for (int i = 0; i < maxEnsembleSize; i++) {
                checkpointIDs.add(i);
            }
        }

        // Default bigrams on for univariate data only. BUG FIX: the previous
        // expression (useBigrams = !multivariate && useBigrams == null) silently reset
        // an explicit user setting of 'true' back to false; now only a null (unset)
        // value is defaulted, while multivariate data still forces bigrams off.
        if (useBigrams == null)
            useBigrams = !data.isMultivariate();
        else
            useBigrams = useBigrams && !data.isMultivariate();

        parameterPool = uniqueParameters(minWindow, maxWindow, winInc);

        classifiersBuilt = 0;
        lowestAccIdx = 0;
        lowestAcc = Double.MAX_VALUE;
        maxAcc = 0;
    }

    if (multiThread) {
        ex = Executors.newFixedThreadPool(numThreads);
    }

    //Contracting: keep building until the contract runs out rather than a fixed count
    if (trainTimeContract) {
        parametersConsidered = Integer.MAX_VALUE;
    }

    //Build ensemble if not set to just load ser files
    if (!(checkpoint && loadAndFinish)) {
        buildTDE(data);
    }

    if (checkpoint) {
        checkpoint(null, false);
    }

    //end train time in nanoseconds
    trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
    trainResults.setBuildTime(System.nanoTime() - trainResults.getBuildTime() - checkpointTimeDiff);
    // BUG FIX: buildTime now holds the elapsed duration, so print it directly instead
    // of subtracting it from the current clock (which produced a meaningless number).
    printLineDebug(" Build time = " + trainResults.getBuildTime() + " nanos ");

    //Estimate train accuracy
    if (getEstimateOwnPerformance()) {
        long start = System.nanoTime();
        findEnsembleTrainEstimate();
        long end = System.nanoTime();
        trainResults.setErrorEstimateTime(end - start);
    }
    trainResults.setBuildPlusEstimateTime(trainResults.getBuildTime() + trainResults.getErrorEstimateTime());
    trainResults.setParas(getParameters());

    //delete any serialised files and holding folder for checkpointing on completion
    if (checkpoint && cleanupCheckpointFiles) {
        checkpointCleanup();
    }

    printLineDebug("*************** Finished TDE Build with " + classifiersBuilt + " classifiers built in train time " +
            (trainResults.getBuildTime()/1000000000/60/60.0) + " hours, Train+Estimate time = " + (trainResults.getBuildPlusEstimateTime()/1000000000/60/60.0) + " hours ***************");
}
/**
 * Build the TDE classifier from a weka Instances object by converting to the
 * time series representation first.
 *
 * @param data weka Instances object
 * @throws Exception unable to train model
 */
@Override //AbstractClassifier
public void buildClassifier(final Instances data) throws Exception {
    TimeSeriesInstances converted = Converter.fromArff(data);
    buildClassifier(converted);
}
/**
 * Builds parametersConsidered IndividualTDE classifiers or continues building until the train time contract
 * finishes.
 * Randomly subsamples the train set for each classifier and selects parameters using a GP model.
 * Keeps the top maxEnsembleSize classifiers with the highest accuracy found using LOOCV for the ensemble.
 *
 * @param series TimeSeriesInstances object
 * @throws Exception unable to train model
 */
private void buildTDE(TimeSeriesInstances series) throws Exception {
    //build classifiers up to a set size
    while (underContractTime && classifiersBuilt < parametersConsidered && parametersRemaining > 0) {
        long indivBuildTime = System.nanoTime();
        boolean checkpointChange = false;
        double[] parameters = selectParameters();
        // null means the parameter pool is exhausted or no set fits in the remaining contract
        if (parameters == null) break;

        // parameter layout: [0]=wordLength, [1]=alphabetSize, [2]=windowSize,
        // [3]=norm flag, [4]=levels, [5]=IGB flag
        IndividualTDE indiv;
        if (series.isMultivariate()) {
            indiv = new MultivariateIndividualTDE((int) parameters[0], (int) parameters[1], (int) parameters[2],
                    parameters[3] == 1, (int) parameters[4], parameters[5] == 1,
                    multiThread, numThreads, ex);
            ((MultivariateIndividualTDE) indiv).setDimensionCutoffThreshold(dimensionCutoffThreshold);
            ((MultivariateIndividualTDE) indiv).setMaxNoDimensions(maxNoDimensions);
        } else {
            indiv = new IndividualTDE((int) parameters[0], (int) parameters[1], (int) parameters[2],
                    parameters[3] == 1, (int) parameters[4], parameters[5] == 1,
                    multiThread, numThreads, ex);
        }
        indiv.setCleanAfterBuild(true);
        indiv.setHistogramIntersection(histogramIntersection);
        indiv.setUseBigrams(useBigrams);
        indiv.setUseFeatureSelection(useFeatureSelection);
        indiv.setSeed(seed);

        TimeSeriesInstances data = trainProportion < 1 && trainProportion > 0 ? subsampleData(series, indiv)
                : series;
        indiv.buildClassifier(data);

        // LOOCV accuracy; once the ensemble is full, early-abandons against the
        // current worst member's accuracy (sentinel -99999999 disables the abandon)
        double accuracy = individualTrainAcc(indiv, data, classifiers.size() < maxEnsembleSize
                ? -99999999 : lowestAcc);
        indiv.setAccuracy(accuracy);
        // acc^4 voting weight; Double.MIN_VALUE keeps a zero-accuracy member's vote negligible
        if (accuracy == 0) indiv.setWeight(Double.MIN_VALUE);
        else indiv.setWeight(Math.pow(accuracy, 4));

        if (bayesianParameterSelection) paramAccuracy.add(accuracy);
        if (trainTimeContract) paramTime.add((double) (System.nanoTime() - indivBuildTime));

        if (cutoff && indiv.getAccuracy() > maxAcc) {
            maxAcc = indiv.getAccuracy();
            //get rid of any extras that dont fall within the new max threshold
            Iterator<IndividualTDE> it = classifiers.iterator();
            while (it.hasNext()) {
                IndividualTDE b = it.next();
                if (b.getAccuracy() < maxAcc * cutoffThreshold) {
                    it.remove();
                    if (checkpoint) {
                        // free the removed member's checkpoint slot for reuse
                        checkpointIDs.add(b.getEnsembleID());
                    }
                }
            }
        }

        if (!cutoff || indiv.getAccuracy() >= maxAcc * cutoffThreshold) {
            if (classifiers.size() < maxEnsembleSize) {
                if (accuracy < lowestAcc) {
                    lowestAccIdx = classifiers.size();
                    lowestAcc = accuracy;
                }
                classifiers.add(indiv);
                if (checkpoint) {
                    indiv.setEnsembleID(checkpointIDs.remove(0));
                    checkpointChange = true;
                }
            } else if (accuracy > lowestAcc) {
                // ensemble full: replace the current worst member
                double[] newLowestAcc = findMinEnsembleAcc();
                lowestAccIdx = (int) newLowestAcc[0];
                lowestAcc = newLowestAcc[1];

                IndividualTDE rm = classifiers.remove(lowestAccIdx);
                classifiers.add(lowestAccIdx, indiv);
                if (checkpoint) {
                    // new member inherits the replaced member's checkpoint slot
                    indiv.setEnsembleID(rm.getEnsembleID());
                    checkpointChange = true;
                }
            }
        }

        classifiersBuilt++;
        printLineDebug("Classifiers built = " + classifiersBuilt);

        if (checkpoint) {
            checkpoint(indiv, checkpointChange);
        }

        underContractTime = withinTrainContract(trainResults.getBuildTime());
    }
}
/**
 * Saves the current state of the classifier to file; if the ensemble has changed the
 * new classifier is serialised as well as the meta info.
 * Failures are reported but not rethrown so a failed checkpoint never aborts a build.
 *
 * @param classifier last built IndividualTDE classifier
 * @param saveIndiv whether to save the new IndividualTDE classifier
 */
private void checkpoint(IndividualTDE classifier, boolean saveIndiv) {
    try {
        File f = new File(checkpointPath);
        if (!f.isDirectory())
            f.mkdirs();
        //time the checkpoint occured
        checkpointTime = System.nanoTime();

        if (saveIndiv) {
            // try-with-resources closes the streams; the previous explicit
            // out.close()/fos.close() calls inside the block were redundant
            try (ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(
                    checkpointPath + "IndividualTDE-" + classifier.getEnsembleID() + ".ser"))) {
                out.writeObject(classifier);
            }
        }

        //dont take into account time spent serialising into build time
        if (internalContractCheckpointHandling) checkpointTimeDiff += System.nanoTime() - checkpointTime;
        checkpointTime = System.nanoTime();

        //save this, classifiers and train data not included
        saveToFile(checkpointPath + "TDEtemp.ser");
        // write to a temp file then rename so an interrupted save cannot corrupt TDE.ser
        File file = new File(checkpointPath + "TDEtemp.ser");
        File file2 = new File(checkpointPath + "TDE.ser");
        file2.delete();
        file.renameTo(file2);

        if (internalContractCheckpointHandling) checkpointTimeDiff += System.nanoTime() - checkpointTime;
    } catch (Exception e) {
        e.printStackTrace();
        System.out.println("Serialisation to " + checkpointPath + " FAILED");
    }
}
/**
 * Removes any checkpoint files used, then the checkpoint directory itself.
 */
private void checkpointCleanup() {
    File f = new File(checkpointPath);
    String[] files = f.list();
    // list() returns null if the directory does not exist or cannot be read
    if (files != null) {
        for (String file : files) {
            // File(parent, child) is platform independent; the previous hard-coded
            // "\\" separator only worked on Windows
            new File(f, file).delete();
        }
    }
    f.delete();
}
/**
 * Checkpoint directory name differing by dataset and parameters used, to prevent
 * overlap between runs.
 *
 * @param datasetName name of the dataset
 * @return checkpoint file name
 */
private String checkpointName(String datasetName) {
    StringBuilder name = new StringBuilder(datasetName).append(seed).append("TDE");
    if (trainTimeContract)
        name.append("TTC").append(trainContractTimeNanos);
    else
        name.append("S").append(parametersConsidered);
    name.append("M").append(maxEnsembleSize);
    return name.toString();
}
/**
 * Finds the index and accuracy of the ensemble member with least accuracy.
 *
 * @return [0] = index of worst member, [1] = its accuracy
 */
private double[] findMinEnsembleAcc() {
    int worstIdx = 0;
    double worstAcc = Double.MAX_VALUE;
    int idx = 0;
    for (IndividualTDE member : classifiers) {
        double acc = member.getAccuracy();
        if (acc < worstAcc) {
            worstAcc = acc;
            worstIdx = idx;
        }
        idx++;
    }
    return new double[]{worstIdx, worstAcc};
}
/**
 * Finds and returns all possible parameter sets for IndividualTDE classifiers.
 *
 * @param minWindow min window size
 * @param maxWindow max window size
 * @param winInc window size increment
 * @return possible parameters as an Instances object
 */
private Instances uniqueParameters(int minWindow, int maxWindow, int winInc) {
    ArrayList<double[]> possibleParameters = new ArrayList<>();
    // enumerate the full cartesian product of parameter options; the trailing -1 is a
    // placeholder class value (filled in later with observed accuracy/runtime)
    for (Boolean normalise : normOptions) {
        for (Integer alphSize : alphabetSize) {
            for (int winSize = minWindow; winSize <= maxWindow; winSize += winInc) {
                for (Integer wordLen : wordLengths) {
                    for (Integer level : levels) {
                        for (Boolean igb : useIGB) {
                            double[] parameters = {wordLen, alphSize, winSize, normalise ? 1 : 0, level,
                                    igb ? 1 : 0, -1};
                            possibleParameters.add(parameters);
                        }
                    }
                }
            }
        }
    }

    // wrap the parameter sets in a weka Instances object so a GaussianProcesses model
    // can later regress over them (class attribute = last column)
    int numAtts = possibleParameters.get(0).length;
    ArrayList<Attribute> atts = new ArrayList<>(numAtts);
    for (int i = 0; i < numAtts; i++) {
        atts.add(new Attribute("att" + i));
    }

    Instances parameterPool = new Instances("params", atts, possibleParameters.size());
    parameterPool.setClassIndex(numAtts - 1);
    prevParameters = new Instances(parameterPool, 0);
    prevParameters.setClassIndex(numAtts - 1);
    parametersRemaining = possibleParameters.size();

    for (double[] possibleParameter : possibleParameters) {
        DenseInstance inst = new DenseInstance(1, possibleParameter);
        parameterPool.add(inst);
    }

    if (bayesianParameterSelection) {
        paramAccuracy = new ArrayList<>();
    }
    if (trainTimeContract) {
        paramTime = new ArrayList<>();
    }

    return parameterPool;
}
/**
 * Finds a parameter set for an IndividualTDE classifier using GP parameter selection.
 * If contracting remove any parameter sets estimated to go past the contract time after 90% of the contract has
 * elapsed.
 *
 * @return IndividualTDE parameters, or null if no parameter set fits in the remaining contract
 * @throws Exception unable to select parameters
 */
private double[] selectParameters() throws Exception {
    Instance params;

    // inside the last 10% of the contract: prune parameter sets whose predicted build
    // time (GP regression over times observed so far) exceeds the remaining time
    if (trainTimeContract && System.nanoTime() - trainResults.getBuildTime() - checkpointTimeDiff
            > trainContractTimeNanos / 10 * 9) {
        if (prevParameters.size() > 0) {
            for (int i = 0; i < paramTime.size(); i++) {
                prevParameters.get(i).setClassValue(paramTime.get(i));
            }

            GaussianProcesses gp = new GaussianProcesses();
            gp.buildClassifier(prevParameters);
            long remainingTime = trainContractTimeNanos - (System.nanoTime() - trainResults.getBuildTime()
                    - checkpointTimeDiff);

            for (int i = 0; i < parameterPool.size(); i++) {
                double pred = gp.classifyInstance(parameterPool.get(i));
                if (pred > remainingTime) {
                    parameterPool.remove(i);
                    i--; // removal shifts subsequent elements left
                }
            }

            if (parameterPool.size() == 0) return null;
        }
    }

    if (bayesianParameterSelection) {
        // explore randomly for the first initialRandomParameters picks, then exploit
        // by choosing the set with the highest GP-predicted accuracy
        if (initialParameterCount < initialRandomParameters) {
            initialParameterCount++;
            params = parameterPool.remove(rand.nextInt(parameterPool.size()));
        } else {
            for (int i = 0; i < paramAccuracy.size(); i++) {
                prevParameters.get(i).setClassValue(paramAccuracy.get(i));
            }

            GaussianProcesses gp = new GaussianProcesses();
            gp.buildClassifier(prevParameters);

            int bestIndex = 0;
            double bestAcc = -1;

            for (int i = 0; i < parameterPool.numInstances(); i++) {
                double pred = gp.classifyInstance(parameterPool.get(i));
                if (pred > bestAcc) {
                    bestIndex = i;
                    bestAcc = pred;
                }
            }

            params = parameterPool.remove(bestIndex);
        }
    } else {
        // no GP selection: pick a random untried parameter set
        params = parameterPool.remove(rand.nextInt(parameterPool.size()));
    }

    prevParameters.add(params);
    parametersRemaining = parameterPool.size();
    return params.toDoubleArray();
}
/**
 * Randomly subsamples the train set without replacement, recording the chosen indices
 * on the individual classifier.
 *
 * @param series data to be subsampled
 * @param indiv classifier being subsampled for
 * @return subsampled data
 */
private TimeSeriesInstances subsampleData(TimeSeriesInstances series, IndividualTDE indiv) {
    int total = series.numInstances();
    int newSize = (int) (total * trainProportion);

    // candidate pool of all indices; draw without replacement
    ArrayList<Integer> pool = new ArrayList<>(total);
    for (int n = 0; n < total; n++) {
        pool.add(n);
    }

    ArrayList<Integer> chosen = new ArrayList<>(total);
    while (chosen.size() < newSize) {
        chosen.add(pool.remove(rand.nextInt(pool.size())));
    }

    ArrayList<TimeSeriesInstance> data = new ArrayList<>();
    for (int i = 0; i < newSize; i++) {
        data.add(series.get(chosen.get(i)));
    }

    indiv.setSubsampleIndices(chosen);
    return new TimeSeriesInstances(data, series.getClassLabels());
}
/**
 * Estimate the accuracy of an IndividualTDE using LOO CV for the subsampled data. Early exit if it is impossible
 * to meet the required accuracy.
 *
 * @param indiv classifier to evaluate
 * @param series subsampled data
 * @param lowestAcc lowest accuracy in the ensemble
 * @return estimated accuracy, or -1 if lowestAcc can no longer be beaten
 * @throws Exception unable to estimate accuracy
 */
private double individualTrainAcc(IndividualTDE indiv, TimeSeriesInstances series, double lowestAcc)
        throws Exception {
    // record per-instance predictions when they will be reused for the train estimate
    if (getEstimateOwnPerformance() && trainEstimateMethod == TrainEstimateMethod.NONE) {
        indiv.setTrainPreds(new ArrayList<>());
    }

    int correct = 0;
    int requiredCorrect = (int) (lowestAcc * series.numInstances());

    if (multiThread) {
        ArrayList<Future<Double>> futures = new ArrayList<>(series.numInstances());

        for (int i = 0; i < series.numInstances(); ++i) {
            if (series.isMultivariate())
                futures.add(ex.submit(((MultivariateIndividualTDE) indiv).new TrainNearestNeighbourThread(i)));
            else
                futures.add(ex.submit(indiv.new TrainNearestNeighbourThread(i)));
        }

        int idx = 0;
        for (Future<Double> f : futures) {
            // early abandon: even if every remaining prediction were correct, the
            // required accuracy could no longer be reached
            if (correct + series.numInstances() - idx < requiredCorrect) {
                return -1;
            }

            if (f.get() == series.get(idx).getLabelIndex()) {
                ++correct;
            }
            idx++;

            if (getEstimateOwnPerformance() && trainEstimateMethod == TrainEstimateMethod.NONE) {
                indiv.getTrainPreds().add(f.get().intValue());
            }
        }
    } else {
        for (int i = 0; i < series.numInstances(); ++i) {
            // early abandon, as above
            if (correct + series.numInstances() - i < requiredCorrect) {
                return -1;
            }

            double c = indiv.classifyInstance(i); //classify series i, while ignoring its corresponding histogram i
            if (c == series.get(i).getLabelIndex()) {
                ++correct;
            }

            if (getEstimateOwnPerformance() && trainEstimateMethod == TrainEstimateMethod.NONE) {
                indiv.getTrainPreds().add((int) c);
            }
        }
    }

    return (double) correct / (double) series.numInstances();
}
/**
 * Estimate accuracy stage: Three scenarios
 * 1. Subsampled LOO CV using the transformed instances for each classifier.
 * 2. Full LOO CV.
 * 3. Using the out of bag instances for each classifier.
 *
 * @throws Exception unable to obtain estimate
 */
private void findEnsembleTrainEstimate() throws Exception {
    // scenario 3: out of bag estimate
    if (trainEstimateMethod == TrainEstimateMethod.OOB && trainProportion < 1) {
        for (int i = 0; i < train.numInstances(); ++i) {
            double[] probs = new double[train.numClasses()];
            double sum = 0;

            // vote only with members that did NOT train on instance i
            for (IndividualTDE classifier : classifiers) {
                if (!classifier.getSubsampleIndices().contains(i)) {
                    probs[(int) classifier.classifyInstance(train.get(i))] += classifier.getWeight();
                    sum += classifier.getWeight();
                }
            }

            if (sum != 0) {
                for (int j = 0; j < probs.length; ++j)
                    probs[j] = (probs[j] / sum);
            } else {
                // every member saw instance i: fall back to a uniform distribution
                Arrays.fill(probs, 1.0 / train.numClasses());
            }

            trainResults.addPrediction(train.get(i).getLabelIndex(), probs, findIndexOfMax(probs, rand),
                    -1, "");
        }

        trainResults.setEstimatorName("TDEOOB");
        trainResults.setErrorEstimateMethod("OOB");
    } else {
        double[][] trainDistributions = new double[train.numInstances()][train.numClasses()];
        double[] idxSubsampleCount = new double[train.numInstances()];

        // scenario 1: reuse the per-member LOOCV predictions recorded during the build
        if ((trainEstimateMethod == TrainEstimateMethod.NONE || trainEstimateMethod == TrainEstimateMethod.TRAIN)
                && trainProportion < 1) {
            for (IndividualTDE classifier : classifiers) {
                ArrayList<Integer> trainIdx = classifier.getSubsampleIndices();
                ArrayList<Integer> trainPreds = classifier.getTrainPreds();
                double weight = classifier.getWeight();
                for (int g = 0; g < trainIdx.size(); g++) {
                    idxSubsampleCount[trainIdx.get(g)] += weight;
                    trainDistributions[trainIdx.get(g)][trainPreds.get(g)] += weight;
                }
            }

            // normalise each instance's accumulated votes into a distribution
            for (int i = 0; i < trainDistributions.length; i++) {
                if (idxSubsampleCount[i] > 0) {
                    for (int n = 0; n < trainDistributions[i].length; n++) {
                        trainDistributions[i][n] /= idxSubsampleCount[i];
                    }
                }
            }

            trainResults.setEstimatorName("TDESubsampleLOO");
            trainResults.setErrorEstimateMethod("SubsampleLOOCV");
        } else {
            trainResults.setEstimatorName("TDELOO");
            trainResults.setErrorEstimateMethod("LOOCV");
        }

        for (int i = 0; i < train.numInstances(); ++i) {
            double[] probs;

            if (idxSubsampleCount[i] > 0 && (trainEstimateMethod == TrainEstimateMethod.NONE
                    || trainEstimateMethod == TrainEstimateMethod.TRAIN)) {
                probs = trainDistributions[i];
            } else {
                // scenario 2: instance never subsampled (or CV requested) — full LOO
                probs = distributionForInstance(i);
            }

            trainResults.addPrediction(train.get(i).getLabelIndex(), probs, findIndexOfMax(probs, rand),
                    -1, "");
        }
    }

    trainResults.setDatasetName(train.getProblemName());
    trainResults.setFoldID(seed);
    trainResults.setSplit("train");
    trainResults.finaliseResults();
}
/**
 * Find class probabilities of a training instance using the trained model, removing said instance if present.
 *
 * @param test train instance index
 * @return array of doubles: probability of each class
 * @throws Exception failure to classify
 */
private double[] distributionForInstance(int test) throws Exception {
    double[] probs = new double[train.numClasses()];

    //get sum of all channels, votes from each are weighted the same.
    double sum = 0;

    for (IndividualTDE classifier : classifiers) {
        double classification;

        if (classifier.getSubsampleIndices() == null) {
            // member trained on the full data: the index is valid as-is
            classification = classifier.classifyInstance(test);
        } else if (classifier.getSubsampleIndices().contains(test)) {
            // map the full-train index onto this member's subsample index
            classification = classifier.classifyInstance(classifier.getSubsampleIndices().indexOf(test));
        } else if (trainEstimateMethod == TrainEstimateMethod.CV) {
            TimeSeriesInstance series = train.get(test);
            classification = classifier.classifyInstance(series);
        } else {
            // member never saw this instance and CV not requested: skip its vote
            continue;
        }

        probs[(int) classification] += classifier.getWeight();
        sum += classifier.getWeight();
    }

    if (sum != 0) {
        for (int i = 0; i < probs.length; ++i)
            probs[i] = (probs[i] / sum);
    } else {
        // no votes cast: fall back to a uniform distribution
        Arrays.fill(probs, 1.0 / train.numClasses());
    }

    return probs;
}
/**
 * Find class probabilities of an instance using the trained model.
 *
 * @param instance TimeSeriesInstance object
 * @return array of doubles: probability of each class
 * @throws Exception failure to classify
 */
@Override //TSClassifier
public double[] distributionForInstance(TimeSeriesInstance instance) throws Exception {
    double[] classHist = new double[numClasses];

    //get sum of all channels, votes from each are weighted the same.
    double sum = 0;

    // collect per-member nearest neighbours when interpretability output is requested
    if (interpSavePath != null) {
        interpData = new ArrayList<>();
        interpPreds = new ArrayList<>();
    }

    if (multiThread) {
        ArrayList<Future<Double>> futures = new ArrayList<>(classifiers.size());

        for (IndividualTDE classifier : classifiers) {
            if (train.isMultivariate())
                futures.add(ex.submit(((MultivariateIndividualTDE) classifier)
                        .new TestNearestNeighbourThread(instance)));
            else
                futures.add(ex.submit(classifier.new TestNearestNeighbourThread(instance)));
        }

        int idx = 0;
        for (Future<Double> f : futures) {
            double weight = classifiers.get(idx).getWeight();
            classHist[f.get().intValue()] += weight;
            sum += weight;
            idx++;
        }
    } else {
        for (IndividualTDE classifier : classifiers) {
            double classification = classifier.classifyInstance(instance);
            classHist[(int) classification] += classifier.getWeight();
            sum += classifier.getWeight();

            if (interpSavePath != null) {
                interpData.add(classifier.getLastNNIdx());
                interpPreds.add((int) classification);
            }
        }
    }

    double[] distributions = new double[numClasses];

    if (sum != 0) {
        for (int i = 0; i < classHist.length; ++i)
            distributions[i] += classHist[i] / sum;
    } else {
        // no votes cast: fall back to a uniform distribution
        for (int i = 0; i < classHist.length; ++i)
            distributions[i] += 1.0 / numClasses;
    }

    if (interpSavePath != null) {
        // NOTE(review): only channel 0 is kept for interpretability output —
        // consistent with interpretability being univariate-only
        interpSeries = instance.toValueArray()[0];
        interpPred = argMax(distributions, rand);
    }

    return distributions;
}
/**
 * Find class probabilities of a weka instance using the trained model, converting to
 * the time series representation first.
 *
 * @param instance weka Instance object
 * @return array of doubles: probability of each class
 * @throws Exception failure to classify
 */
@Override //AbstractClassifier
public double[] distributionForInstance(Instance instance) throws Exception {
    TimeSeriesInstance converted = Converter.fromArff(instance);
    return distributionForInstance(converted);
}
/**
 * Classify an instance using the trained model.
 *
 * @param instance TimeSeriesInstance object
 * @return predicted class value
 * @throws Exception failure to classify
 */
@Override //TSClassifier
public double classifyInstance(TimeSeriesInstance instance) throws Exception {
    // ties in the distribution are broken randomly via rand
    return findIndexOfMax(distributionForInstance(instance), rand);
}
/**
 * Classify a weka instance using the trained model, converting to the time series
 * representation first.
 *
 * @param instance weka Instance object
 * @return predicted class value
 * @throws Exception failure to classify
 */
@Override //AbstractClassifier
public double classifyInstance(Instance instance) throws Exception {
    TimeSeriesInstance converted = Converter.fromArff(instance);
    return classifyInstance(converted);
}
/**
 * Set the train time limit for a contracted classifier.
 *
 * @param amount contract time in nanoseconds
 */
@Override //TrainTimeContractable
public void setTrainTimeLimit(long amount) {
    trainTimeContract = true;
    trainContractTimeNanos = amount;
}
/**
 * Check if a contracted classifier is within its train time limit.
 *
 * @param start classifier build start time
 * @return true if within the contract or not contracted, false otherwise.
 */
@Override //TrainTimeContractable
public boolean withinTrainContract(long start) {
    if (trainContractTimeNanos <= 0) return true; //Not contracted
    // when an OOB estimate will follow the build, reserve time for it: 20000000 nanos
    // (~20ms) per train instance per ensemble member — presumably a heuristic cost
    // estimate for classifying the out-of-bag instances; TODO confirm the constant
    if (getEstimateOwnPerformance() && trainEstimateMethod == TrainEstimateMethod.OOB)
        return System.nanoTime() - start - checkpointTimeDiff < trainContractTimeNanos -
                (20000000l*(long)train.numInstances() * (long)classifiers.size());
    return System.nanoTime() - start - checkpointTimeDiff < trainContractTimeNanos;
}
/**
 * Set the path to save checkpoint files to; enables checkpointing when the path is valid.
 *
 * @param path string for full path for the directory to store checkpointed files
 * @return true if valid path, false otherwise
 */
@Override //Checkpointable
public boolean setCheckpointPath(String path) {
    if (!Checkpointable.super.createDirectories(path)) {
        return false;
    }
    checkpointPath = path;
    checkpoint = true;
    return true;
}
/**
 * Copies values from a loaded TDE object and IndividualTDE objects into this object.
 * Commented-out assignments below mark fields deliberately NOT restored from the
 * serialised object (runtime/environment state kept from this instance).
 *
 * @param obj a TDE object
 * @throws Exception if obj is not an instance of TDE
 */
@Override //Checkpointable
public void copyFromSerObject(Object obj) throws Exception {
    if (!(obj instanceof TDE))
        throw new Exception("The SER file is not an instance of TDE");
    TDE saved = ((TDE) obj);
    System.out.println("Loading TDE.ser");

    //copy over variables from serialised object
    parametersConsidered = saved.parametersConsidered;
    maxEnsembleSize = saved.maxEnsembleSize;
    histogramIntersection = saved.histogramIntersection;
    useBigrams = saved.useBigrams;
    useFeatureSelection = saved.useFeatureSelection;
    trainProportion = saved.trainProportion;
    dimensionCutoffThreshold = saved.dimensionCutoffThreshold;
    maxNoDimensions = saved.maxNoDimensions;
    bayesianParameterSelection = saved.bayesianParameterSelection;
    initialRandomParameters = saved.initialRandomParameters;
    initialParameterCount = saved.initialParameterCount;
    parameterPool = saved.parameterPool;
    prevParameters = saved.prevParameters;
    parametersRemaining = saved.parametersRemaining;
    //wordLengths = saved.wordLengths;
    //alphabetSize = saved.alphabetSize;
    //normOptions = saved.normOptions;
    //levels = saved.levels;
    //useIGB = saved.useIGB;
    maxWinLenProportion = saved.maxWinLenProportion;
    maxWinSearchProportion = saved.maxWinSearchProportion;
    cutoff = saved.cutoff;
    cutoffThreshold = saved.cutoffThreshold;
    //classifiers = saved.classifier;
    //checkpointPath = saved.checkpointPath;
    //checkpoint = saved.checkpoint;
    //checkpointTime = saved.checkpointTime;
    //checkpointTimeDiff = saved.checkpointTimeDiff;
    checkpointIDs = saved.checkpointIDs;
    //internalContractCheckpointHandling = saved.internalContractCheckpointHandling;
    //cleanupCheckpointFiles = saved.cleanupCheckpointFiles;
    //loadAndFinish = saved.loadAndFinish;
    if (internalContractCheckpointHandling) trainContractTimeNanos = saved.trainContractTimeNanos;
    trainTimeContract = saved.trainTimeContract;
    if (internalContractCheckpointHandling) underContractTime = saved.underContractTime;
    paramAccuracy = saved.paramAccuracy;
    paramTime = saved.paramTime;
    //train = saved.train;
    //numThreads = saved.numThreads;
    //multiThread = saved.multiThread;
    //ex = saved.ex;
    classifiersBuilt = saved.classifiersBuilt;
    lowestAccIdx = saved.lowestAccIdx;
    lowestAcc = saved.lowestAcc;
    maxAcc = saved.maxAcc;
    visSavePath = saved.visSavePath;
    interpSavePath = saved.interpSavePath;
    //interpData = saved.interpData;
    //interpPreds = saved.interpPreds;
    //interpCount = saved.interpCount;
    //interpSeries = saved.interpSeries;
    //interpPred = saved.interpPred;
    trainResults = saved.trainResults;
    if (!internalContractCheckpointHandling) trainResults.setBuildTime(System.nanoTime());
    seedClassifier = saved.seedClassifier;
    seed = saved.seed;
    rand = saved.rand;
    estimateOwnPerformance = saved.estimateOwnPerformance;
    trainEstimateMethod = saved.trainEstimateMethod;

    //load in each serisalised classifier
    // checkpointIDs holds the FREE slots, so any id not present has a ser file on disk
    classifiers = new LinkedList<>();
    for (int i = 0; i < maxEnsembleSize; i++) {
        if (!checkpointIDs.contains(i)) {
            System.out.println("Loading IndividualTDE-" + i + ".ser");

            FileInputStream fis = new FileInputStream(checkpointPath + "IndividualTDE-" + i + ".ser");
            try (ObjectInputStream in = new ObjectInputStream(fis)) {
                Object indv = in.readObject();

                if (!(indv instanceof IndividualTDE))
                    throw new Exception("The SER file " + i + " is not an instance of IndividualTDE");
                IndividualTDE ser = ((IndividualTDE) indv);
                classifiers.add(ser);
            }
        }
    }

    // account for the time spent between the last checkpoint save and this load
    if (internalContractCheckpointHandling) checkpointTimeDiff = saved.checkpointTimeDiff
            + (System.nanoTime() - saved.checkpointTime);
    underContractTime = withinTrainContract(trainResults.getBuildTime());
}
/**
 * Enables multi threading with a set number of threads to use; any value below 2
 * disables multi threading.
 *
 * @param numThreads number of threads available for multi threading
 */
@Override //MultiThreadable
public void enableMultiThreading(int numThreads) {
    multiThread = numThreads > 1;
    this.numThreads = multiThread ? numThreads : 1;
}
/**
 * Set the path to save interpretability output files to.
 *
 * @param path string for the directory to store output in
 * @return true if valid path, false otherwise
 */
@Override
public boolean setInterpretabilitySavePath(String path) {
    if (!Interpretable.super.createInterpretabilityDirectories(path)) {
        return false;
    }
    interpSavePath = path;
    return true;
}
/**
 * Writes interpretability output for the last classified instance: the series, its
 * most influential nearest neighbour from the train set, and the word histograms of
 * both, then invokes a python script to render the visualisation.
 * Univariate only; requires setInterpretabilitySavePath to have been called.
 *
 * @return true if output was produced, false if preconditions were not met
 * @throws Exception failure reading histograms or launching the python process
 */
@Override
public boolean lastClassifiedInterpretability() throws Exception {
    if (interpSavePath == null) {
        System.err.println("TDE interpretability output save path not set.");
        return false;
    }
    if (train.isMultivariate()) {
        System.err.println("TDE interpretability only available for univariate series.");
        return false;
    }

    // accumulate, per train instance, the total weight of the ensemble members that
    // chose it as nearest neighbour AND whose neighbour's class matches the prediction
    TreeMap<Integer, Double> topNeighbours = new TreeMap<>(Collections.reverseOrder());
    for (int i = 0; i < interpData.size(); i++) {
        if (train.get(interpData.get(i)).getLabelIndex() == interpPred) {
            Double val = topNeighbours.get(interpData.get(i));
            if (val == null) val = 0.0;
            topNeighbours.put(interpData.get(i), val + classifiers.get(i).getWeight());
        }
    }

    // train instance with the highest accumulated weight
    int topNeighbour = 0;
    double topInstanceWeight = Double.MIN_VALUE;
    for (Map.Entry<Integer, Double> entry : topNeighbours.entrySet()) {
        if (entry.getValue() > topInstanceWeight) {
            topNeighbour = entry.getKey();
            topInstanceWeight = entry.getValue();
        }
    }

    // highest-weight ensemble member that voted for that neighbour
    int topClassifier = 0;
    double topClassifierWeight = Double.MIN_VALUE;
    for (int i = 0; i < interpData.size(); i++) {
        if (interpData.get(i) == topNeighbour && classifiers.get(i).getWeight() > topClassifierWeight) {
            topClassifier = i;
            topClassifierWeight = classifiers.get(i).getWeight();
        }
    }

    IndividualTDE tde = classifiers.get(topClassifier);
    double[] nearestSeries = train.get(topNeighbour).toValueArray()[0];

    IndividualTDE.Bag histogram = tde.getLastNNBag();
    IndividualTDE.Bag nearestHistogram = tde.getBags().get(tde.getSubsampleIndices().indexOf(topNeighbour));

    // order words by level, then word length, then lexicographically
    TreeSet<SerialisableComparablePair<Byte, String>> keys = new TreeSet<>((obj1, obj2) -> {
        int c1 = obj1.var1 - obj2.var1;
        if (c1 != 0) {
            return c1;
        } else {
            int c2 = obj1.var2.length() - obj2.var2.length();
            if (c2 != 0) {
                return c2;
            } else {
                return obj1.var2.compareTo(obj2.var2);
            }
        }
    });

    // level value -1 marks a bigram; unigrams carry their spatial-pyramid level
    HashMap<SerialisableComparablePair<Byte, String>, Integer> histWords = new HashMap<>();
    for (Map.Entry<SerialisableComparablePair<BitWord, Byte>, Integer> entry : histogram.entrySet()) {
        String word = entry.getKey().var2 == -1 ? ((BitWordLong) entry.getKey().var1).toStringBigram()
                : ((BitWordInt) entry.getKey().var1).toStringUnigram();

        keys.add(new SerialisableComparablePair<>(entry.getKey().var2, word));
        histWords.put(new SerialisableComparablePair<>(entry.getKey().var2, word), entry.getValue());
    }

    HashMap<SerialisableComparablePair<Byte, String>, Integer> nearestWords = new HashMap<>();
    for (Map.Entry<SerialisableComparablePair<BitWord, Byte>, Integer> entry : nearestHistogram.entrySet()) {
        String word = entry.getKey().var2 == -1 ? ((BitWordLong) entry.getKey().var1).toStringBigram()
                : ((BitWordInt) entry.getKey().var1).toStringUnigram();

        keys.add(new SerialisableComparablePair<>(entry.getKey().var2, word));
        nearestWords.put(new SerialisableComparablePair<>(entry.getKey().var2, word), entry.getValue());
    }

    // numLevels = sum of 2^i pyramid cells plus one extra bucket for bigrams
    int numLevels = 1;
    for (int i = 0; i < tde.getLevels(); i++) {
        numLevels += Math.pow(2, i);
    }

    ArrayList<Integer>[][] counts = new ArrayList[numLevels][2];
    ArrayList<String>[] words = new ArrayList[numLevels];
    for (int i = 0; i < numLevels; i++) {
        words[i] = new ArrayList<>();
        for (int n = 0; n < 2; n++) {
            counts[i][n] = new ArrayList<>();
        }
    }

    // align both histograms over the union of observed words (missing word => count 0)
    for (SerialisableComparablePair<Byte, String> key : keys) {
        int idx = key.var1 == -1 ? numLevels - 1 : key.var1;
        words[idx].add(key.var2);

        Integer val = histWords.get(key);
        if (val == null) val = 0;
        counts[idx][0].add(val);

        Integer val2 = nearestWords.get(key);
        if (val2 == null) val2 = 0;
        counts[idx][1].add(val2);
    }

    OutFile of = new OutFile(interpSavePath + "/pred" + seed + "-" + interpCount
            + ".txt");
    of.writeLine(Arrays.toString(interpSeries));
    for (int i = 0; i < numLevels; i++) {
        of.writeLine(words[i].toString());
        of.writeLine(counts[i][0].toString());
    }
    of.writeLine(Arrays.toString(nearestSeries));
    for (int i = 0; i < numLevels; i++) {
        of.writeLine(words[i].toString());
        of.writeLine(counts[i][1].toString());
    }

    // NOTE(review): Runtime.exec(String) tokenises naively — consider
    // ProcessBuilder(List<String>) if interpSavePath can contain spaces/quotes
    Process p = Runtime.getRuntime().exec("py src/main/python/visualisation/interpretabilityTDE.py \"" +
            interpSavePath.replace("\\", "/") + "\" " + seed + " " + interpCount
            + " " + tde.getLevels() + " " + interpPred + " " + train.get(topNeighbour).getLabelIndex());

    interpCount++;

    if (debug) {
        System.out.println("TDE interp python output:");
        BufferedReader out = new BufferedReader(new InputStreamReader(p.getInputStream()));
        BufferedReader err = new BufferedReader(new InputStreamReader(p.getErrorStream()));
        System.out.println("output : ");
        String outLine = out.readLine();
        while (outLine != null) {
            System.out.println(outLine);
            outLine = out.readLine();
        }
        System.out.println("error : ");
        String errLine = err.readLine();
        while (errLine != null) {
            System.out.println(errLine);
            errLine = err.readLine();
        }
    }

    return true;
}
/**
 * Returns the identifier of the most recent interpretability prediction.
 *
 * @return the current interpretability prediction counter
 */
@Override
public int getPredID() {
    return this.interpCount;
}
/**
 * Sets the directory used to store visualisation output, creating it if
 * required.
 *
 * @param path directory to write visualisation files to
 * @return true if the directory exists or was successfully created
 */
@Override
public boolean setVisualisationSavePath(String path) {
    final boolean created = Visualisable.super.createVisualisationDirectories(path);
    if (created) {
        this.visSavePath = path;
    }
    return created;
}
/**
 * Writes a summary of the ensemble (weight mass behind each parameter
 * setting, an exemplar member, per-class word histograms) to a text file
 * and invokes the python visualiser on it.
 *
 * @return true if the visualisation data was written and the script launched
 * @throws Exception on I/O or process errors
 */
@Override
public boolean createVisualisation() throws Exception {
    // A save path and a univariate training set are both required.
    if (visSavePath == null) {
        System.err.println("TDE visualisation save path not set.");
        return false;
    }

    if (train.isMultivariate()) {
        System.err.println("TDE visualisation only available for univariate series.");
        return false;
    }

    // Accumulate the ensemble-weight mass behind each parameter setting.
    HashMap<Integer, Double> wordLengthCounts = new HashMap<>(wordLengths.length);
    HashMap<Boolean, Double> normCounts = new HashMap<>(normOptions.length);
    HashMap<Integer, Double> levelsCounts = new HashMap<>(levels.length);
    HashMap<Boolean, Double> IGBCounts = new HashMap<>(useIGB.length);
    ArrayList<Integer> windowLengths = new ArrayList<>(classifiers.size());
    double weightSum = 0;
    for (int i = 0; i < classifiers.size(); i++) {
        IndividualTDE cls = classifiers.get(i);

        Double val = wordLengthCounts.get(cls.getWordLength());
        if (val == null) val = 0.0;
        wordLengthCounts.put(cls.getWordLength(), val + cls.getWeight());

        Double val2 = normCounts.get(cls.getNorm());
        if (val2 == null) val2 = 0.0;
        normCounts.put(cls.getNorm(), val2 + cls.getWeight());

        Double val3 = levelsCounts.get(cls.getLevels());
        if (val3 == null) val3 = 0.0;
        levelsCounts.put(cls.getLevels(), val3 + cls.getWeight());

        Double val4 = IGBCounts.get(cls.getIGB());
        if (val4 == null) val4 = 0.0;
        IGBCounts.put(cls.getIGB(), val4 + cls.getWeight());

        windowLengths.add(cls.getWindowSize());

        weightSum += cls.getWeight();
    }

    // Word length carrying the greatest total weight (random tie-break).
    int maxWordLength = -1;
    double maxWeight1 = -1;
    for (Map.Entry<Integer, Double> ent : wordLengthCounts.entrySet()) {
        if (ent.getValue() > maxWeight1 || (ent.getValue() == maxWeight1 && rand.nextBoolean())) {
            maxWordLength = ent.getKey();
            maxWeight1 = ent.getValue();
        }
    }

    // Number of levels carrying the greatest total weight (random tie-break).
    int maxLevels = -1;
    double maxWeight2 = -1;
    for (Map.Entry<Integer, Double> ent : levelsCounts.entrySet()) {
        if (ent.getValue() > maxWeight2 || (ent.getValue() == maxWeight2 && rand.nextBoolean())) {
            maxLevels = ent.getKey();
            maxWeight2 = ent.getValue();
        }
    }

    // Median window size across the ensemble members.
    Collections.sort(windowLengths);
    int medianWindowLength;
    if (windowLengths.size() % 2 == 1)
        medianWindowLength = windowLengths.get(windowLengths.size() / 2);
    else
        medianWindowLength = (windowLengths.get(windowLengths.size() / 2 - 1) +
                windowLengths.get(windowLengths.size() / 2)) / 2;

    // Pick the highest-weighted member matching the modal word length and
    // levels; fall back to the overall top-weighted member.
    ArrayList<IndividualTDE> sortedClassifiers = new ArrayList<>(classifiers);
    Collections.sort(sortedClassifiers, Collections.reverseOrder());
    IndividualTDE tde = null;
    int rank = 1;
    for (IndividualTDE indiv : sortedClassifiers) {
        if (indiv.getWordLength() == maxWordLength && indiv.getLevels() == maxLevels) {
            tde = indiv;
            break;
        }
        rank++;
    }
    if (tde == null) {
        System.out.println("No TDE classifier with word length: " + maxWordLength + ", levels: " + maxLevels
                + ", using top weighted classifier.");
        tde = sortedClassifiers.get(0);
        rank = 1;
    }

    // Sum word counts over one exemplar bag per class.
    HashMap<SerialisableComparablePair<Byte, String>, Integer>[] classCounts = new HashMap[getNumClasses()];
    for (int i = 0; i < getNumClasses(); i++) {
        classCounts[i] = new HashMap<>();
    }
    int[] classCount = new int[getNumClasses()];
    for (IndividualTDE.Bag bag : tde.getBags()) {
        int cls = bag.getClassVal();
        if (classCount[cls] >= 1) continue; // only the first bag per class
        classCount[cls]++;
        for (Map.Entry<SerialisableComparablePair<BitWord, Byte>, Integer> entry : bag.entrySet()) {
            SerialisableComparablePair<BitWord, Byte> key = entry.getKey();
            // A level value of -1 marks a bigram; render the word accordingly.
            String word = key.var2 == -1 ? ((BitWordLong) key.var1).toStringBigram()
                    : ((BitWordInt) key.var1).toStringUnigram();
            SerialisableComparablePair<Byte, String> newKey = new SerialisableComparablePair<>(key.var2, word);
            Integer val = classCounts[cls].get(newKey);
            if (val == null) val = 0;
            classCounts[cls].put(newKey, val + entry.getValue());
        }
    }

    // Keys ordered by level, then word length, then lexicographically.
    TreeSet<SerialisableComparablePair<Byte, String>> keys = new TreeSet<>((obj1, obj2) -> {
        int c1 = obj1.var1 - obj2.var1;
        if (c1 != 0) {
            return c1;
        } else {
            int c2 = obj1.var2.length() - obj2.var2.length();
            if (c2 != 0) {
                return c2;
            } else {
                return obj1.var2.compareTo(obj2.var2);
            }
        }
    });
    for (HashMap<SerialisableComparablePair<Byte, String>, Integer> map : classCounts) {
        keys.addAll(map.keySet());
    }

    // One slot per level cell: 1 + 2 + 4 + ... up to tde.getLevels().
    int numLevels = 1;
    for (int i = 0; i < tde.getLevels(); i++) {
        numLevels += Math.pow(2, i);
    }
    ArrayList<Integer>[][] counts = new ArrayList[numLevels][getNumClasses()];
    ArrayList<String>[] words = new ArrayList[numLevels];
    for (int i = 0; i < numLevels; i++) {
        words[i] = new ArrayList<>();
        for (int n = 0; n < getNumClasses(); n++) {
            counts[i][n] = new ArrayList<>();
        }
    }
    for (SerialisableComparablePair<Byte, String> key : keys) {
        int idx = key.var1 == -1 ? numLevels - 1 : key.var1; // bigrams in the last slot
        words[idx].add(key.var2);
        for (int i = 0; i < getNumClasses(); i++) {
            Integer val = classCounts[i].get(key);
            if (val == null) val = 0;
            counts[idx][i].add(val);
        }
    }

    // Recover an example series, its DFT and its first word for plotting.
    TimeSeriesInstance example = train.get(tde.getSubsampleIndices().get(0));
    BitWordInt word = new BitWordInt();
    double[] dft = tde.firstWordVis(example, word);

    // Write everything the python visualiser needs to a text file.
    OutFile of = new OutFile(visSavePath + "/vis" + seed + ".txt");
    of.writeLine(Double.toString(tde.getWeight()));
    of.writeLine(rank + " " + classifiers.size());
    of.writeLine(tde.getWordLength() + " " + wordLengthCounts.get(tde.getWordLength()));
    of.writeLine(tde.getNorm() + " " + normCounts.get(tde.getNorm()));
    of.writeLine(tde.getLevels() + " " + levelsCounts.get(tde.getLevels()));
    of.writeLine(tde.getIGB() + " " + IGBCounts.get(tde.getIGB()));
    of.writeLine(tde.getWindowSize() + " " + medianWindowLength);
    of.writeLine(Double.toString(weightSum));
    of.writeLine(Arrays.toString(classCount));
    of.writeLine(Arrays.toString(example.toValueArray()[0]));
    of.writeLine(Arrays.toString(dft));
    of.writeLine(word.toStringUnigram());
    double[][] breakpoints = tde.getBreakpoints();
    of.writeString(Arrays.toString(breakpoints[0]));
    for (int i = 1; i < breakpoints.length; i++) {
        of.writeString(";" + Arrays.toString(breakpoints[i]));
    }
    of.writeLine("");
    for (int i = 0; i < numLevels; i++) {
        of.writeLine(words[i].toString());
        for (int n = 0; n < getNumClasses(); n++) {
            of.writeLine(counts[i][n].toString());
        }
    }
    of.closeFile();

    // Hand off to the python script that renders the visualisation.
    Process p = Runtime.getRuntime().exec("py src/main/python/visualisation/visTDE.py \"" +
            visSavePath.replace("\\", "/") + "\" " + seed + " " + getNumClasses());

    if (debug) {
        // Relay the python process output when debugging.
        System.out.println("TDE vis python output:");
        BufferedReader out = new BufferedReader(new InputStreamReader(p.getInputStream()));
        BufferedReader err = new BufferedReader(new InputStreamReader(p.getErrorStream()));

        System.out.println("output : ");
        String outLine = out.readLine();
        while (outLine != null) {
            System.out.println(outLine);
            outLine = out.readLine();
        }

        System.out.println("error : ");
        String errLine = err.readLine();
        while (errLine != null) {
            System.out.println(errLine);
            errLine = err.readLine();
        }
    }

    return true;
}
/**
 * Development tests for the TDE classifier.
 *
 * @param args arguments, unused
 * @throws Exception if tests fail
 */
public static void main(String[] args) throws Exception {
    int fold = 0;

    //Minimum working example
    String dataset = "ItalyPowerDemand";
    Instances[] data = DatasetLoading.sampleItalyPowerDemand(fold);
    Instances train = data[0];
    Instances test = data[1];

    // multivariate example dataset
    String dataset2 = "ERing";
    Instances[] data2 = DatasetLoading.sampleERing(fold);
    Instances train2 = data2[0];
    Instances test2 = data2[1];

    TDE c;
    double accuracy;

    // univariate data, also estimating train performance
    c = new TDE();
    c.setSeed(fold);
    c.setEstimateOwnPerformance(true);
    c.buildClassifier(train);
    accuracy = ClassifierTools.accuracy(test, c);

    System.out.println("TDE accuracy on " + dataset + " fold " + fold + " = " + accuracy);
    System.out.println("Train accuracy on " + dataset + " fold " + fold + " = " + c.trainResults.getAcc());

    // multivariate data, also estimating train performance
    c = new TDE();
    c.setSeed(fold);
    c.setEstimateOwnPerformance(true);
    c.buildClassifier(train2);
    accuracy = ClassifierTools.accuracy(test2, c);

    System.out.println("TDE accuracy on " + dataset2 + " fold " + fold + " = " + accuracy);
    System.out.println("Train accuracy on " + dataset2 + " fold " + fold + " = " + c.trainResults.getAcc());

    // 1-minute contract with checkpointing, univariate
    c = new TDE();
    c.setSeed(fold);
    c.setTrainTimeLimit(TimeUnit.MINUTES, 1);
    c.setCleanupCheckpointFiles(true);
    c.setCheckpointPath("D:\\"); // NOTE(review): hard-coded dev checkpoint path
    c.buildClassifier(train);
    accuracy = ClassifierTools.accuracy(test, c);

    System.out.println("Contract 1 Min Checkpoint TDE accuracy on " + dataset + " fold " + fold + " = "
            + accuracy);
    System.out.println("Build time on " + dataset + " fold " + fold + " = " +
            TimeUnit.SECONDS.convert(c.trainResults.getBuildTime(), TimeUnit.NANOSECONDS) + " seconds");

    // 1-minute contract with checkpointing and cutoff, multivariate
    c = new TDE();
    c.setSeed(fold);
    c.setTrainTimeLimit(TimeUnit.MINUTES, 1);
    c.setCleanupCheckpointFiles(true);
    c.setCheckpointPath("D:\\");
    c.setCutoff(true);
    c.buildClassifier(train2);
    accuracy = ClassifierTools.accuracy(test2, c);

    System.out.println("Contract 1 Min Checkpoint TDE accuracy on " + dataset2 + " fold " + fold + " = "
            + accuracy);
    System.out.println("Build time on " + dataset2 + " fold " + fold + " = " +
            TimeUnit.SECONDS.convert(c.trainResults.getBuildTime(), TimeUnit.NANOSECONDS) + " seconds");

    //Output 15/03/21
    /*
        TDE accuracy on ItalyPowerDemand fold 0 = 0.9543245869776482
        Train accuracy on ItalyPowerDemand fold 0 = 0.9701492537313433
        TDE accuracy on ERing fold 0 = 0.9629629629629629
        Train accuracy on ERing fold 0 = 0.9333333333333333
        Contract 1 Min Checkpoint TDE accuracy on ItalyPowerDemand fold 0 = 0.9523809523809523
        Build time on ItalyPowerDemand fold 0 = 7 seconds
        Contract 1 Min Checkpoint TDE accuracy on ERing fold 0 = 0.9555555555555556
        Build time on ERing fold 0 = 60 seconds
    */
}
}
| 66,014 | 37.425495 | 187 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/dictionary_based/WEASEL.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.dictionary_based;
import com.carrotsearch.hppc.*;
import com.carrotsearch.hppc.cursors.DoubleIntCursor;
import com.carrotsearch.hppc.cursors.IntCursor;
import com.carrotsearch.hppc.cursors.LongDoubleCursor;
import com.carrotsearch.hppc.cursors.LongIntCursor;
import de.bwaldvogel.liblinear.*;
import edu.emory.mathcs.jtransforms.fft.DoubleFFT_1D;
import evaluation.evaluators.CrossValidationEvaluator;
import evaluation.storage.ClassifierResults;
import experiments.data.DatasetLoading;
import tsml.classifiers.EnhancedAbstractClassifier;
import utilities.ClassifierTools;
import weka.classifiers.Classifier;
import weka.core.*;
import java.io.Serializable;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
/**
* WEASEL Classifier
*
* @author Patrick Schaefer
*
*/
public class WEASEL extends EnhancedAbstractClassifier implements TechnicalInformationHandler {
/**
 * Publication details for the WEASEL paper.
 *
 * @return technical information describing the originating article
 */
@Override
public TechnicalInformation getTechnicalInformation() {
    final TechnicalInformation info = new TechnicalInformation(TechnicalInformation.Type.ARTICLE);
    info.setValue(TechnicalInformation.Field.AUTHOR, "P. Schaefer, U. Leser");
    info.setValue(TechnicalInformation.Field.TITLE, "Fast and Accurate Time Series Classification with WEASEL");
    info.setValue(TechnicalInformation.Field.JOURNAL, "CIKM");
    info.setValue(TechnicalInformation.Field.YEAR, "2017");
    return info;
}
// the fully trained model, set by buildClassifier
public WEASELModel classifier;

// WEASEL model parameters
// SFA alphabet size (fixed at 4)
protected final int maxS = 4;
// smallest and largest word lengths tried in the parameter search
protected int minF = 4;
protected int maxF = 6;
// normalisation settings tried in the parameter search
protected static boolean[] NORMALIZATION = new boolean[]{true, false};

// chi-squared test
// significance level for feature selection
public static double chi = 0.1;
// cap on the number of unigram/bigram features kept per window size
public static int limit = 1000;

// default liblinear parameters
public static double bias = 1;
public static double p = 0.1;
public static int iterations = 5000;
public static double c = 1;
public static SolverType solverType = SolverType.L2R_LR_DUAL;

// smallest and largest window lengths considered
public static int MIN_WINDOW_LENGTH = 2;
public static int MAX_WINDOW_LENGTH = 350;

// ten-fold cross validation
private int folds = 10;
/**
 * @return results gathered on the training data
 */
@Override
public ClassifierResults getTrainResults() {
    return this.trainResults;
}
/**
 * Bundle of everything learnt during training: the winning normalisation
 * setting and word length, the fitted WEASEL transform, and the liblinear
 * model built on its features.
 */
public static class WEASELModel {

    /** Whether mean-normalisation was selected. */
    public boolean normed;

    // the best number of Fourier values to be used
    public int features;

    // the trained WEASEL transformation
    public WEASELTransform weasel;

    // the trained liblinear classifier
    public de.bwaldvogel.liblinear.Model linearModel;

    public WEASELModel() {
    }

    public WEASELModel(
            boolean meanNormed,
            int wordLength,
            WEASELTransform transform,
            de.bwaldvogel.liblinear.Model linear
    ) {
        this.normed = meanNormed;
        this.features = wordLength;
        this.weasel = transform;
        this.linearModel = linear;
    }
}
/**
 * Default constructor. WEASEL cannot estimate its own performance directly;
 * an external cross-validation is used instead (see buildClassifier).
 */
public WEASEL() {
    super(CANNOT_ESTIMATE_OWN_PERFORMANCE);
}
/**
 * Constructor that seeds the classifier's random components.
 *
 * @param s random seed
 */
public WEASEL(int s) {
    super(CANNOT_ESTIMATE_OWN_PERFORMANCE);
    // Bug fix: previously called setSeed(seed), which re-assigned the
    // existing field to itself and silently ignored the parameter s.
    setSeed(s);
}
/**
 * @return comma-separated parameter string: the superclass parameters
 *         followed by the word-length bounds
 */
@Override
public String getParameters() {
    return super.getParameters() + ",maxF," + maxF + ",minF," + minF;
}
/**
 * Longest series length present in the data, capped at maxWindowSize.
 *
 * @param samples       the data to inspect
 * @param maxWindowSize upper bound on the returned length
 * @return min(maxWindowSize, longest series length)
 */
protected int getMax(Instances samples, int maxWindowSize) {
    int longest = 0;
    for (int i = 0; i < samples.numInstances(); i++) {
        longest = Math.max(longest, instanceLength(samples.get(i)));
    }
    return Math.min(maxWindowSize, longest);
}
/**
 * All candidate window lengths for the given data: every integer from the
 * minimum window length (at least 3 when normalising) up to the capped
 * maximum series length.
 *
 * @param samples the data, used to bound the largest window
 * @param norm    whether mean-normalisation is applied
 * @return ascending array of window lengths
 */
public int[] getWindowLengths(final Instances samples, boolean norm) {
    final int smallest = (norm && MIN_WINDOW_LENGTH <= 2) ? Math.max(3, MIN_WINDOW_LENGTH) : MIN_WINDOW_LENGTH;
    final int largest = getMax(samples, MAX_WINDOW_LENGTH);
    final int[] lengths = new int[largest - smallest + 1];
    int count = 0;
    for (int len = smallest; len <= largest; len++) {
        lengths[count++] = len;
    }
    return Arrays.copyOfRange(lengths, 0, count);
}
/**
 * Extracts the class label of every bag-of-bigrams sample.
 *
 * @param bagOfPatternsTestSamples the bags to read labels from
 * @return labels in sample order
 */
protected static double[] getLabels(final WEASELTransform.BagOfBigrams[] bagOfPatternsTestSamples) {
    final double[] labels = new double[bagOfPatternsTestSamples.length];
    int idx = 0;
    for (WEASELTransform.BagOfBigrams bag : bagOfPatternsTestSamples) {
        labels[idx++] = bag.label;
    }
    return labels;
}
/**
 * Builds a liblinear Problem from the bag-of-bigram histograms.
 *
 * @param bob  the per-sample word histograms
 * @param dict dictionary condensing words to feature indices
 * @param bias liblinear bias term
 * @return a Problem ready for training
 */
protected static Problem initLibLinearProblem(
        final WEASELTransform.BagOfBigrams[] bob,
        final WEASELTransform.Dictionary dict,
        final double bias) {
    // make liblinear deterministic and quiet
    Linear.resetRandom();
    Linear.disableDebugOutput();

    // NOTE: the dictionary grows while features are created, so its size
    // must be read only afterwards
    final FeatureNode[][] features = initLibLinear(bob, dict);

    final Problem problem = new Problem();
    problem.bias = bias;
    problem.y = getLabels(bob);
    problem.n = dict.size() + 1;
    problem.l = features.length;
    problem.x = features;
    return problem;
}
/**
 * Converts each bag of bigrams into a sorted liblinear feature vector.
 *
 * @param bob  the per-sample word histograms
 * @param dict dictionary mapping each word to its condensed index
 * @return one ascending-index FeatureNode array per sample
 */
protected static FeatureNode[][] initLibLinear(
        final WEASELTransform.BagOfBigrams[] bob,
        final WEASELTransform.Dictionary dict) {
    final FeatureNode[][] featuresTrain = new FeatureNode[bob.length][];
    for (int j = 0; j < bob.length; j++) {
        final ArrayList<FeatureNode> features = new ArrayList<>(bob[j].bob.size());
        // keep only words that actually occur, mapped to condensed indices
        for (LongIntCursor word : bob[j].bob) {
            if (word.value > 0) {
                features.add(new FeatureNode(dict.getWordChi(word.key), (word.value)));
            }
        }
        final FeatureNode[] featuresArray = features.toArray(new FeatureNode[]{});
        // liblinear requires feature indices in ascending order
        Arrays.sort(featuresArray, Comparator.comparingInt(node -> node.index));
        featuresTrain[j] = featuresArray;
    }
    return featuresTrain;
}
/** In-place exchange of two array elements. */
private static void swap(int[] array, int idxA, int idxB) {
    final int held = array[idxB];
    array[idxB] = array[idxA];
    array[idxA] = held;
}
/**
 * Runs an nr_fold cross-validation of a liblinear model on the given
 * problem and returns the number of correctly classified samples.
 *
 * @param prob       the liblinear problem (features + labels)
 * @param solverType liblinear solver
 * @param c          regularisation constant
 * @param iter       iteration limit
 * @param p          epsilon parameter
 * @param nr_fold    number of CV folds (capped at the number of samples)
 * @return count of correct predictions over all held-out folds
 */
@SuppressWarnings("static-access")
protected static int trainLibLinear(
        final Problem prob, final SolverType solverType, double c,
        int iter, double p, int nr_fold) {
    final Parameter param = new Parameter(solverType, c, iter, p);

    // fixed-seed random so the fold assignment is reproducible
    ThreadLocal<Random> myRandom = new ThreadLocal<>();
    myRandom.set(new Random(1));
    Random random = myRandom.get();

    int k;
    final int l = prob.l;
    final int[] perm = new int[l];
    // cannot have more folds than samples
    if (nr_fold > l) {
        nr_fold = l;
    }
    final int[] fold_start = new int[nr_fold + 1];

    for (k = 0; k < l; k++) {
        perm[k] = k;
    }
    // Fisher-Yates shuffle of the sample indices
    for (k = 0; k < l; k++) {
        int j = k + random.nextInt(l - k);
        swap(perm, k, j);
    }
    // fold i covers perm[fold_start[i] .. fold_start[i+1])
    for (k = 0; k <= nr_fold; k++) {
        fold_start[k] = k * l / nr_fold;
    }

    final AtomicInteger correct = new AtomicInteger(0);
    final int fold = nr_fold;

    Linear myLinear = new Linear();
    myLinear.disableDebugOutput();
    myLinear.resetRandom(); // reset random component of liblinear for reproducibility

    for (int i = 0; i < fold; i++) {
        int begin = fold_start[i];
        int end = fold_start[i + 1];
        int j, kk;

        // train on everything outside [begin, end)
        Problem subprob = new Problem();
        subprob.bias = prob.bias;
        subprob.n = prob.n;
        subprob.l = l - (end - begin);
        subprob.x = new Feature[subprob.l][];
        subprob.y = new double[subprob.l];

        kk = 0;
        for (j = 0; j < begin; j++) {
            subprob.x[kk] = prob.x[perm[j]];
            subprob.y[kk] = prob.y[perm[j]];
            ++kk;
        }
        for (j = end; j < l; j++) {
            subprob.x[kk] = prob.x[perm[j]];
            subprob.y[kk] = prob.y[perm[j]];
            ++kk;
        }

        // score the held-out fold
        de.bwaldvogel.liblinear.Model submodel = myLinear.train(subprob, param);
        for (j = begin; j < end; j++) {
            correct.addAndGet(prob.y[perm[j]] == myLinear.predict(submodel, prob.x[perm[j]]) ? 1 : 0);
        }
    }
    return correct.get();
}
/**
 * Trains WEASEL: grid-searches the normalisation setting and word length f
 * by cross-validated liblinear accuracy, then refits the transform and the
 * linear model with the best settings on all training data.
 *
 * @param samples training data; the class attribute must be the last attribute
 * @throws Exception if the class attribute is not the last attribute
 */
@Override
public void buildClassifier(final Instances samples) throws Exception {
    long t1 = System.nanoTime();

    if (samples.classIndex() != samples.numAttributes() - 1)
        throw new Exception("WEASEL_BuildClassifier: Class attribute not set as last attribute in dataset");

    try {
        int maxCorrect = -1;
        int bestF = -1;
        boolean bestNorm = false;

        // parameter search over normalisation and word length f
        optimize:
        for (final boolean mean : NORMALIZATION) {
            int[] windowLengths = getWindowLengths(samples, mean);
            WEASELTransform model = new WEASELTransform(maxF, maxS, windowLengths, mean);

            // words are created once per window at length maxF; shorter
            // candidate lengths are obtained by masking in createBagOfPatterns
            int[][][] words = model.createWords(samples);
            for (int f = minF; f <= maxF; f += 2) {
                model.dict.reset();

                final WEASELTransform.BagOfBigrams[] bop = new WEASELTransform.BagOfBigrams[samples.size()];
                final int ff = f;
                for (int w = 0; w < model.windowLengths.length; w++) {
                    WEASELTransform.BagOfBigrams[] bobForOneWindow = fitOneWindow(
                            samples,
                            model.windowLengths, mean,
                            words[w], ff, w);
                    mergeBobs(bop, bobForOneWindow);
                }

                // train liblinear
                final Problem problem = initLibLinearProblem(bop, model.dict, bias);
                int correct = trainLibLinear(problem, solverType, c, iterations, p, folds);

                if (correct > maxCorrect) {
                    maxCorrect = correct;
                    bestF = f;
                    bestNorm = mean;
                }
                // perfect CV accuracy: no better setting is possible
                if (correct == samples.numInstances()) {
                    break optimize;
                }
            }
        }

        // obtain the final matrix
        int[] windowLengths = getWindowLengths(samples, bestNorm);
        WEASELTransform model = new WEASELTransform(maxF, maxS, windowLengths, bestNorm);

        final WEASELTransform.BagOfBigrams[] bop = new WEASELTransform.BagOfBigrams[samples.size()];
        for (int w = 0; w < model.windowLengths.length; w++) {
            int[][] words = model.createWords(samples, w);
            WEASELTransform.BagOfBigrams[] bobForOneWindow = fitOneWindow(
                    samples,
                    model.windowLengths, bestNorm,
                    words, bestF, w);
            mergeBobs(bop, bobForOneWindow);
        }

        // train liblinear
        Problem problem = initLibLinearProblem(bop, model.dict, bias);
        de.bwaldvogel.liblinear.Model linearModel = Linear.train(problem, new Parameter(solverType, c, iterations, p));

        this.classifier = new WEASELModel(
                bestNorm,
                bestF,
                model,
                linearModel
        );
    } catch (Exception e) {
        // NOTE(review): exceptions during the fit are printed and swallowed,
        // leaving this.classifier possibly null - consider rethrowing
        e.printStackTrace();
    }

    if (getEstimateOwnPerformance()) {
        // estimate train performance via an external cross-validation of a
        // fresh WEASEL (this classifier cannot estimate its own)
        int numFolds = setNumberOfFolds(samples);
        CrossValidationEvaluator cv = new CrossValidationEvaluator();
        if (seedClassifier) {
            cv.setSeed(seed);
        }
        cv.setNumFolds(numFolds);
        WEASEL weasel = new WEASEL();
        trainResults = cv.crossValidateWithStats(weasel, samples);
    }

    //NOTE TODO : prior to refactor, the estimate time was being included in the build time
    //measurement. I have retained that here for continuity, shout at jamesl otherwise
    long t2 = System.nanoTime();
    trainResults.setEstimatorName(getClassifierName());
    trainResults.setParas(classifierName);
    trainResults.setBuildTime(t2 - t1);
    trainResults.setParas(getParameters());
}
/**
 * Builds the bags of bigrams for a single window size and applies the
 * chi-squared feature filter to them.
 *
 * @param samples       the training data
 * @param windowLengths all candidate window lengths
 * @param mean          whether mean-normalisation is applied
 * @param word          SFA words for this window, indexed by sample
 * @param f             word length to use
 * @param w             index of the window length
 * @return filtered bags of bigrams, one per sample
 */
private WEASELTransform.BagOfBigrams[] fitOneWindow(
        Instances samples,
        int[] windowLengths, boolean mean,
        int[][] word, int f, int w) {
    final WEASELTransform windowModel = new WEASELTransform(f, maxS, windowLengths, mean);
    final WEASELTransform.BagOfBigrams[] bags = windowModel.createBagOfPatterns(word, samples, w, f);
    windowModel.trainChiSquared(bags, chi);
    return bags;
}
/**
 * Merges the per-window bags into the running per-sample bags, taking the
 * window's bag wholesale for a sample that has none yet.
 */
private synchronized void mergeBobs(
        WEASELTransform.BagOfBigrams[] bop,
        WEASELTransform.BagOfBigrams[] bopForWindow) {
    for (int sampleIdx = 0; sampleIdx < bop.length; sampleIdx++) {
        if (bop[sampleIdx] == null) {
            bop[sampleIdx] = bopForWindow[sampleIdx];
        } else {
            bop[sampleIdx].bob.putAll(bopForWindow[sampleIdx].bob);
        }
    }
}
/**
 * Classifies one series by building its WEASEL histogram over every window
 * length and scoring it with the trained linear model.
 *
 * @param instance the series to classify
 * @return predicted class value
 * @throws Exception via the transform
 */
@Override
public double classifyInstance(Instance instance) throws Exception {
    final WEASELTransform.BagOfBigrams[] bagTest = new WEASELTransform.BagOfBigrams[1];
    for (int w = 0; w < classifier.weasel.windowLengths.length; w++) {
        final int[] wordsTest = classifier.weasel.createWords(instance, w);
        final WEASELTransform.BagOfBigrams[] windowBag = new WEASELTransform.BagOfBigrams[]{
                classifier.weasel.createBagOfPatterns(wordsTest, instance, w, classifier.features)};
        // drop words that did not survive the chi-squared filter in training
        classifier.weasel.dict.filterChiSquared(windowBag);
        mergeBobs(bagTest, windowBag);
    }
    final FeatureNode[][] features = initLibLinear(bagTest, classifier.weasel.dict);
    return Linear.predict(classifier.linearModel, features[0]);
}
/**
 * Builds the test histogram over all window lengths, filters it with the
 * trained dictionary and returns liblinear's class probabilities.
 *
 * @param instance the series to classify
 * @return probability distribution over classes
 * @throws Exception via the transform
 */
@Override
public double[] distributionForInstance(Instance instance) throws Exception {
    double[] classHist = new double[instance.numClasses()];

    // iterate each sample to classify
    final WEASELTransform.BagOfBigrams[] bagTest = new WEASELTransform.BagOfBigrams[1];
    for (int w = 0; w < classifier.weasel.windowLengths.length; w++) {
        int[] wordsTest = classifier.weasel.createWords(instance, w);
        WEASELTransform.BagOfBigrams[] bopForWindow =
                new WEASELTransform.BagOfBigrams[]{classifier.weasel.createBagOfPatterns(wordsTest, instance, w, classifier.features)};
        // drop words that did not survive the chi-squared filter in training
        classifier.weasel.dict.filterChiSquared(bopForWindow);
        mergeBobs(bagTest, bopForWindow);
    }

    FeatureNode[][] features = initLibLinear(bagTest, classifier.weasel.dict);

    double[] probabilities = new double[classifier.linearModel.getNrClass()];
    Linear.predictProbability(classifier.linearModel, features[0], probabilities);

    // liblinear reports probabilities in its internal label order; map them
    // back to class indices via getLabels()
    // TODO do we have to remap classes to indices???
    for (int i = 0; i < classifier.linearModel.getLabels().length; i++) {
        classHist[classifier.linearModel.getLabels()[i]] = probabilities[i];
    }

    return classHist;
}
/**
 * Capability checking is not implemented for WEASEL.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public Capabilities getCapabilities() {
    throw new UnsupportedOperationException("Not supported yet.");
}
/**
 * @return data of passed instance in a double array with the class value removed if present
 */
protected static double[] toArrayNoClass(Instance inst) {
    final int classIdx = inst.classIndex();
    final int length = classIdx >= 0 ? inst.numAttributes() - 1 : inst.numAttributes();

    final double[] data = new double[length];
    int out = 0;
    for (int i = 0; i < inst.numAttributes(); i++) {
        if (i != classIdx) {
            data[out++] = inst.value(i);
        }
    }
    return data;
}
/** Series length of an instance, excluding the class attribute if one is set. */
protected static int instanceLength(Instance inst) {
    return inst.classIndex() >= 0 ? inst.numAttributes() - 1 : inst.numAttributes();
}
/**
 * Integer base-2 logarithm (floor), i.e. the position of the highest set
 * bit, with the value treated as unsigned. By convention binlog(0) == 0,
 * matching the classic shift-based implementation.
 */
protected static int binlog(int bits) {
    if (bits == 0) {
        return 0;
    }
    return 31 - Integer.numberOfLeadingZeros(bits);
}
/**
 * The WEASEL transformation: converts series into histograms of SFA word and
 * bi-gram frequencies across multiple window lengths, to be used with known
 * parameters (the enclosing WEASEL classifier performs the parameter search).
 *
 * The current implementation only supports an alphabet size of 4, which is
 * the expected value as defined in the paper.
 *
 * @author Patrick Schaefer
 */
public static class WEASELTransform {
// SFA alphabet size (4 in practice)
public int alphabetSize;
// maximum SFA word length (number of Fourier coefficients)
public int maxF;
// all window lengths a histogram is built for
public int[] windowLengths;
// whether the mean is normalised away during the SFA transform
public boolean normMean;
// one fitted SFA quantiser per window length, lazily initialised
public SFASupervised[] signature;
// maps each surviving SFA word to a condensed integer index
public Dictionary dict;
/**
 * The WEASEL-model: a histogram of SFA word and bi-gram frequencies
 */
public static class BagOfBigrams {
    /** Packed word -> occurrence count. */
    public LongIntHashMap bob;
    /** Class label of the originating series. */
    public Double label;

    public BagOfBigrams(int expectedWords, Double classLabel) {
        this.bob = new LongIntHashMap(expectedWords);
        this.label = classLabel;
    }
}
/**
 * A dictionary that maps each SFA word to an integer.
 * <p>
 * Condenses the SFA word space.
 */
public static class Dictionary {
    /** Packed word -> condensed 1-based index. */
    public LongIntHashMap dictChi;

    public Dictionary() {
        this.dictChi = new LongIntHashMap();
    }

    /** Discards all known words. */
    public void reset() {
        this.dictChi = new LongIntHashMap();
    }

    /**
     * Returns the condensed index of the given word, assigning the next
     * free index to a word seen for the first time.
     */
    public int getWordChi(long word) {
        final int slot = this.dictChi.indexOf(word);
        if (slot > -1) {
            return this.dictChi.indexGet(slot);
        }
        final int newWord = this.dictChi.size() + 1;
        this.dictChi.put(word, newWord);
        return newWord;
    }

    /** @return number of distinct words known */
    public int size() {
        return this.dictChi.size();
    }

    /**
     * Rebuilds every bag keeping only words present in this dictionary,
     * i.e. those that passed the chi-squared filter.
     */
    public void filterChiSquared(final BagOfBigrams[] bagOfPatterns) {
        for (BagOfBigrams bag : bagOfPatterns) {
            final LongIntHashMap previous = bag.bob;
            bag.bob = new LongIntHashMap();
            for (LongIntCursor word : previous) {
                if (word.value > 0 && this.dictChi.containsKey(word.key)) {
                    bag.bob.put(word.key, word.value);
                }
            }
        }
    }
}
/**
 * Creates a transform for the given word length, alphabet size, window
 * lengths and normalisation setting. SFA quantisers are fitted lazily,
 * one per window length.
 */
public WEASELTransform(int maxF, int maxS,
                       int[] windowLengths, boolean normMean) {
    this.maxF = maxF;
    this.alphabetSize = maxS;
    this.normMean = normMean;
    this.windowLengths = windowLengths;
    this.dict = new Dictionary();
    this.signature = new SFASupervised[windowLengths.length];
}
/**
 * Creates the SFA words for every sample at every window length.
 *
 * @param samples the series to transform
 * @return words indexed [window][sample][position]
 */
public int[][][] createWords(final Instances samples) {
    final int[][][] words = new int[this.windowLengths.length][samples.numInstances()][];
    for (int w = 0; w < this.windowLengths.length; w++) {
        words[w] = createWords(samples, w);
    }
    return words;
}
/**
 * Creates the SFA words for a single sample at every window length.
 *
 * @param sample the series to transform
 * @return words indexed [window][position]
 */
public int[][] createWords(final Instance sample) {
    final int[][] words = new int[this.windowLengths.length][];
    for (int w = 0; w < windowLengths.length; w++) {
        words[w] = createWords(sample, w);
    }
    return words;
}
/**
 * Creates the SFA words of every sample for one window length, fitting the
 * supervised SFA quantiser for that window on first use.
 *
 * @param samples the series to transform
 * @param index   index of the window length
 * @return words indexed [sample][position]
 */
protected int[][] createWords(final Instances samples, final int index) {
    // lazily fit the SFA quantisation for this window length
    if (this.signature[index] == null) {
        this.signature[index] = new SFASupervised();
        this.signature[index].fitWindowing(
                samples, this.windowLengths[index], this.maxF, this.alphabetSize, this.normMean);
    }

    final int[][] words = new int[samples.numInstances()][];
    for (int i = 0; i < samples.numInstances(); i++) {
        words[i] = createWords(samples.get(i), index);
    }
    return words;
}
/**
 * Transforms a single series into its SFA word sequence for one window
 * length; series shorter than the window yield no words.
 *
 * @param sample the series to transform
 * @param index  index of the window length
 * @return word per window position, possibly empty
 */
private int[] createWords(final Instance sample, final int index) {
    return instanceLength(sample) >= this.windowLengths[index]
            ? this.signature[index].transformWindowingInt(sample, this.maxF)
            : new int[]{};
}
/**
 * Chi-squared feature selection over the bags of bigrams: keeps only words
 * whose class association is significant at p_limit, caps the survivors at
 * `limit` unigrams plus `limit` bigrams, and rebuilds each bag from them.
 *
 * Implementation based on:
 * https://github.com/scikit-learn/scikit-learn/blob/c957249/sklearn/feature_selection/univariate_selection.py#L170
 */
public void trainChiSquared(final BagOfBigrams[] bob, double p_limit) {
    // Chi2 Test
    LongIntHashMap featureCount = new LongIntHashMap(bob[0].bob.size());
    DoubleIntHashMap classProb = new DoubleIntHashMap(10);
    DoubleObjectHashMap<LongIntHashMap> observed = new DoubleObjectHashMap<>();

    // count number of samples with this word
    for (BagOfBigrams bagOfPattern : bob) {
        double label = bagOfPattern.label;
        int index = -1;
        LongIntHashMap obs = null;
        if ((index = observed.indexOf(label)) > -1) {
            obs = observed.indexGet(index);
        } else {
            obs = new LongIntHashMap();
            observed.put(label, obs);
        }
        for (LongIntCursor word : bagOfPattern.bob) {
            if (word.value > 0) {
                featureCount.putOrAdd(word.key, 1, 1); //word.value, word.value);
                // count observations per class for this feature
                obs.putOrAdd(word.key, 1, 1); //word.value, word.value);
            }
        }
    }

    // samples per class
    for (BagOfBigrams bagOfPattern : bob) {
        double label = bagOfPattern.label;
        classProb.putOrAdd(label, 1, 1);
    }

    // p_value-squared: observed minus expected occurrence
    LongDoubleHashMap chiSquareSum = new LongDoubleHashMap(featureCount.size());
    for (DoubleIntCursor prob : classProb) {
        double p = ((double) prob.value) / bob.length;
        LongIntHashMap obs = observed.get(prob.key);
        for (LongIntCursor feature : featureCount) {
            double expected = p * feature.value;
            double chi = obs.get(feature.key) - expected;
            double newChi = chi * chi / expected;
            if (newChi > 0) {
                // build the sum among p_value-values of all classes
                chiSquareSum.putOrAdd(feature.key, newChi, newChi);
            }
        }
    }

    // keep features whose chi-squared p-value is within the significance limit
    LongHashSet chiSquare = new LongHashSet(featureCount.size());
    ArrayList<PValueKey> values = new ArrayList<PValueKey>(featureCount.size());
    for (LongDoubleCursor feature : chiSquareSum) {
        double newChi = feature.value;
        double pvalue = Statistics.chiSquaredProbability(newChi, classProb.keys().size() - 1);
        if (pvalue <= p_limit) {
            chiSquare.add(feature.key);
            values.add(new PValueKey(pvalue, feature.key));
        }
    }

    // limit number of features per window size to avoid excessive features
    if (values.size() > limit) {
        // sort by p_value-squared value
        Collections.sort(values, new Comparator<PValueKey>() {
            @Override
            public int compare(PValueKey o1, PValueKey o2) {
                int comp = Double.compare(o1.pvalue, o2.pvalue);
                if (comp != 0) { // tie breaker
                    return comp;
                }
                return Long.compare(o1.key, o2.key);
            }
        });
        chiSquare.clear();

        // keep at most `limit` unigrams and `limit` bigrams, most
        // significant first
        int countUnigram = 0;
        int countBigram = 0;
        for (int i = 0; i < values.size(); i++) {
            // bigram? (the previous word is packed above bit 32)
            long val = values.get(i).key;
            if (val > (1l << 32) && countBigram < limit) {
                chiSquare.add(val);
                countBigram++;
            }
            // unigram?
            else if (val < (1l << 32) && countUnigram < limit) {
                chiSquare.add(val);
                countUnigram++;
            }
            if (countUnigram >= limit && countBigram >= limit) {
                break;
            }
        }
    }

    // remove values
    for (int j = 0; j < bob.length; j++) {
        LongIntHashMap oldMap = bob[j].bob;
        bob[j].bob = new LongIntHashMap();
        for (LongIntCursor cursor : oldMap) {
            if (chiSquare.contains(cursor.key)) {
                bob[j].bob.put(cursor.key, cursor.value);
            }
        }
        oldMap.clear();
    }
}
/** A (p-value, packed word) pair used to rank features by significance. */
static class PValueKey {
    public double pvalue;
    public long key;

    public PValueKey(double p, long word) {
        this.pvalue = p;
        this.key = word;
    }

    @Override
    public String toString() {
        return this.pvalue + ":" + this.key;
    }
}
/**
 * Builds the word/bigram histogram for one series at one window length.
 * Each word is masked down to wordLength symbols and tagged with the
 * window index w in its low bits; a bigram additionally packs the previous
 * non-overlapping word into the upper 32 bits.
 */
public BagOfBigrams createBagOfPatterns(
        final int[] words,
        final Instance sample,
        final int w, // index of used windowSize
        final int wordLength) {
    BagOfBigrams bagOfPatterns = new BagOfBigrams(words.length * 2, sample.classValue());

    final byte usedBits = (byte) binlog(this.alphabetSize);
    // mask keeps only the first wordLength symbols of each word
    final long mask = (1L << (usedBits * wordLength)) - 1L;
    // bits reserved for the window index tag
    int highestBit = binlog(Integer.highestOneBit(MAX_WINDOW_LENGTH)) + 1;

    // create subsequences
    for (int offset = 0; offset < words.length; offset++) {
        long word = (words[offset] & mask) << highestBit | (long) w;
        bagOfPatterns.bob.putOrAdd(word, 1, 1);

        // add 2 grams
        if (offset - this.windowLengths[w] >= 0) {
            long prevWord = (words[offset - this.windowLengths[w]] & mask);
            if (prevWord != 0) {
                long newWord = (prevWord << 32 | word);
                bagOfPatterns.bob.putOrAdd(newWord, 1, 1);
            }
        }
    }
    return bagOfPatterns;
}
/**
 * Batch variant of {@code createBagOfPatterns}: builds the unigram/bigram
 * histogram for every sample, using the pre-computed words for one window
 * length. Same encoding as the single-sample version.
 *
 * @param wordsForWindowLength per-sample packed SFA words for window index w
 * @param samples the corresponding series (class labels are copied)
 * @param w index of the used window size (encoded into each word)
 * @param wordLength number of SFA characters kept per word
 * @return one bag of patterns per sample
 */
public BagOfBigrams[] createBagOfPatterns(
final int[][] wordsForWindowLength,
final Instances samples,
final int w, // index of used windowSize
final int wordLength) {
BagOfBigrams[] bagOfPatterns = new BagOfBigrams[samples.size()];
final byte usedBits = (byte) binlog(this.alphabetSize);
// mask keeps only the first wordLength characters of the packed word
final long mask = (1L << (usedBits * wordLength)) - 1L;
int highestBit = binlog(Integer.highestOneBit(MAX_WINDOW_LENGTH))+1;
// iterate all samples
// and create a bag of pattern
for (int j = 0; j < samples.size(); j++) {
bagOfPatterns[j] = new BagOfBigrams(wordsForWindowLength[j].length * 2, samples.get(j).classValue());
// create subsequences
for (int offset = 0; offset < wordsForWindowLength[j].length; offset++) {
long word = (wordsForWindowLength[j][offset] & mask) << highestBit | (long) w;
bagOfPatterns[j].bob.putOrAdd(word, 1, 1);
// add 2 grams: pair with the word one full window earlier
if (offset - this.windowLengths[w] >= 0) {
long prevWord = (wordsForWindowLength[j][offset - this.windowLengths[w]] & mask);
if (prevWord != 0) {
long newWord = (prevWord << 32 | word);
bagOfPatterns[j].bob.putOrAdd(newWord, 1, 1);
}
}
}
}
return bagOfPatterns;
}
}
/**
* SFA using the ANOVA F-statistic to determine the best Fourier coefficients
* (those that best separate between class labels) as opposed to using the first
* ones.
*/
public static class SFASupervised {
// distribution of Fourier values
public transient ArrayList<ValueLabel>[] orderLine;
public int[] bestValues;
public int alphabetSize = 256;
public byte neededBits = (byte) binlog(this.alphabetSize);
public int wordLength = 0;
public boolean initialized = false;
public int maxWordLength;
// The Momentary Fourier Transform
public MFT transformation;
// use binning / bucketing
public double[][] bins;
public SFASupervised() {
}
@SuppressWarnings("unchecked")
private void init(int l, int alphabetSize) {
    // word-length / alphabet configuration.
    // Fix: the original assigned this.alphabetSize twice; the duplicate is removed.
    this.wordLength = l;
    this.maxWordLength = l;
    this.alphabetSize = alphabetSize;
    this.neededBits = (byte) binlog(alphabetSize);
    this.initialized = true;
    // l quantisation rows with (alphabetSize - 1) thresholds each;
    // MAX_VALUE means "no split learned yet", mapping every value to symbol 0
    this.bins = new double[l][alphabetSize - 1];
    for (double[] row : this.bins) {
        Arrays.fill(row, Double.MAX_VALUE);
    }
    // one orderline (value/label pairs, later sorted) per Fourier value
    this.orderLine = new ArrayList[l];
    for (int i = 0; i < this.orderLine.length; i++) {
        this.orderLine[i] = new ArrayList<>();
    }
}
/**
 * A single (Fourier value, class label) pair placed on the orderline.
 * A null label is treated as class 0.
 * NOTE(review): this is a non-static inner class, so each instance carries a
 * hidden reference to the enclosing SFASupervised; making it static would
 * save memory — confirm no impact elsewhere before changing.
 */
class ValueLabel {
public double value;
public double label;
public ValueLabel(double key, Double label) {
this.value = key;
this.label = label != null? label : 0;
}
@Override
public String toString() {
return "" + this.value + ":" + this.label;
}
}
/**
 * Extracts disjoint windows from every training series and fits the SFA
 * quantisation on those windows; each window inherits its series' label.
 * After this call the quantisation bins are set.
 *
 * @param timeSeries the training samples
 * @param windowLength length of each extracted window
 * @param wordLength the SFA word length
 * @param symbols the SFA alphabet size
 * @param normMean if set, the mean is subtracted from each window
 */
public void fitWindowing(Instances timeSeries, int windowLength, int wordLength, int symbols, boolean normMean) {
    this.transformation = new MFT(windowLength, normMean);
    ArrayList<double[]> windows = new ArrayList<>(timeSeries.numInstances());
    ArrayList<Double> windowLabels = new ArrayList<>(timeSeries.numInstances());
    // every disjoint window of a series carries that series' class label
    for (Instance series : timeSeries) {
        for (double[] window : getDisjointSequences(series, windowLength, normMean)) {
            windows.add(window);
            windowLabels.add(series.classValue());
        }
    }
    // unbox into the array form expected by fitTransform
    double[][] allSamples = new double[windows.size()][];
    double[] allLabels = new double[windows.size()];
    for (int i = 0; i < allSamples.length; i++) {
        allSamples[i] = windows.get(i);
        allLabels[i] = windowLabels.get(i);
    }
    fitTransform(allSamples, allLabels, wordLength, symbols, normMean);
}
/**
 * Splits a time series into consecutive non-overlapping windows of
 * {@code windowSize} samples (any trailing remainder is dropped) and
 * z-normalises each window in place.
 *
 * @param t the series to split (class attribute excluded)
 * @param windowSize samples per window
 * @param normMean if set, each window is also mean-centred
 * @return the normalised disjoint windows
 */
public double[][] getDisjointSequences(Instance t, int windowSize, boolean normMean) {
    double[] data = toArrayNoClass(t);
    int count = instanceLength(t) / windowSize;
    double[][] windows = new double[count][];
    for (int w = 0; w < count; w++) {
        double[] window = Arrays.copyOfRange(data, w * windowSize, (w + 1) * windowSize);
        windows[w] = z_norm(window, normMean);
    }
    return windows;
}
/**
 * Z-normalises {@code data} in place: divides by the standard deviation and,
 * if {@code normMean} is set, also subtracts the mean first. A zero standard
 * deviation leaves the scale unchanged.
 *
 * @param data the values to normalise (mutated and returned)
 * @param normMean whether to subtract the mean as well
 * @return the same array, normalised
 */
public double[] z_norm(double[] data, boolean normMean) {
    // single pass for sum and sum of squares
    double sum = 0.0;
    double sumSq = 0.0;
    for (double v : data) {
        sum += v;
        sumSq += v * v;
    }
    double mean = sum / (double) data.length;
    double norm = 1.0 / ((double) data.length);
    // variance via E[x^2] - E[x]^2; clamp tiny negatives from rounding to 0
    double variance = norm * sumSq - mean * mean;
    double stddev = variance > 0 ? Math.sqrt(variance) : 0;
    double scale = (stddev != 0) ? 1.0 / stddev : 1.0;
    if (normMean) {
        for (int i = 0; i < data.length; i++) {
            data[i] = (data[i] - mean) * scale;
        }
    } else if (scale != 1.0) {
        for (int i = 0; i < data.length; i++) {
            data[i] *= scale;
        }
    }
    return data;
}
/**
 * Shannon entropy (base 2) of a class-frequency histogram.
 *
 * @param frequency count per class label
 * @param total sum of all counts (passed in to avoid recounting)
 * @return the entropy in bits
 */
protected double entropy(ObjectIntHashMap<Double> frequency, double total) {
    final double invLog2 = 1.0 / Math.log(2.0);
    double h = 0;
    for (IntCursor count : frequency.values()) {
        double p = count.value / total;
        if (p > 0) {
            h -= p * Math.log(p) * invLog2;
        }
    }
    return h;
}
/**
 * Information gain of splitting a range into the {@code cIn}/{@code cOut}
 * partitions: parent entropy minus the size-weighted partition entropies.
 *
 * @param cIn class counts left of the split
 * @param cOut class counts right of the split
 * @param class_entropy entropy of the unsplit range
 * @param total_c_in number of elements left of the split
 * @param total number of elements in the whole range
 * @return the information gain of the split
 */
protected double calculateInformationGain(
    ObjectIntHashMap<Double> cIn, ObjectIntHashMap<Double> cOut,
    double class_entropy,
    double total_c_in,
    double total) {
    double total_c_out = (total - total_c_in);
    double weightedIn = total_c_in / total * entropy(cIn, total_c_in);
    double weightedOut = total_c_out / total * entropy(cOut, total_c_out);
    return class_entropy - weightedIn - weightedOut;
}
/**
 * Recursively finds split positions on a sorted orderline that maximise
 * information gain, until the alphabet is exhausted or too little data
 * remains. Chosen split indices are appended to {@code splitPoints}.
 *
 * @param element orderline (value/label pairs sorted by value) for one coefficient
 * @param start inclusive start index of the range to split
 * @param end exclusive end index of the range to split
 * @param remainingSymbols alphabet symbols still available (halved per level)
 * @param splitPoints output list of chosen split indices
 */
protected void findBestSplit(
List<ValueLabel> element,
int start,
int end,
int remainingSymbols,
List<Integer> splitPoints
) {
double bestGain = -1;
int bestPos = -1;
// class entropy of the whole [start, end) range
int total = end - start;
ObjectIntHashMap<Double> cIn = new ObjectIntHashMap<>();
ObjectIntHashMap<Double> cOut = new ObjectIntHashMap<>();
for (int pos = start; pos < end; pos++) {
cOut.putOrAdd(element.get(pos).label, 1, 1);
}
double class_entropy = entropy(cOut, total);
int i = start;
Double lastLabel = element.get(i).label;
// sweep the split point left to right, moving one element at a time
i += moveElement(element, cIn, cOut, start);
for (int split = start + 1; split < end - 1; split++) {
Double label = element.get(i).label;
i += moveElement(element, cIn, cOut, split);
// only inspect changes of the label — gain can only peak there
if (!label.equals(lastLabel)) {
// NOTE(review): 'i' is an absolute index, not the in-partition count
// (i - start); for recursive calls with start > 0 this overstates
// total_c_in. Kept as-is to match the reference SFA implementation —
// confirm before changing, as it affects the learned bins.
double gain = calculateInformationGain(cIn, cOut, class_entropy, i, total);
gain = Math.round(gain * 1000.0) / 1000.0; // round to 3 decimal places
if (gain >= bestGain) {
bestPos = split;
bestGain = gain;
}
}
lastLabel = label;
}
if (bestPos > -1) {
splitPoints.add(bestPos);
// recursive split: half the remaining symbols to each side
remainingSymbols = remainingSymbols / 2;
if (remainingSymbols > 1) {
if (bestPos - start > 2 && end - bestPos > 2) { // enough data points?
findBestSplit(element, start, bestPos, remainingSymbols, splitPoints);
findBestSplit(element, bestPos, end, remainingSymbols, splitPoints);
} else if (end - bestPos > 4) { // enough data points?
// NOTE(review): (end - bestPos) / 2 is a length, not an index; a
// midpoint like bestPos + (end - bestPos) / 2 looks intended.
// Kept as-is to stay faithful to the reference implementation.
findBestSplit(element, bestPos, (end - bestPos) / 2, remainingSymbols, splitPoints);
findBestSplit(element, (end - bestPos) / 2, end, remainingSymbols, splitPoints);
} else if (bestPos - start > 4) { // enough data points?
findBestSplit(element, start, (bestPos - start) / 2, remainingSymbols, splitPoints);
findBestSplit(element, (bestPos - start) / 2, end, remainingSymbols, splitPoints);
}
}
}
}
/**
 * Shifts the element at {@code pos} across the split: its label is counted
 * into the left partition and discounted from the right one.
 *
 * @return the number of elements moved (always 1)
 */
protected int moveElement(
    List<ValueLabel> element,
    ObjectIntHashMap<Double> cIn, ObjectIntHashMap<Double> cOut,
    int pos) {
    Double label = element.get(pos).label;
    cIn.putOrAdd(label, 1, 1);
    cOut.putOrAdd(label, -1, -1);
    return 1;
}
/** Returns the length of the longest sample (0 for an empty set). */
protected int getMaxLength(double[][] samples) {
    int max = 0;
    for (double[] sample : samples) {
        if (sample.length > max) {
            max = sample.length;
        }
    }
    return max;
}
/**
 * Same as fitTransformDouble but returns the SFA words instead of the Fourier
 * transformed time series. Also selects the {@code wordLength} Fourier values
 * with the highest ANOVA F-statistic and records them in {@code bestValues}.
 */
public short[][] fitTransform(double[][] samples, double[] labels, int wordLength, int symbols, boolean normMean) {
// transform to the maximum sample length so all coefficients can be ranked
int length = getMaxLength(samples);
double[][] transformedSignal = fitTransformDouble(samples, labels, length, symbols, normMean);
// rank Fourier values by class separability (ANOVA F-statistic)
Indices<Double>[] best = calcBestCoefficients(samples, labels, transformedSignal);
// use best coefficients (the ones with largest f-value)
this.bestValues = new int[Math.min(best.length, wordLength)];
this.maxWordLength = 0;
for (int i = 0; i < this.bestValues.length; i++) {
this.bestValues[i] = best[i].index;
// remember how many leading Fourier values the MFT must produce
this.maxWordLength = Math.max(best[i].index + 1, this.maxWordLength);
}
// make sure it is an even number — Fourier values come in real/imag pairs
this.maxWordLength += this.maxWordLength % 2;
return transform(samples, labels, transformedSignal);
}
/**
 * Learns the quantisation bins (information-gain splits on the per-coefficient
 * orderlines) and returns the Fourier-transformed samples. The orderlines are
 * released afterwards; only {@code bins} is needed from then on.
 * NOTE(review): the second parameter is used as the transform length here, not
 * the final SFA word length — fitTransform passes the maximum sample length.
 */
public double[][] fitTransformDouble(double[][] samples, double[] labels, int wordLength, int symbols, boolean normMean) {
if (!this.initialized) {
init(wordLength, symbols);
// default MFT over the whole series when fitWindowing was not used
if (this.transformation == null) {
this.transformation = new MFT(samples[0].length, normMean);
}
}
double[][] transformedSamples = fillOrderline(samples, labels, wordLength);
divideHistogramInformationGain();
this.orderLine = null; // free memory; only the bins are kept
return transformedSamples;
}
/**
 * Use information-gain to divide the orderline: for every Fourier coefficient,
 * recursively chooses the splits with maximal information gain and writes the
 * corresponding threshold values into {@code bins[i]} in ascending order.
 */
protected void divideHistogramInformationGain() {
// for each Fourier coefficient: split using maximal information gain
for (int i = 0; i < this.orderLine.length; i++) {
List<ValueLabel> element = this.orderLine[i];
if (!element.isEmpty()) {
ArrayList<Integer> splitPoints = new ArrayList<>();
findBestSplit(element, 0, element.size(), this.alphabetSize, splitPoints);
// bins must hold ascending thresholds; splits are found out of order
Collections.sort(splitPoints);
// apply the split: the threshold is the first value of the right partition
for (int j = 0; j < splitPoints.size(); j++) {
double value = element.get(splitPoints.get(j) + 1).value;
//          double value = (element.get(splitPoints.get(j)).value + element.get(splitPoints.get(j)+1).value)/2.0;
this.bins[i][j] = value;
}
}
}
}
/**
 * Ranks the Fourier values by their ANOVA F-statistic (how well they separate
 * the class labels), largest first.
 * compare : https://github.com/scikit-learn/scikit-learn/blob/c957249/sklearn/feature_selection/univariate_selection.py#L121
 *
 * Fix: the original null-check on {@code transformedSignal} was dead code —
 * {@code transformedSignal.length} was dereferenced unconditionally first.
 * A null argument throws NullPointerException, as it always effectively did.
 *
 * @param samples the raw samples (used only to pair counts with labels)
 * @param labels the class label of each sample
 * @param transformedSignal the Fourier transform of each sample; all rows are
 *        expected to have the same length
 * @return indices of every Fourier value with a defined F-value, sorted by
 *         descending F-value
 */
@SuppressWarnings("unchecked")
public static Indices<Double>[] calcBestCoefficients(
    double[][] samples,
    double[] labels,
    double[][] transformedSignal) {
    // group the transformed samples by class label
    HashMap<Double, ArrayList<double[]>> classes = new HashMap<>();
    for (int i = 0; i < samples.length; i++) {
        classes.computeIfAbsent(labels[i], k -> new ArrayList<>()).add(transformedSignal[i]);
    }
    double nSamples = transformedSignal.length;
    double nClasses = classes.keySet().size();
    // all rows share the same length (padded by the MFT)
    int length = transformedSignal.length > 0 ? transformedSignal[0].length : 0;
    double[] f = getFoneway(length, classes, nSamples, nClasses);
    // keep every defined F-value; Indices sorts by descending value
    List<Indices<Double>> best = new ArrayList<>(f.length);
    for (int i = 0; i < f.length; i++) {
        if (!Double.isNaN(f[i])) {
            best.add(new Indices<>(i, f[i]));
        }
    }
    Collections.sort(best);
    return best.toArray(new Indices[]{});
}
/**
 * Fourier-transforms every sample and inserts each Fourier value, paired with
 * its class label, into the per-coefficient orderline; each orderline is then
 * sorted ascending by value (ties broken by label).
 *
 * @param samples A set of samples (mutated in place by the z-normalisation)
 * @param labels class label per sample
 * @param l number of Fourier values to keep per sample
 * @return the Fourier-transformed samples
 */
protected double[][] fillOrderline(double[][] samples, double[] labels, int l) {
double[][] transformedSamples = new double[samples.length][];
for (int i = 0; i < samples.length; i++) {
// z-normalization (mutates the input sample in place)
z_norm(samples[i], true); // TODO needed here?
// approximation
transformedSamples[i] = this.transformation.transform(samples[i], l);
for (int j = 0; j < transformedSamples[i].length; j++) {
// round to 2 decimal places to reduce noise
double value = Math.round(transformedSamples[i][j] * 100.0) / 100.0;
this.orderLine[j].add(new ValueLabel(value, labels[i]));
}
}
// Sort ascending by value, then by label, so equal values group deterministically
for (List<ValueLabel> element : this.orderLine) {
Collections.sort(element, new Comparator<ValueLabel>() {
@Override
public int compare(ValueLabel o1, ValueLabel o2) {
int comp = Double.compare(o1.value, o2.value);
if (comp != 0) {
return comp;
}
return Double.compare(o1.label,o2.label);
}
});
}
return transformedSamples;
}
/**
 * The one-way ANOVA tests the null hypothesis that 2 or more groups have
 * the same population mean. The test is applied to samples from two or
 * more groups, possibly with differing sizes. Returns one F-ratio per
 * Fourier value (position in the transformed arrays).
 *
 * @param length number of Fourier values per sample
 * @param classes transformed samples grouped by class label
 * @param nSamples total number of samples across all classes
 * @param nClasses number of distinct class labels
 * @return F-ratio per Fourier value (may contain NaN where variance is 0)
 */
public static double[] getFoneway(
int length,
Map<Double, ArrayList<double[]>> classes,
double nSamples,
double nClasses) {
// ss_alldata[i] = sum of squares of value i over ALL samples;
// sums_args[c][i] = sum of value i over the samples of class c
double[] ss_alldata = new double[length];
HashMap<Double, double[]> sums_args = new HashMap<>();
for (Map.Entry<Double, ArrayList<double[]>> allTs : classes.entrySet()) {
double[] sums = new double[ss_alldata.length];
sums_args.put(allTs.getKey(), sums);
for (double[] ts : allTs.getValue()) {
for (int i = 0; i < ts.length; i++) {
ss_alldata[i] += ts[i] * ts[i];
sums[i] += ts[i];
}
}
}
// overall sum (squared below) and per-class squared sums
double[] square_of_sums_alldata = new double[ss_alldata.length];
Map<Double, double[]> square_of_sums_args = new HashMap<>();
for (Map.Entry<Double, double[]> sums : sums_args.entrySet()) {
for (int i = 0; i < sums.getValue().length; i++) {
square_of_sums_alldata[i] += sums.getValue()[i];
}
double[] squares = new double[sums.getValue().length];
square_of_sums_args.put(sums.getKey(), squares);
for (int i = 0; i < sums.getValue().length; i++) {
squares[i] += sums.getValue()[i] * sums.getValue()[i];
}
}
// square the accumulated overall sums in place
for (int i = 0; i < square_of_sums_alldata.length; i++) {
square_of_sums_alldata[i] *= square_of_sums_alldata[i];
}
// total sum of squares: sum(x^2) - (sum(x))^2 / n
double[] sstot = new double[ss_alldata.length];
for (int i = 0; i < sstot.length; i++) {
sstot[i] = ss_alldata[i] - square_of_sums_alldata[i] / nSamples;
}
double[] ssbn = new double[ss_alldata.length]; // sum of squares between
double[] sswn = new double[ss_alldata.length]; // sum of squares within
for (Map.Entry<Double, double[]> sums : square_of_sums_args.entrySet()) {
double n_samples_per_class = classes.get(sums.getKey()).size();
for (int i = 0; i < sums.getValue().length; i++) {
ssbn[i] += sums.getValue()[i] / n_samples_per_class;
}
}
for (int i = 0; i < square_of_sums_alldata.length; i++) {
ssbn[i] -= square_of_sums_alldata[i] / nSamples;
}
double dfbn = nClasses - 1; // degrees of freedom between
double dfwn = nSamples - nClasses; // degrees of freedom within
double[] msb = new double[ss_alldata.length]; // variance (mean square) between classes
double[] msw = new double[ss_alldata.length]; // variance (mean square) within samples
double[] f = new double[ss_alldata.length]; // f-ratio
for (int i = 0; i < sswn.length; i++) {
sswn[i] = sstot[i] - ssbn[i];
msb[i] = ssbn[i] / dfbn;
msw[i] = sswn[i] / dfwn;
// 0/0 yields NaN here; callers filter NaN out
f[i] = msb[i] / msw[i];
}
return f;
}
/**
 * An (index, value) pair ordered by DESCENDING value, used to rank Fourier
 * coefficients by their F-statistic.
 * Fix: added the missing {@code @Override} on compareTo.
 * NOTE: compareTo is intentionally inconsistent with equals (no equals/hashCode
 * override) — instances are only ever sorted, never hashed or compared.
 */
static class Indices<E extends Comparable<E>> implements Comparable<Indices<E>> {
    int index;
    E value;

    public Indices(int index, E value) {
        this.index = index;
        this.value = value;
    }

    @Override
    public int compareTo(Indices<E> o) {
        return o.value.compareTo(this.value); // descending sort!
    }

    @Override
    public String toString() {
        return "(" + this.index + ":" + this.value + ")";
    }
}
/**
 * Quantises a DFT approximation to its SFA word: each selected coefficient is
 * looked up in its learned threshold row and mapped to the alphabet symbol of
 * the bin it falls into.
 *
 * @param approximation the DFT approximation of a time series
 * @return one symbol per selected coefficient
 */
public short[] quantization(double[] approximation) {
    int len = Math.min(approximation.length, this.bestValues.length);
    short[] word = new short[len];
    for (int a = 0; a < len; a++) {
        int coeff = this.bestValues[a];
        double[] thresholds = this.bins[coeff];
        // the symbol is the number of thresholds the value is not below
        short symbol = 0;
        while (symbol < thresholds.length && approximation[coeff] >= thresholds[symbol]) {
            symbol++;
        }
        word[a] = symbol;
    }
    return word;
}
/**
 * Transforms a single time series to its SFA word.
 *
 * @param timeSeries a sample
 * @param approximation its DFT approximation if already available, else null
 *        (in which case it is computed here)
 * @return the SFA word, one symbol per selected coefficient
 */
public short[] transform(double[] timeSeries, double[] approximation) {
    if (!this.initialized) {
        throw new RuntimeException("Please call fitTransform() first.");
    }
    double[] dft = (approximation != null)
            ? approximation
            : this.transformation.transform(timeSeries, this.maxWordLength);
    // use lookup table (bins) to get the word from the approximation
    return quantization(dft);
}
/**
 * Transforms a set of time series to SFA words.
 *
 * @param samples the samples to transform
 * @param labels unused; kept for interface compatibility
 * @param approximation per-sample DFT approximations (entries may be null)
 * @return one SFA word per sample
 */
public short[][] transform(double[][] samples, double[] labels, double[][] approximation) {
    if (!this.initialized) {
        throw new RuntimeException("Please call fitTransform() first.");
    }
    short[][] words = new short[samples.length][];
    for (int i = 0; i < words.length; i++) {
        words[i] = transform(samples[i], approximation[i]);
    }
    return words;
}
/**
 * Packs the low {@code usedBits} bits of each of the first {@code to}
 * characters into a single long, character 0 occupying the lowest bits.
 * At most 60/usedBits characters are packed.
 */
protected static long fromByteArrayOne(short[] bytes, int to, byte usedBits) {
    int shortsPerLong = 60 / usedBits; // how many characters fit into 60 bits
    int limit = Math.min(Math.min(bytes.length, to), shortsPerLong);
    long charMask = (1L << usedBits) - 1L;
    long bits = 0;
    for (int i = 0; i < limit; i++) {
        // keep only the character's low usedBits bits, shift into position
        bits |= (bytes[i] & charMask) << (i * usedBits);
    }
    return bits;
}
// Thin alias: packs the first 'features' characters of an SFA word into a long.
protected static long createWord(short[] words, int features, byte usedBits) {
return fromByteArrayOne(words, features, usedBits);
}
/**
 * Extracts sliding windows from a time series and transforms each to its SFA
 * word, packed into a single int.
 *
 * @param ts the series to window
 * @param wordLength number of characters packed into each int
 * @return one packed word per sliding-window offset
 */
public int[] transformWindowingInt(Instance ts, int wordLength) {
    short[][] windowWords = transformWindowing(ts);
    int[] packed = new int[windowWords.length];
    for (int i = 0; i < packed.length; i++) {
        packed[i] = (int) createWord(windowWords[i], wordLength, this.neededBits);
    }
    return packed;
}
/**
 * Extracts sliding windows from a time series and transforms each to its SFA
 * word as a short[] — one short per character.
 *
 * @param timeSeries a sample
 * @return one SFA word per sliding-window offset
 */
public short[][] transformWindowing(Instance timeSeries) {
    // momentary Fourier transform of every sliding window
    double[][] windowDFTs = this.transformation.transformWindowing(timeSeries, this.maxWordLength);
    short[][] words = new short[windowDFTs.length][];
    for (int i = 0; i < windowDFTs.length; i++) {
        words[i] = quantization(windowDFTs[i]);
    }
    return words;
}
}
/**
* The Momentary Fourier Transform is alternative algorithm of
* the Discrete Fourier Transform for overlapping windows. It has
* a constant computational complexity for in the window queryLength n as
* opposed to O(n log n) for the Fast Fourier Transform algorithm.
* <p>
* It was first published in:
* Albrecht, S., Cumming, I., Dudas, J.: The momentary fourier transformation
* derived from recursive matrix transformations. In: Digital Signal Processing
* Proceedings, 1997., IEEE (1997)
*
*/
public static class MFT implements Serializable {
private static final long serialVersionUID = 8508604292241736378L;
private int windowSize = 0;
private int startOffset = 0;
private transient DoubleFFT_1D fft = null;
/**
 * @param windowSize length of the sliding windows to transform
 * @param normMean if set, the DC coefficient (first two Fourier values) is
 *        skipped, which is equivalent to subtracting the window mean
 */
public MFT(int windowSize, boolean normMean) {
    this.windowSize = windowSize;
    this.fft = new DoubleFFT_1D(this.windowSize);
    // ignore DC value?
    this.startOffset = normMean ? 2 : 0;
}

/**
 * Restores the transient FFT engine after deserialization.
 * Fix: {@code fft} is transient and was only created in the constructor, so a
 * deserialized MFT threw NullPointerException on first use.
 */
private void readObject(java.io.ObjectInputStream in)
        throws java.io.IOException, ClassNotFoundException {
    in.defaultReadObject();
    this.fft = new DoubleFFT_1D(this.windowSize);
}
/**
 * Discrete Fourier transform of the first {@code windowSize} samples of
 * {@code series}, truncated to the first {@code wordLength} Fourier values
 * (skipping the DC coefficient when mean-normalisation is on) and with
 * alternating signs applied for lower-bounding in Euclidean space.
 * Fix: removed a large block of dead commented-out code (a hand-rolled DFT
 * superseded by the FFT path below); executable statements are unchanged.
 *
 * @param series the input values; only the first windowSize are used,
 *        shorter input is zero-padded
 * @param wordLength number of Fourier values to return
 * @return the truncated, sign-alternated Fourier values
 */
public double[] transform(double[] series, int wordLength) {
    double[] data = new double[this.windowSize];
    System.arraycopy(series, 0, data, 0, Math.min(this.windowSize, series.length));
    this.fft.realForward(data);
    // NOTE(review): for JTransforms' even-length real packing, data[1] holds
    // Re[n/2] rather than a DC imaginary part; zeroing it matches the
    // reference SFA implementation — confirm before changing.
    data[1] = 0; // DC-coefficient imaginary part
    // make it even length for uneven windowSize
    double[] copy = new double[wordLength];
    int length = Math.min(this.windowSize - this.startOffset, wordLength);
    System.arraycopy(data, this.startOffset, copy, 0, length);
    // norming: alternate signs of consecutive Fourier values
    int sign = 1;
    for (int i = 0; i < copy.length; i++) {
        copy[i] *= sign;
        sign *= -1;
    }
    return copy;
}
/**
 * Transforms a time series, extracting windows and using *momentary* fourier
 * transform for each window: the first window is transformed with a full FFT,
 * every later window is derived from its predecessor in O(l) via the MFT
 * recurrence. Returns only the first l/2 Fourier coefficients for each window.
 *
 * @param timeSeries the time series to be transformed
 * @param l the number of Fourier values to use (equal to l/2 Fourier
 *          coefficients). If l is uneven, l+1 Fourier values are returned. If
 *          windowSize is smaller than l, only the first windowSize Fourier
 *          values are set.
 * @return returns only the first l/2 Fourier coefficients for each window.
 */
public double[][] transformWindowing(Instance timeSeries, int l) {
int wordLength = Math.min(windowSize, l + this.startOffset);
wordLength += wordLength%2; // make it even
// twiddle factors e^(-2*pi*i*(u/2)/windowSize), one real/imag pair per value
double[] phis = new double[wordLength];
for (int u = 0; u < phis.length; u += 2) {
// integer division is exact here: u is always even
double uHalve = -u / 2;
phis[u] = realPartEPhi(uHalve, this.windowSize);
phis[u + 1] = complexPartEPhi(uHalve, this.windowSize);
}
// means and stddev for each sliding window (for the final normalisation)
int end = Math.max(1, instanceLength(timeSeries) - this.windowSize + 1);
double[] means = new double[end];
double[] stds = new double[end];
calcIncrementalMeanStddev(this.windowSize, toArrayNoClass(timeSeries), means, stds);
double[][] transformed = new double[end][];
// holds the DFT of each sliding window
double[] mftData = new double[wordLength];
double[] data = toArrayNoClass(timeSeries);
for (int t = 0; t < end; t++) {
// use the MFT recurrence: update the previous window's DFT with the
// sample entering and the sample leaving, then rotate by the twiddle
if (t > 0) {
for (int k = 0; k < wordLength; k += 2) {
double real1 = (mftData[k] + data[t + this.windowSize - 1] - data[t - 1]);
double imag1 = (mftData[k + 1]);
double real = complexMultiplyRealPart(real1, imag1, phis[k], phis[k + 1]);
double imag = complexMultiplyImagPart(real1, imag1, phis[k], phis[k + 1]);
mftData[k] = real;
mftData[k + 1] = imag;
}
}
// use the full DFT for the first offset only
else {
double[] dft = new double[this.windowSize];
double[] data2 = toArrayNoClass(timeSeries);
System.arraycopy(data2, 0, dft, 0, Math.min(this.windowSize, data.length));
this.fft.realForward(dft);
dft[1] = 0; // DC-coefficient imag part
// if windowSize > mftData.length, the remaining data should be 0 now.
System.arraycopy(dft, 0, mftData, 0, Math.min(mftData.length, dft.length));
}
// drop the DC coefficient when mean-normalising, then scale by 1/std
double[] copy = new double[l];
System.arraycopy(mftData, this.startOffset, copy, 0, Math.min(l, mftData.length-this.startOffset));
transformed[t] = normalizeFT(copy, stds[t]);
}
return transformed;
}
/**
 * Computes the mean and standard deviation of every sliding window of
 * {@code windowLength} over {@code tsData} incrementally (O(n) total),
 * writing the results into {@code means} and {@code stds}.
 */
public void calcIncrementalMeanStddev(
    int windowLength,
    double[] tsData,
    double[] means,
    double[] stds) {
    // multiplying by the reciprocal is cheaper than dividing
    double rWindowLength = 1.0 / (double) windowLength;
    double sum = 0;
    double squareSum = 0;
    // seed the running sums with the first window
    for (int i = 0; i < Math.min(tsData.length, windowLength); i++) {
        sum += tsData[i];
        squareSum += tsData[i] * tsData[i];
    }
    means[0] = sum * rWindowLength;
    double buf = squareSum * rWindowLength - means[0] * means[0];
    stds[0] = buf > 0 ? Math.sqrt(buf) : 0;
    // slide: add the entering sample, drop the leaving one
    for (int w = 1, end = tsData.length - windowLength + 1; w < end; w++) {
        double entering = tsData[w + windowLength - 1];
        double leaving = tsData[w - 1];
        sum += entering - leaving;
        means[w] = sum * rWindowLength;
        squareSum += entering * entering - leaving * leaving;
        buf = squareSum * rWindowLength - means[w] * means[w];
        // clamp tiny negative variances caused by floating-point drift
        stds[w] = buf > 0 ? Math.sqrt(buf) : 0;
    }
}
/** Real part of (r1 + i*im1) * (r2 + i*im2). */
private double complexMultiplyRealPart(double r1, double im1, double r2, double im2) {
    double realProduct = r1 * r2;
    double imagProduct = im1 * im2;
    return realProduct - imagProduct;
}
/** Imaginary part of (r1 + i*im1) * (r2 + i*im2). */
private double complexMultiplyImagPart(double r1, double im1, double r2, double im2) {
    double cross1 = r1 * im2;
    double cross2 = r2 * im1;
    return cross1 + cross2;
}
/** Real part of e^(2*pi*i*u/M), i.e. cos(2*pi*u/M). */
private double realPartEPhi(double u, double M) {
    double angle = 2 * Math.PI * u / M;
    return Math.cos(angle);
}
/** Imaginary part of e^(-2*pi*i*u/M), i.e. -sin(2*pi*u/M). */
private double complexPartEPhi(double u, double M) {
    double angle = 2 * Math.PI * u / M;
    return -Math.sin(angle);
}
/**
 * Normalises the Fourier coefficients (divide by the window's standard
 * deviation, alternate signs) so Euclidean distances lower-bound the
 * distances on the raw windows. Mutates and returns {@code copy}.
 */
private double[] normalizeFT(double[] copy, double std) {
    double factor = (std > 0) ? 1.0 / std : 1.0;
    for (int i = 0; i < copy.length; i++) {
        // even positions keep their sign, odd positions are negated
        copy[i] *= (i % 2 == 0) ? factor : -factor;
    }
    return copy;
}
}
/**
 * Minimum working example: trains WEASEL on a selection of small UCR datasets
 * and prints the test accuracy for each.
 * Fix: the data directory was hard-coded to one developer's machine; it can
 * now be passed as args[0], with the original path kept as the default for
 * backward compatibility.
 */
public static void main(String[] args) throws Exception {
    String basePath = args.length > 0 ? args[0]
            : "/Users/bzcschae/workspace/TSC_TONY_new/TimeSeriesClassification/TSCProblems/";
    for (String dataset : new String[]{"Coffee", "ECG200", "FaceFour", "OliveOil",
            "GunPoint", "Beef", "DiatomSizeReduction", "CBF", "ECGFiveDays",
            "TwoLeadECG", "MoteStrain", "ItalyPowerDemand"}) {
        Instances train = DatasetLoading.loadDataNullable(basePath + dataset + "/" + dataset + "_TRAIN.arff");
        Instances test = DatasetLoading.loadDataNullable(basePath + dataset + "/" + dataset + "_TEST.arff");
        Classifier c = new WEASEL();
        c.buildClassifier(train);
        double accuracy = ClassifierTools.accuracy(test, c);
        System.out.println("WEASEL accuracy on " + dataset + " fold 0 = " + accuracy);
    }
}
} | 57,604 | 31.86081 | 178 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/dictionary_based/cBOSS.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.dictionary_based;
import evaluation.storage.ClassifierResults;
import experiments.data.DatasetLoading;
import tsml.classifiers.Checkpointable;
import tsml.classifiers.EnhancedAbstractClassifier;
import tsml.classifiers.MultiThreadable;
import tsml.classifiers.TrainTimeContractable;
import utilities.ClassifierTools;
import utilities.samplers.RandomIndexSampler;
import utilities.samplers.RandomRoundRobinIndexSampler;
import utilities.samplers.RandomStratifiedIndexSampler;
import utilities.samplers.Sampler;
import weka.classifiers.functions.GaussianProcesses;
import weka.core.*;
import java.io.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.Random;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static utilities.multivariate_tools.MultivariateInstanceTools.*;
import static weka.core.Utils.sum;
/**
* cBOSS classifier with parameter search and ensembling for univariate and
* multivariate time series classification.
* If parameters are known, use the class BOSSIndividual and directly provide them.
* <p>
* Options to change the method of ensembling to randomly select parameters with or without a filter.
* Has the capability to contract train time and checkpoint when using a random ensemble.
* <p>
* Alphabetsize fixed to four and maximum wordLength of 16.
*
* @author Matthew Middlehurst
* <p>
* Implementation based on the algorithm described in getTechnicalInformation()
*/
public class cBOSS extends EnhancedAbstractClassifier implements TrainTimeContractable,
Checkpointable, TechnicalInformationHandler, MultiThreadable {
private ArrayList<Double>[] paramAccuracy;
private ArrayList<Double>[] paramTime;
private ArrayList<Double>[] paramMemory;
private int ensembleSize = 50;
private int ensembleSizePerChannel = -1;
private boolean randomCVAccEnsemble = false;
// --- Ensemble voting / individual evaluation options ---
private boolean useWeights = false;           // weight each member's vote by accuracy^4
private boolean useFastTrainEstimate = false; // estimate individual train accuracy on a subsample of instances
private int maxEvalPerClass = -1;             // per-class cap for the fast train estimate (-1 = unused)
private int maxEval = 500;                    // total instance cap for the fast train estimate
// --- Parameter-space bounds (proportions of the series length) ---
private double maxWinLenProportion = 1;
private double maxWinSearchProportion = 0.25;
// --- Train-set subsampling options ---
private boolean reduceTrainInstances = false;
private double trainProportion = -1;          // if > 0, subsample to this proportion; else use maxTrainInstances
private int maxTrainInstances = 1000;
private boolean stratifiedSubsample = false;
private boolean cutoff = false;               // post-build filter: drop members far below the best accuracy
// --- Ensemble state (one LinkedList of members per channel/dimension) ---
private transient LinkedList<IndividualBOSS>[] classifiers;
private int numSeries;                        // number of channels (1 for univariate)
private int[] numClassifiers;                 // retained member count per channel
private int currentSeries = 0;                // channel the next member will be built for
private boolean isMultivariate = false;
// --- Fixed BOSS parameter grid ---
private final int[] wordLengths = {16, 14, 12, 10, 8};
private final int[] alphabetSize = {4};
private final boolean[] normOptions = {true, false};
private final double correctThreshold = 0.92; // cutoff keeps members with acc >= maxAcc * this
private int maxEnsembleSize = 500;            // hard cap on retained members per channel
// --- Bayesian (GP-guided) parameter selection ---
private boolean bayesianParameterSelection = false;
private int initialRandomParameters = 20;     // random picks before the GP takes over
private int[] initialParameterCount;
private Instances[] parameterPool;            // remaining untried parameter combinations, per channel
private Instances[] prevParameters;           // already-tried combinations (GP training data), per channel
// --- Checkpointing ---
private String checkpointPath;
private boolean checkpoint = false;
private long checkpointTime = 0;
private long checkpointTimeDiff = 0;          // serialisation time, excluded from the build-time budget
private boolean cleanupCheckpointFiles = false;
private boolean loadAndFinish = false;        // load checkpoint then stop building further members
// --- Train-time contracting ---
private long trainContractTimeNanos = 0;
private boolean trainTimeContract = false;
private boolean underContractTime = false;
//cBOSS CV acc variables, stored as field for checkpointing.
private int[] classifiersBuilt;
private int[] lowestAccIdx;                   // index of the weakest retained member, per channel
private double[] lowestAcc;                   // accuracy of that weakest member, per channel
private boolean fullTrainCVEstimate = false;
private double[][] trainDistributions;        // accumulated weighted train votes [instance][class]
private double[] idxSubsampleCount;           // accumulated vote weight per train instance
private ArrayList<Integer> latestTrainPreds;  // predictions from the most recent individualTrainAcc call
private ArrayList<Integer> latestTrainIdx;    // original train indices for those predictions
private ArrayList<ArrayList<Integer>>[] filterTrainPreds;
private ArrayList<ArrayList<Integer>>[] filterTrainIdx;
private Instances seriesHeader;               // per-channel header for multivariate classification
private transient Instances train;
private double ensembleCvAcc = -1;
private double[] ensembleCvPreds = null;
// --- Multi-threading ---
private int numThreads = 1;
private boolean multiThread = false;
private ExecutorService ex;
protected static final long serialVersionUID = 22554L;
/** Default constructor: applies the settings recommended in the cBOSS paper. */
public cBOSS() {
    super(CAN_ESTIMATE_OWN_PERFORMANCE);
    useRecommendedSettings();
}
/**
 * @param useRecommendedSettings if true, apply the paper's recommended settings;
 *                               if false, leave the field defaults untouched
 */
public cBOSS(boolean useRecommendedSettings) {
    super(CAN_ESTIMATE_OWN_PERFORMANCE);
    if (useRecommendedSettings) useRecommendedSettings();
}
/**
 * Citation details for the cBOSS publication
 * (Middlehurst, Vickers and Bagnall).
 */
@Override
public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation info = new TechnicalInformation(TechnicalInformation.Type.ARTICLE);
    info.setValue(TechnicalInformation.Field.AUTHOR, "M. Middlehurst, W. Vickers and A. Bagnall");
    info.setValue(TechnicalInformation.Field.TITLE,
            "Scalable dictionary classifiers for time series classification");
    info.setValue(TechnicalInformation.Field.JOURNAL,
            "International Conference on Intelligent Data Engineering and Automated Learning");
    info.setValue(TechnicalInformation.Field.PAGES, "11-19");
    info.setValue(TechnicalInformation.Field.YEAR, "2020");
    return info;
}
/**
 * cBOSS handles numeric (univariate) and relational (multivariate)
 * attributes with a nominal class; needs at least two instances.
 */
@Override
public Capabilities getCapabilities() {
    Capabilities caps = super.getCapabilities();
    caps.disableAll();
    // attributes
    caps.enable(Capabilities.Capability.NUMERIC_ATTRIBUTES);
    caps.enable(Capabilities.Capability.RELATIONAL_ATTRIBUTES);
    // class
    caps.enable(Capabilities.Capability.NOMINAL_CLASS);
    caps.setMinimumNumberInstances(2);
    return caps;
}
/**
 * Serialises the ensemble configuration: channel count, member count per
 * channel, and each member's window size, word length, alphabet size and
 * normalisation flag.
 */
@Override
public String getParameters() {
    StringBuilder out = new StringBuilder(super.getParameters());
    out.append(",numSeries,").append(numSeries);
    for (int s = 0; s < numSeries; s++) {
        out.append(",numclassifiers,").append(s).append(",").append(numClassifiers[s]);
        for (int m = 0; m < numClassifiers[s]; ++m) {
            IndividualBOSS member = classifiers[s].get(m);
            out.append(",windowSize,").append(member.getWindowSize())
               .append(",wordLength,").append(member.getWordLength());
            out.append(",alphabetSize,").append(member.getAlphabetSize())
               .append(",norm,").append(member.isNorm());
        }
    }
    return out.toString();
}
/**
 * Applies the settings recommended in the cBOSS paper: evaluate 250
 * candidates, keep the best 50, weight votes by accuracy, and train each
 * member on a 70% subsample.
 */
private void useRecommendedSettings() {
    ensembleSize = 250;
    maxEnsembleSize = 50;
    randomCVAccEnsemble = true;
    useWeights = true;
    reduceTrainInstances = true;
    trainProportion = 0.7;
    //bayesianParameterSelection = true;
}
/**
 * Sets the train-time contract in nanoseconds; a non-positive amount
 * disables contracting.
 */
@Override
public void setTrainTimeLimit(long amount) {
    printLineDebug(" cBOSS setting contract to " + amount);
    trainTimeContract = amount > 0;
    if (trainTimeContract)
        trainContractTimeNanos = amount;
}
/**
 * Enables multi-threaded building/classification with the given thread
 * count; a count of 1 or less disables it.
 */
@Override
public void enableMultiThreading(int numThreads) {
    multiThread = numThreads > 1;
    this.numThreads = multiThread ? numThreads : 1;
}
//Set the path where checkpointed versions will be stored
@Override //Checkpointable
public boolean setCheckpointPath(String path) {
    // Only turn checkpointing on if the directory can actually be created.
    if (!Checkpointable.super.createDirectories(path))
        return false;
    checkpointPath = path;
    checkpoint = true;
    return true;
}
//Define how to copy from a loaded object to this object
/**
 * Restores this classifier's state from a checkpointed cBOSS object, then
 * re-loads each individual member from its own .ser file under
 * checkpointPath. Checkpoint-control fields (path, flags) are deliberately
 * NOT copied so the current run's settings survive the reload. The time
 * spent loading is added to checkpointTimeDiff so it is excluded from the
 * build-time budget.
 *
 * @param obj the deserialised checkpoint object; must be a cBOSS
 * @throws Exception if obj is not a cBOSS or a member file cannot be read
 */
@Override
public void copyFromSerObject(Object obj) throws Exception {
    if (!(obj instanceof cBOSS))
        throw new Exception("The SER file is not an instance of cBOSS");
    cBOSS saved = ((cBOSS) obj);
    System.out.println("Loading cBOSS" + seed + ".ser");
    //copy over variables from serialised object
    paramAccuracy = saved.paramAccuracy;
    paramTime = saved.paramTime;
    paramMemory = saved.paramMemory;
    ensembleSize = saved.ensembleSize;
    seed = saved.seed;
    ensembleSizePerChannel = saved.ensembleSizePerChannel;
    rand = saved.rand;
    randomCVAccEnsemble = saved.randomCVAccEnsemble;
    useWeights = saved.useWeights;
    useFastTrainEstimate = saved.useFastTrainEstimate;
    maxEvalPerClass = saved.maxEvalPerClass;
    maxEval = saved.maxEval;
    maxWinLenProportion = saved.maxWinLenProportion;
    maxWinSearchProportion = saved.maxWinSearchProportion;
    reduceTrainInstances = saved.reduceTrainInstances;
    trainProportion = saved.trainProportion;
    maxTrainInstances = saved.maxTrainInstances;
    stratifiedSubsample = saved.stratifiedSubsample;
    cutoff = saved.cutoff;
    // loadAndFinish = saved.loadAndFinish;
    numSeries = saved.numSeries;
    numClassifiers = saved.numClassifiers;
    currentSeries = saved.currentSeries;
    isMultivariate = saved.isMultivariate;
    // wordLengths = saved.wordLengths;
    // alphabetSize = saved.alphabetSize;
    // correctThreshold = saved.correctThreshold;
    maxEnsembleSize = saved.maxEnsembleSize;
    bayesianParameterSelection = saved.bayesianParameterSelection;
    initialRandomParameters = saved.initialRandomParameters;
    initialParameterCount = saved.initialParameterCount;
    parameterPool = saved.parameterPool;
    prevParameters = saved.prevParameters;
    // checkpointPath = saved.checkpointPath;
    // serPath = saved.serPath;
    // checkpoint = saved.checkpoint;
    checkpointTime = saved.checkpointTime;
    // checkpointTimeDiff = checkpointTimeDiff;
    cleanupCheckpointFiles = saved.cleanupCheckpointFiles;
    trainContractTimeNanos = saved.trainContractTimeNanos;
    trainTimeContract = saved.trainTimeContract;
    underContractTime = saved.underContractTime;
    classifiersBuilt = saved.classifiersBuilt;
    lowestAccIdx = saved.lowestAccIdx;
    lowestAcc = saved.lowestAcc;
    fullTrainCVEstimate = saved.fullTrainCVEstimate;
    trainDistributions = saved.trainDistributions;
    idxSubsampleCount = saved.idxSubsampleCount;
    latestTrainPreds = saved.latestTrainPreds;
    latestTrainIdx = saved.latestTrainIdx;
    filterTrainPreds = saved.filterTrainPreds;
    filterTrainIdx = saved.filterTrainIdx;
    seriesHeader = saved.seriesHeader;
    trainResults = saved.trainResults;
    ensembleCvAcc = saved.ensembleCvAcc;
    ensembleCvPreds = saved.ensembleCvPreds;
    numThreads = saved.numThreads;
    multiThread = saved.multiThread;
    numClasses = saved.numClasses;
    //load in each serisalised classifier
    // Members are transient, so each one was written to its own file
    // named cBOSSIndividual<seed><series>-<index>.ser.
    classifiers = new LinkedList[numSeries];
    for (int n = 0; n < numSeries; n++) {
        classifiers[n] = new LinkedList();
        for (int i = 0; i < saved.numClassifiers[n]; i++) {
            System.out.println("Loading cBOSSIndividual" + seed + n + "-" + i + ".ser");
            FileInputStream fis = new FileInputStream(checkpointPath + "cBOSSIndividual" + seed + n + "-" + i + ".ser");
            try (ObjectInputStream in = new ObjectInputStream(fis)) {
                Object indv = in.readObject();
                if (!(indv instanceof IndividualBOSS))
                    throw new Exception("The SER file " + n + "-" + i + " is not an instance of cBOSSIndividual");
                IndividualBOSS ser = ((IndividualBOSS) indv);
                classifiers[n].add(ser);
            }
        }
    }
    // Exclude the time spent loading from the build-time budget.
    checkpointTimeDiff = saved.checkpointTimeDiff + (System.nanoTime() - checkpointTime);
}
/** Returns the train-set results populated during buildClassifier. */
@Override
public ClassifierResults getTrainResults() {
    // trainResults.setAcc(ensembleCvAcc);
    return trainResults;
}
// --- Simple mutators for the ensemble/search options documented on the fields ---
public void setEnsembleSize(int size) {
    ensembleSize = size;
}
public void setMaxEnsembleSize(int size) {
    maxEnsembleSize = size;
}
public void setRandomCVAccEnsemble(boolean b) {
    randomCVAccEnsemble = b;
}
public void useWeights(boolean b) {
    useWeights = b;
}
public void setFastTrainEstimate(boolean b) {
    useFastTrainEstimate = b;
}
public void setMaxEval(int i) {
    maxEval = i;
}
public void setMaxEvalPerClass(int i) {
    maxEvalPerClass = i;
}
public void setReduceTrainInstances(boolean b) {
    reduceTrainInstances = b;
}
public void setTrainProportion(double d) {
    trainProportion = d;
}
public void setMaxTrainInstances(int i) {
    maxTrainInstances = i;
}
public void setCleanupCheckpointFiles(boolean b) {
    cleanupCheckpointFiles = b;
}
public void setFullTrainCVEstimate(boolean b) {
    fullTrainCVEstimate = b;
}
public void setCutoff(boolean b) {
    cutoff = b;
}
// NOTE(review): duplicate of setCleanupCheckpointFiles above — candidate for deprecation.
public void cleanupCheckpointFiles(boolean b) {
    cleanupCheckpointFiles = b;
}
public void loadAndFinish(boolean b) {
    loadAndFinish = b;
}
public void setMaxWinLenProportion(double d) {
    maxWinLenProportion = d;
}
public void setMaxWinSearchProportion(double d) {
    maxWinSearchProportion = d;
}
public void setBayesianParameterSelection(boolean b) {
    bayesianParameterSelection = b;
}
/**
 * Builds the cBOSS ensemble on the given training data.
 *
 * Derives the window-size search grid from the series length, optionally
 * resumes from a checkpoint, splits multivariate data into one Instances
 * per channel, then delegates member construction to either
 * buildRandomCVAccBOSS (accuracy-filtered) or buildRandomBOSS. Finally
 * records build time and, if requested, a train-set performance estimate.
 *
 * Fix vs previous revision: trainResults.setParas(getParameters()) was
 * called twice back-to-back; the duplicate call is removed. The checkpoint
 * file path is also only touched when checkpointing is enabled.
 *
 * @param data training set; class attribute must be last
 * @throws Exception if the data fail the capability test or the class
 *                   attribute is not last
 */
@Override
public void buildClassifier(final Instances data) throws Exception {
    // can classifier handle the data?
    getCapabilities().testWithFail(data);
    trainResults.setBuildTime(System.nanoTime());
    long startTime = System.nanoTime();
    if (data.checkForAttributeType(Attribute.RELATIONAL)) {
        isMultivariate = true;
    }
    //Window length settings
    int seriesLength = isMultivariate ? channelLength(data) - 1 : data.numAttributes() - 1; //minus class attribute
    int minWindow = 10;
    int maxWindow = (int) (seriesLength * maxWinLenProportion);
    if (maxWindow < minWindow) minWindow = maxWindow / 2;
    //whats the max number of window sizes that should be searched through
    double maxWindowSearches = seriesLength * maxWinSearchProportion;
    int winInc = (int) ((maxWindow - minWindow) / maxWindowSearches);
    if (winInc < 1) winInc = 1;
    //if checkpointing and serialised files exist load said files
    if (checkpoint && new File(checkpointPath + "cBOSS" + seed + ".ser").exists()) {
        printLineDebug("Loading from checkpoint file");
        loadFromFile(checkpointPath + "cBOSS" + seed + ".ser");
    }
    //initialise variables
    else {
        if (data.classIndex() != data.numAttributes() - 1)
            throw new Exception("BOSS_BuildClassifier: Class attribute not set as last attribute in dataset");
        printLineDebug("Building cBOSS target number of classifiers = " + ensembleSize);
        numClasses = data.numClasses();
        //Multivariate: one member list per channel
        if (isMultivariate) {
            numSeries = numDimensions(data);
            classifiers = new LinkedList[numSeries];
            for (int n = 0; n < numSeries; n++) {
                classifiers[n] = new LinkedList<>();
            }
            numClassifiers = new int[numSeries];
            if (ensembleSizePerChannel > 0) {
                ensembleSize = ensembleSizePerChannel * numSeries;
            }
        }
        //Univariate
        else {
            numSeries = 1;
            classifiers = new LinkedList[1];
            classifiers[0] = new LinkedList<>();
            numClassifiers = new int[1];
        }
        if (maxEvalPerClass > 0) {
            maxEval = numClasses * maxEvalPerClass;
        }
        rand = new Random(seed);
        parameterPool = uniqueParameters(minWindow, maxWindow, winInc);
        if (randomCVAccEnsemble) {
            // Accuracy-filter bookkeeping: track the weakest retained member per channel.
            classifiersBuilt = new int[numSeries];
            lowestAccIdx = new int[numSeries];
            lowestAcc = new double[numSeries];
            for (int i = 0; i < numSeries; i++) lowestAcc[i] = Double.MAX_VALUE;
            if (getEstimateOwnPerformance()) {
                filterTrainPreds = new ArrayList[numSeries];
                filterTrainIdx = new ArrayList[numSeries];
                for (int n = 0; n < numSeries; n++) {
                    filterTrainPreds[n] = new ArrayList();
                    filterTrainIdx[n] = new ArrayList();
                }
            }
        }
        if (getEstimateOwnPerformance()) {
            trainDistributions = new double[data.numInstances()][numClasses];
            idxSubsampleCount = new double[data.numInstances()];
        }
    }
    train = data;
    if (multiThread) {
        if (numThreads == 1) numThreads = Runtime.getRuntime().availableProcessors();
        if (ex == null) ex = Executors.newFixedThreadPool(numThreads);
    }
    //required to deal with multivariate datasets, each channel is split into its own instances
    Instances[] series;
    //Multivariate
    if (isMultivariate) {
        series = splitMultivariateInstances(data);
        seriesHeader = new Instances(series[0], 0);
    }
    //Univariate
    else {
        series = new Instances[1];
        series[0] = data;
    }
    //Contracting: build until time runs out rather than to a fixed count
    if (trainTimeContract) {
        ensembleSize = 0;
        underContractTime = true;
    }
    //If checkpointing and flag is set stop building.
    if (!(checkpoint && loadAndFinish)) {
        //Randomly selected ensemble with accuracy filter
        if (randomCVAccEnsemble) {
            buildRandomCVAccBOSS(series);
        }
        //Randomly selected ensemble
        else {
            buildRandomBOSS(series);
        }
    }
    //end train time in nanoseconds, excluding time spent serialising checkpoints
    trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
    trainResults.setBuildTime(System.nanoTime() - trainResults.getBuildTime() - checkpointTimeDiff);
    //Estimate train accuracy
    if (getEstimateOwnPerformance()) {
        long start = System.nanoTime();
        ensembleCvAcc = findEnsembleTrainAcc(data);
        long end = System.nanoTime();
        trainResults.setErrorEstimateTime(end - start);
    }
    trainResults.setBuildPlusEstimateTime(trainResults.getBuildTime() + trainResults.getErrorEstimateTime());
    //delete any serialised files and holding folder for checkpointing on completion
    if (checkpoint && cleanupCheckpointFiles) {
        checkpointCleanup();
    }
    trainResults.setParas(getParameters());
    if (randomCVAccEnsemble)
        printLineDebug("*************** Finished cBOSS Build with " + classifiersBuilt[0] + " Base BOSS evaluated " +
                "*************** in " + (System.nanoTime() - startTime) / 1000000000 + " Seconds. Number retained = " + classifiers[0].size());
}
/**
 * Builds the ensemble with the accuracy-filtered strategy: candidates are
 * built from randomly (or GP-) selected parameters; once a channel holds
 * maxEnsembleSize members, a new candidate only enters by displacing the
 * weakest one. Optionally applies the cutoff filter and accumulates
 * weighted train votes for the performance estimate.
 *
 * Fix vs previous revision: the cutoff filter removed members from
 * classifiers[currentSeries] while iterating channel n — for multivariate
 * data with currentSeries != n this pruned the wrong channel and left
 * numClassifiers[n]/filterTrain*[n] inconsistent. It now removes from
 * classifiers[n].
 *
 * NOTE(review): when no contract is set, withinTrainContract(...) always
 * returns true, so this loop runs until the parameter pool is exhausted
 * rather than stopping at ensembleSize — confirm that is intended.
 *
 * @param series one Instances per channel (single element when univariate)
 */
private void buildRandomCVAccBOSS(Instances[] series) throws Exception {
    //build classifiers up to a set size
    while ((withinTrainContract(trainResults.getBuildTime()) || sum(classifiersBuilt) < ensembleSize) &&
            parameterPool[numSeries - 1].size() > 0) {
        long indivBuildTime = System.nanoTime();
        boolean checkpointChange = false;
        double[] parameters = selectParameters();
        if (parameters == null) continue;
        IndividualBOSS boss = new IndividualBOSS((int) parameters[0], (int) parameters[1], (int) parameters[2], parameters[3] == 1, multiThread, numThreads, ex);
        Instances data = resampleData(series[currentSeries], boss);
        boss.cleanAfterBuild = true;
        boss.seed = seed;
        boss.buildClassifier(data);
        // Early-abandon the accuracy estimate once the member cannot beat the current weakest.
        boss.accuracy = individualTrainAcc(boss, data, numClassifiers[currentSeries] < maxEnsembleSize ? -99999999
                : lowestAcc[currentSeries]);
        if (useWeights) {
            boss.weight = Math.pow(boss.accuracy, 4);
            if (boss.weight == 0) boss.weight = Double.MIN_VALUE;
        }
        if (bayesianParameterSelection) paramAccuracy[currentSeries].add(boss.accuracy);
        if (trainTimeContract) paramTime[currentSeries].add((double) (System.nanoTime() - indivBuildTime));
        if (numClassifiers[currentSeries] < maxEnsembleSize) {
            // Room left: always keep the candidate, tracking the weakest member so far.
            if (boss.accuracy < lowestAcc[currentSeries]) {
                lowestAccIdx[currentSeries] = classifiersBuilt[currentSeries];
                lowestAcc[currentSeries] = boss.accuracy;
            }
            classifiers[currentSeries].add(boss);
            numClassifiers[currentSeries]++;
            if (getEstimateOwnPerformance()) {
                filterTrainPreds[currentSeries].add(latestTrainPreds);
                filterTrainIdx[currentSeries].add(latestTrainIdx);
            }
        } else if (boss.accuracy > lowestAcc[currentSeries]) {
            // Full: the candidate displaces the weakest retained member.
            double[] newLowestAcc = findMinEnsembleAcc();
            lowestAccIdx[currentSeries] = (int) newLowestAcc[0];
            lowestAcc[currentSeries] = newLowestAcc[1];
            classifiers[currentSeries].remove(lowestAccIdx[currentSeries]);
            classifiers[currentSeries].add(lowestAccIdx[currentSeries], boss);
            if (getEstimateOwnPerformance()) {
                filterTrainPreds[currentSeries].remove(lowestAccIdx[currentSeries]);
                filterTrainIdx[currentSeries].remove(lowestAccIdx[currentSeries]);
                filterTrainPreds[currentSeries].add(lowestAccIdx[currentSeries], latestTrainPreds);
                filterTrainIdx[currentSeries].add(lowestAccIdx[currentSeries], latestTrainIdx);
            }
            checkpointChange = true;
        }
        classifiersBuilt[currentSeries]++;
        int prev = currentSeries;
        if (isMultivariate) {
            nextSeries();
        }
        if (checkpoint) {
            if (classifiersBuilt[currentSeries] <= maxEnsembleSize) {
                checkpoint(prev, -1, true);
            } else {
                checkpoint(prev, lowestAccIdx[prev], checkpointChange);
            }
        }
    }
    if (cutoff) {
        // Drop members whose accuracy falls below correctThreshold of the channel's best.
        for (int n = 0; n < numSeries; n++) {
            double maxAcc = 0;
            for (int i = 0; i < classifiers[n].size(); i++) {
                if (classifiers[n].get(i).accuracy > maxAcc) {
                    maxAcc = classifiers[n].get(i).accuracy;
                }
            }
            for (int i = 0; i < classifiers[n].size(); i++) {
                IndividualBOSS b = classifiers[n].get(i);
                if (b.accuracy < maxAcc * correctThreshold) {
                    classifiers[n].remove(i);
                    if (getEstimateOwnPerformance()) {
                        filterTrainPreds[n].remove(i);
                        filterTrainIdx[n].remove(i);
                    }
                    numClassifiers[n]--;
                    i--;
                }
            }
        }
    }
    if (getEstimateOwnPerformance()) {
        // Fold each retained member's subsample predictions into the weighted vote tallies.
        for (int n = 0; n < numSeries; n++) {
            for (int i = 0; i < filterTrainIdx[n].size(); i++) {
                ArrayList<Integer> trainIdx = filterTrainIdx[n].get(i);
                ArrayList<Integer> trainPreds = filterTrainPreds[n].get(i);
                double weight = classifiers[n].get(i).weight;
                for (int g = 0; g < trainIdx.size(); g++) {
                    idxSubsampleCount[trainIdx.get(g)] += weight;
                    trainDistributions[trainIdx.get(g)][trainPreds.get(g)] += weight;
                }
            }
        }
        filterTrainPreds = null;
        filterTrainIdx = null;
        latestTrainPreds = null;
        latestTrainIdx = null;
        // Normalise tallies into per-instance distributions.
        for (int i = 0; i < trainDistributions.length; i++) {
            if (idxSubsampleCount[i] > 0) {
                for (int n = 0; n < trainDistributions[i].length; n++) {
                    trainDistributions[i][n] /= idxSubsampleCount[i];
                }
            }
        }
    }
}
/**
 * Builds the ensemble with the plain random strategy: every candidate
 * built from the (possibly GP-guided) parameter pool is kept, up to
 * maxEnsembleSize per channel under contract or ensembleSize overall.
 * Individual accuracy is only computed when something needs it (weights,
 * Bayesian selection, or the train estimate).
 *
 * @param series one Instances per channel (single element when univariate)
 */
private void buildRandomBOSS(Instances[] series) throws Exception {
    //build classifiers up to a set size
    while (((withinTrainContract(trainResults.getBuildTime()) && numClassifiers[numSeries - 1] < maxEnsembleSize)
            || sum(numClassifiers) < ensembleSize) && parameterPool[numSeries - 1].size() > 0) {
        long indivBuildTime = System.nanoTime();
        double[] parameters = selectParameters();
        if (parameters == null) continue;
        IndividualBOSS boss = new IndividualBOSS((int) parameters[0], (int) parameters[1], (int) parameters[2],
                parameters[3] == 1, multiThread, numThreads, ex);
        Instances data = resampleData(series[currentSeries], boss);
        boss.cleanAfterBuild = true;
        boss.seed = seed;
        boss.buildClassifier(data);
        classifiers[currentSeries].add(boss);
        numClassifiers[currentSeries]++;
        // accuracy == -1 marks "not yet evaluated"; compute lazily, at most once.
        if (useWeights) {
            if (boss.accuracy == -1) boss.accuracy = individualTrainAcc(boss, data, -99999999);
            boss.weight = Math.pow(boss.accuracy, 4);
            if (boss.weight == 0) boss.weight = Double.MIN_VALUE;
        }
        if (bayesianParameterSelection) {
            if (boss.accuracy == -1) boss.accuracy = individualTrainAcc(boss, data, -99999999);
            paramAccuracy[currentSeries].add(boss.accuracy);
        }
        if (trainTimeContract) paramTime[currentSeries].add((double) (System.nanoTime() - indivBuildTime));
        // if (memoryContract) paramMemory[currentSeries].add((double)SizeOf.deepSizeOf(boss));
        if (getEstimateOwnPerformance()) {
            if (boss.accuracy == -1) boss.accuracy = individualTrainAcc(boss, data, -99999999);
            // Fold this member's subsample predictions into the weighted vote tallies.
            for (int i = 0; i < latestTrainIdx.size(); i++) {
                idxSubsampleCount[latestTrainIdx.get(i)] += boss.weight;
                trainDistributions[latestTrainIdx.get(i)][latestTrainPreds.get(i)] += boss.weight;
            }
        }
        int prev = currentSeries;
        if (isMultivariate) {
            nextSeries();
        }
        if (checkpoint) {
            checkpoint(prev, -1, true);
        }
    }
    if (getEstimateOwnPerformance()) {
        latestTrainPreds = null;
        latestTrainIdx = null;
        // Normalise tallies into per-instance distributions.
        for (int i = 0; i < trainDistributions.length; i++) {
            if (idxSubsampleCount[i] > 0) {
                for (int n = 0; n < trainDistributions[i].length; n++) {
                    trainDistributions[i][n] /= idxSubsampleCount[i];
                }
            }
        }
    }
}
/**
 * Serialises the current ensemble state: optionally the most recently
 * built/replaced individual member (its own .ser file), then this object
 * via an atomic-ish temp-file-then-rename. Serialisation time is added to
 * checkpointTimeDiff so it is excluded from the build-time budget.
 *
 * Fixes vs previous revision: both streams are now managed by
 * try-with-resources (the FileOutputStream previously was not, and the
 * explicit close() calls were redundant), and a failed renameTo is
 * reported instead of silently ignored.
 *
 * @param seriesNo     channel of the member to save, or negative to skip
 * @param classifierNo member index, or negative for the last one added
 * @param saveIndiv    whether to save an individual member at all
 */
private void checkpoint(int seriesNo, int classifierNo, boolean saveIndiv) {
    if (checkpointPath != null) {
        try {
            File f = new File(checkpointPath);
            if (!f.isDirectory())
                f.mkdirs();
            //time the checkpoint occured
            checkpointTime = System.nanoTime();
            if (saveIndiv && seriesNo >= 0) {
                if (classifierNo < 0) classifierNo = classifiers[seriesNo].size() - 1;
                //save the last built individual classifier
                IndividualBOSS indiv = classifiers[seriesNo].get(classifierNo);
                try (FileOutputStream fos = new FileOutputStream(checkpointPath + "cBOSSIndividual" + seed + seriesNo + "-" + classifierNo + ".ser");
                     ObjectOutputStream out = new ObjectOutputStream(fos)) {
                    out.writeObject(indiv);
                }
            }
            //dont take into account time spent serialising into build time
            checkpointTimeDiff += System.nanoTime() - checkpointTime;
            checkpointTime = System.nanoTime();
            //save this, classifiers and train data not included
            saveToFile(checkpointPath + "cBOSS" + seed + "temp.ser");
            File file = new File(checkpointPath + "cBOSS" + seed + "temp.ser");
            File file2 = new File(checkpointPath + "cBOSS" + seed + ".ser");
            file2.delete();
            if (!file.renameTo(file2))
                System.out.println("Checkpoint rename to " + file2 + " FAILED");
            checkpointTimeDiff += System.nanoTime() - checkpointTime;
        } catch (Exception e) {
            e.printStackTrace();
            System.out.println("Serialisation to " + checkpointPath + " FAILED");
        }
    }
}
/**
 * Deletes all checkpoint files under checkpointPath, then the directory
 * itself.
 *
 * Fixes vs previous revision: the path was built with a hardcoded "\\"
 * separator (Windows-only); new File(parent, child) is portable. Also
 * guards against File.list() returning null (non-existent/unreadable
 * directory), which previously threw a NullPointerException.
 */
private void checkpointCleanup() {
    File f = new File(checkpointPath);
    String[] files = f.list();
    if (files != null) {
        for (String file : files) {
            new File(f, file).delete();
        }
    }
    f.delete();
}
/**
 * Builds a checkpoint identifier encoding dataset, seed and the settings
 * that shape the ensemble (contract, per-channel size or target size,
 * accuracy-filter cap, weighting).
 */
private String checkpointName(String datasetName) {
    StringBuilder name = new StringBuilder(datasetName).append(seed).append("cBOSS");
    if (trainTimeContract)
        name.append("TTC").append(trainContractTimeNanos);
    else if (isMultivariate && ensembleSizePerChannel > 0)
        name.append("PC").append(ensembleSizePerChannel * numSeries);
    else
        name.append("S").append(ensembleSize);
    if (randomCVAccEnsemble)
        name.append("M").append(maxEnsembleSize);
    if (useWeights)
        name.append("W");
    return name.toString();
}
/**
 * True while the elapsed build time (serialisation time excluded) is
 * inside the contract, or always when no contract is set.
 */
@Override
public boolean withinTrainContract(long start) {
    if (trainContractTimeNanos <= 0) return true; //Not contracted
    long elapsed = System.nanoTime() - start - checkpointTimeDiff;
    return elapsed < trainContractTimeNanos;
}
//[0] = index, [1] = acc
/** Locates the weakest member of the current channel's ensemble. */
private double[] findMinEnsembleAcc() {
    int worstIdx = 0;
    double worstAcc = Double.MAX_VALUE;
    int size = classifiers[currentSeries].size();
    for (int idx = 0; idx < size; ++idx) {
        double acc = classifiers[currentSeries].get(idx).accuracy;
        if (acc < worstAcc) {
            worstAcc = acc;
            worstIdx = idx;
        }
    }
    return new double[]{worstIdx, worstAcc};
}
/**
 * Enumerates the full BOSS parameter grid (word length x alphabet size x
 * window size x normalisation) as one weka Instances per channel, so
 * combinations can be drawn without replacement; the extra class attribute
 * holds the GP target (accuracy or build time). Also initialises the
 * per-channel prevParameters / paramAccuracy / paramTime bookkeeping.
 *
 * @param minWindow smallest window size in the grid
 * @param maxWindow largest window size in the grid
 * @param winInc    step between successive window sizes
 * @return one parameter pool per channel
 */
private Instances[] uniqueParameters(int minWindow, int maxWindow, int winInc) {
    Instances[] parameterPool = new Instances[numSeries];
    ArrayList<double[]> possibleParameters = new ArrayList();
    for (Boolean normalise : normOptions) {
        for (Integer alphSize : alphabetSize) {
            for (int winSize = minWindow; winSize <= maxWindow; winSize += winInc) {
                for (Integer wordLen : wordLengths) {
                    // Encoding matches selectParameters(): [wordLen, alphSize, winSize, norm]
                    double[] parameters = {wordLen, alphSize, winSize, normalise ? 1 : 0};
                    possibleParameters.add(parameters);
                }
            }
        }
    }
    int numAtts = possibleParameters.get(0).length + 1;
    ArrayList<Attribute> atts = new ArrayList<>(numAtts);
    for (int i = 0; i < numAtts; i++) {
        atts.add(new Attribute("att" + i));
    }
    prevParameters = new Instances[numSeries];
    initialParameterCount = new int[numSeries];
    for (int n = 0; n < numSeries; n++) {
        parameterPool[n] = new Instances("params", atts, possibleParameters.size());
        parameterPool[n].setClassIndex(numAtts - 1);
        prevParameters[n] = new Instances(parameterPool[n], 0);
        prevParameters[n].setClassIndex(numAtts - 1);
        for (int i = 0; i < possibleParameters.size(); i++) {
            DenseInstance inst = new DenseInstance(1, possibleParameters.get(i));
            inst.insertAttributeAt(numAtts - 1);  // class slot, filled in later as the GP target
            parameterPool[n].add(inst);
        }
    }
    if (bayesianParameterSelection) {
        paramAccuracy = new ArrayList[numSeries];
        for (int i = 0; i < numSeries; i++) {
            paramAccuracy[i] = new ArrayList<>();
        }
    }
    if (trainTimeContract) {
        paramTime = new ArrayList[numSeries];
        for (int i = 0; i < numSeries; i++) {
            paramTime[i] = new ArrayList<>();
        }
    }
    return parameterPool;
}
/**
 * Draws the next parameter combination for the current channel, removing
 * it from the pool. Under a contract, a Gaussian Process trained on
 * observed build times first prunes combinations predicted to exceed the
 * remaining budget. With Bayesian selection, after initialRandomParameters
 * random draws a GP trained on observed accuracies picks the most
 * promising remaining combination; otherwise the draw is uniform random.
 *
 * @return [wordLen, alphabetSize, windowSize, norm(0/1), classSlot],
 *         or null when the pool is exhausted
 * @throws Exception if GP training/prediction fails
 */
private double[] selectParameters() throws Exception {
    Instance params;
    if (trainTimeContract) {
        if (prevParameters[currentSeries].size() > 0) {
            // Fit build time as a function of parameters on everything tried so far.
            for (int i = 0; i < paramTime[currentSeries].size(); i++) {
                prevParameters[currentSeries].get(i).setClassValue(paramTime[currentSeries].get(i));
            }
            GaussianProcesses gp = new GaussianProcesses();
            gp.buildClassifier(prevParameters[currentSeries]);
            long remainingTime = trainContractTimeNanos - (System.nanoTime() - trainResults.getBuildTime() - checkpointTimeDiff);
            // Prune combinations predicted to blow the remaining budget.
            for (int i = 0; i < parameterPool[currentSeries].size(); i++) {
                double pred = gp.classifyInstance(parameterPool[currentSeries].get(i));
                if (pred > remainingTime) {
                    parameterPool[currentSeries].remove(i);
                    i--;
                }
            }
        }
    }
    if (parameterPool[currentSeries].size() == 0) {
        return null;
    }
    if (bayesianParameterSelection) {
        if (initialParameterCount[currentSeries] < initialRandomParameters) {
            // Warm-up phase: uniform random draws to seed the GP.
            initialParameterCount[currentSeries]++;
            params = parameterPool[currentSeries].remove(rand.nextInt(parameterPool[currentSeries].size()));
        } else {
            // Fit accuracy as a function of parameters, pick the predicted best.
            for (int i = 0; i < paramAccuracy[currentSeries].size(); i++) {
                prevParameters[currentSeries].get(i).setClassValue(paramAccuracy[currentSeries].get(i));
            }
            GaussianProcesses gp = new GaussianProcesses();
            gp.buildClassifier(prevParameters[currentSeries]);
            int bestIndex = 0;
            double bestAcc = -1;
            for (int i = 0; i < parameterPool[currentSeries].numInstances(); i++) {
                double pred = gp.classifyInstance(parameterPool[currentSeries].get(i));
                if (pred > bestAcc) {
                    bestIndex = i;
                    bestAcc = pred;
                }
            }
            params = parameterPool[currentSeries].remove(bestIndex);
        }
    } else {
        params = parameterPool[currentSeries].remove(rand.nextInt(parameterPool[currentSeries].size()));
    }
    prevParameters[currentSeries].add(params);
    return params.toDoubleArray();
}
/**
 * Returns the training data for one ensemble member: either the full set,
 * or (when subsampling is enabled and the set is large enough) a random
 * subsample, with the chosen original indices recorded on the member so
 * its train predictions can be mapped back later.
 */
private Instances resampleData(Instances series, IndividualBOSS boss) {
    int targetSize = trainProportion > 0
            ? (int) (series.numInstances() * trainProportion)
            : maxTrainInstances;
    if (!reduceTrainInstances || series.numInstances() <= targetSize)
        return series;
    Sampler sampler = stratifiedSubsample
            ? new RandomStratifiedIndexSampler(rand)
            : new RandomIndexSampler(rand);
    sampler.setInstances(series);
    Instances sample = new Instances(series, targetSize);
    boss.subsampleIndices = new ArrayList<>(targetSize);
    for (int i = 0; i < targetSize; i++) {
        int idx = (Integer) sampler.next();
        sample.add(series.get(idx));
        boss.subsampleIndices.add(idx);
    }
    return sample;
}
private double individualTrainAcc(IndividualBOSS boss, Instances series, double lowestAcc) throws Exception {
int[] indicies;
if (getEstimateOwnPerformance()) {
latestTrainPreds = new ArrayList();
latestTrainIdx = new ArrayList();
}
if (useFastTrainEstimate && maxEval < series.numInstances()) {
RandomRoundRobinIndexSampler sampler = new RandomRoundRobinIndexSampler(rand);
sampler.setInstances(series);
indicies = new int[maxEval];
for (int i = 0; i < maxEval; ++i) {
int subsampleIndex = sampler.next();
indicies[i] = subsampleIndex;
}
} else {
indicies = new int[series.numInstances()];
for (int i = 0; i < series.numInstances(); ++i) {
indicies[i] = i;
}
}
int correct = 0;
int numInst = indicies.length;
int requiredCorrect = (int) (lowestAcc * numInst);
if (multiThread) {
ArrayList<Future<Double>> futures = new ArrayList<>(numInst);
for (int i = 0; i < numInst; ++i)
futures.add(ex.submit(boss.new TrainNearestNeighbourThread(i)));
int idx = 0;
for (Future<Double> f : futures) {
if (f.get() == series.get(idx).classValue()) {
++correct;
}
idx++;
}
} else {
for (int i = 0; i < numInst; ++i) {
if (correct + numInst - i < requiredCorrect) {
return -1;
}
double c = boss.classifyInstance(indicies[i]); //classify series i, while ignoring its corresponding histogram i
if (c == series.get(indicies[i]).classValue()) {
++correct;
}
if (getEstimateOwnPerformance()) {
latestTrainPreds.add((int) c);
if (boss.subsampleIndices != null) {
latestTrainIdx.add(boss.subsampleIndices.get(indicies[i]));
} else {
latestTrainIdx.add(indicies[i]);
}
}
}
}
return (double) correct / (double) numInst;
}
/** Advances currentSeries to the next channel, wrapping back to 0 after the last. */
public void nextSeries() {
    currentSeries = (currentSeries + 1) % numSeries;
}
private double findEnsembleTrainAcc(Instances data) throws Exception {
this.ensembleCvPreds = new double[data.numInstances()];
int totalClassifers = sum(numClassifiers);
double correct = 0;
trainResults.setEstimatorName(getClassifierName());
trainResults.setDatasetName(data.relationName());
trainResults.setFoldID(seed);
trainResults.setSplit("train");
trainResults.setParas(getParameters());
if (idxSubsampleCount == null) idxSubsampleCount = new double[train.numInstances()];
for (int i = 0; i < data.numInstances(); ++i) {
double[] probs;
if (idxSubsampleCount[i] > 0 && (!fullTrainCVEstimate || idxSubsampleCount[i] == totalClassifers)) {
probs = trainDistributions[i];
} else {
probs = distributionForInstance(i);
}
int maxClass = findIndexOfMax(probs, rand);
if (maxClass == data.get(i).classValue())
++correct;
this.ensembleCvPreds[i] = maxClass;
trainResults.addPrediction(data.get(i).classValue(), probs, maxClass, -1, "");
}
trainResults.finaliseResults();
return correct / data.numInstances();
}
/**
 * Returns the cached train-set accuracy, computing it on demand;
 * -1 on failure.
 */
public double getTrainAcc() {
    if (ensembleCvAcc < 0) {
        try {
            return findEnsembleTrainAcc(train);
        } catch (Exception e) {
            e.printStackTrace();
            return -1;
        }
    }
    return ensembleCvAcc;
}
/**
 * Returns the cached train-set predictions, computing them on demand;
 * may be null if the computation fails.
 */
public double[] getTrainPreds() {
    if (ensembleCvPreds == null) {
        try {
            findEnsembleTrainAcc(train);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    return ensembleCvPreds;
}
//potentially scuffed when train set is subsampled, will have to revisit and discuss if this is a viable option
//for estimation anyway.
/**
 * Class distribution for train instance test, weighted by member weight.
 * Members that trained on the instance classify it by subsample position
 * (histogram-excluded); members that never saw it either classify the raw
 * series (fullTrainCVEstimate) or abstain. Falls back to a uniform
 * distribution when every member abstains.
 *
 * @param test index into the training set
 */
private double[] distributionForInstance(int test) throws Exception {
    double[] classHist = new double[numClasses];
    //get sum of all channels, votes from each are weighted the same.
    double sum = 0;
    for (int n = 0; n < numSeries; n++) {
        for (IndividualBOSS classifier : classifiers[n]) {
            double classification;
            if (classifier.subsampleIndices == null) {
                classification = classifier.classifyInstance(test);
            } else if (classifier.subsampleIndices.contains(test)) {
                classification = classifier.classifyInstance(classifier.subsampleIndices.indexOf(test));
            } else if (fullTrainCVEstimate) {
                Instance series = train.get(test);
                if (isMultivariate) {
                    series = splitMultivariateInstance(series)[n];
                    series.setDataset(seriesHeader);
                }
                classification = classifier.classifyInstance(series);
            } else {
                continue;   // member never saw this instance and no full CV requested: abstain
            }
            classHist[(int) classification] += classifier.weight;
            sum += classifier.weight;
        }
    }
    double[] distributions = new double[numClasses];
    if (sum != 0) {
        for (int i = 0; i < classHist.length; ++i)
            distributions[i] += (classHist[i] / sum);
    } else {
        // No member voted: fall back to a uniform distribution.
        for (int i = 0; i < classHist.length; ++i)
            distributions[i] += 1.0 / numClasses;
    }
    return distributions;
}
/** Predicts the class with the largest ensemble vote share (ties broken via rand). */
@Override
public double classifyInstance(Instance instance) throws Exception {
    return findIndexOfMax(distributionForInstance(instance), rand);
}
/**
 * Class distribution for an unseen instance: every member's weighted vote
 * is accumulated (per channel for multivariate data, optionally in
 * parallel) and normalised; uniform if no member votes.
 *
 * @param instance the series (multivariate instances are split per channel)
 */
@Override
public double[] distributionForInstance(Instance instance) throws Exception {
    double[] classHist = new double[numClasses];
    //get sum of all channels, votes from each are weighted the same.
    double sum = 0;
    Instance[] series;
    //Multivariate
    if (isMultivariate) {
        series = splitMultivariateInstanceWithClassVal(instance);
    }
    //Univariate
    else {
        series = new Instance[1];
        series[0] = instance;
    }
    if (multiThread) {
        // Submit one nearest-neighbour task per member, then gather weighted votes.
        ArrayList<Future<Double>>[] futures = new ArrayList[numSeries];
        for (int n = 0; n < numSeries; n++) {
            futures[n] = new ArrayList<>(numClassifiers[n]);
            for (IndividualBOSS classifier : classifiers[n]) {
                futures[n].add(ex.submit(classifier.new TestNearestNeighbourThread(instance)));
            }
        }
        for (int n = 0; n < numSeries; n++) {
            int idx = 0;
            for (Future<Double> f : futures[n]) {
                double weight = classifiers[n].get(idx).weight;
                classHist[f.get().intValue()] += weight;
                sum += weight;
                idx++;
            }
        }
    } else {
        for (int n = 0; n < numSeries; n++) {
            for (IndividualBOSS classifier : classifiers[n]) {
                double classification = classifier.classifyInstance(series[n]);
                classHist[(int) classification] += classifier.weight;
                sum += classifier.weight;
            }
        }
    }
    double[] distributions = new double[numClasses];
    if (sum != 0) {
        for (int i = 0; i < classHist.length; ++i)
            distributions[i] += classHist[i] / sum;
    } else {
        // No member voted: fall back to a uniform distribution.
        for (int i = 0; i < classHist.length; ++i)
            distributions[i] += 1.0 / numClasses;
    }
    return distributions;
}
    /**
     * Minimum working example / smoke test. Runs several cBOSS configurations
     * (CAWPE-weighted, Bayesian parameter selection, fast max-acc ensemble,
     * subsampled, and 1-minute contracted + checkpointed) on one univariate
     * dataset (ItalyPowerDemand) and one multivariate dataset (ERing),
     * printing test accuracy and per-channel ensemble sizes for each.
     *
     * NOTE(review): the contracted runs checkpoint to "D:\\" — presumably a
     * Windows dev machine; confirm the path before running elsewhere.
     */
    public static void main(String[] args) throws Exception {
        int fold = 0;

        //Minimum working example
        String dataset = "ItalyPowerDemand";
        Instances[] data = DatasetLoading.sampleItalyPowerDemand(fold);
        Instances train = data[0];
        Instances test = data[1];

        String dataset2 = "ERing";
        Instances[] data2 = DatasetLoading.sampleERing(fold);
        Instances train2 = data2[0];
        Instances test2 = data2[1];

        cBOSS c;
        double accuracy;

        //Recommended settings, CV-accuracy weighted ensemble: univariate
        c = new cBOSS(false);
        c.useRecommendedSettings();
        c.setSeed(fold);
        c.setEstimateOwnPerformance(true);
        c.buildClassifier(train);
        accuracy = ClassifierTools.accuracy(test, c);

        System.out.println("CVAcc CAWPE BOSS accuracy on " + dataset + " fold " + fold + " = " + accuracy + " numClassifiers = " + Arrays.toString(c.numClassifiers));

        //Recommended settings: multivariate
        c = new cBOSS(false);
        c.useRecommendedSettings();
        c.setSeed(fold);
        c.setEstimateOwnPerformance(true);
        c.buildClassifier(train2);
        accuracy = ClassifierTools.accuracy(test2, c);

        System.out.println("CVAcc CAWPE BOSS accuracy on " + dataset2 + " fold " + fold + " = " + accuracy + " numClassifiers = " + Arrays.toString(c.numClassifiers));

        //Bayesian parameter selection: univariate
        c = new cBOSS(false);
        c.useRecommendedSettings();
        c.bayesianParameterSelection = true;
        c.setSeed(fold);
        c.setEstimateOwnPerformance(true);
        c.buildClassifier(train);
        accuracy = ClassifierTools.accuracy(test, c);

        System.out.println("Bayesian CVAcc CAWPE BOSS accuracy on " + dataset + " fold " + fold + " = " + accuracy + " numClassifiers = " + Arrays.toString(c.numClassifiers));

        //Bayesian parameter selection: multivariate
        c = new cBOSS(false);
        c.useRecommendedSettings();
        c.bayesianParameterSelection = true;
        c.setSeed(fold);
        c.setEstimateOwnPerformance(true);
        c.buildClassifier(train2);
        accuracy = ClassifierTools.accuracy(test2, c);

        System.out.println("Bayesian CVAcc CAWPE BOSS accuracy on " + dataset2 + " fold " + fold + " = " + accuracy + " numClassifiers = " + Arrays.toString(c.numClassifiers));

        //Fast train estimate with capped evaluations/instances: univariate
        c = new cBOSS(false);
        c.ensembleSize = 250;
        c.setMaxEnsembleSize(50);
        c.setRandomCVAccEnsemble(true);
        c.setSeed(fold);
        c.useFastTrainEstimate = true;
        c.reduceTrainInstances = true;
        c.setMaxEvalPerClass(50);
        c.setMaxTrainInstances(500);
        c.setEstimateOwnPerformance(true);
        c.buildClassifier(train);
        accuracy = ClassifierTools.accuracy(test, c);

        System.out.println("FastMax CVAcc BOSS accuracy on " + dataset + " fold " + fold + " = " + accuracy + " numClassifiers = " + Arrays.toString(c.numClassifiers));

        //Fast train estimate: multivariate
        c = new cBOSS(false);
        c.ensembleSize = 250;
        c.setMaxEnsembleSize(50);
        c.setRandomCVAccEnsemble(true);
        c.setSeed(fold);
        c.useFastTrainEstimate = true;
        c.reduceTrainInstances = true;
        c.setMaxEvalPerClass(50);
        c.setMaxTrainInstances(500);
        c.setEstimateOwnPerformance(true);
        c.buildClassifier(train2);
        accuracy = ClassifierTools.accuracy(test2, c);

        System.out.println("FastMax CVAcc BOSS accuracy on " + dataset2 + " fold " + fold + " = " + accuracy + " numClassifiers = " + Arrays.toString(c.numClassifiers));

        //CAWPE weighting with 70% train subsampling: univariate
        c = new cBOSS(false);
        c.ensembleSize = 100;
        c.useWeights(true);
        c.setSeed(fold);
        c.setReduceTrainInstances(true);
        c.setTrainProportion(0.7);
        c.setEstimateOwnPerformance(true);
        c.buildClassifier(train);
        accuracy = ClassifierTools.accuracy(test, c);

        System.out.println("CAWPE Subsample BOSS accuracy on " + dataset + " fold " + fold + " = " + accuracy + " numClassifiers = " + Arrays.toString(c.numClassifiers));

        //CAWPE weighting with subsampling: multivariate
        c = new cBOSS(false);
        c.ensembleSize = 100;
        c.useWeights(true);
        c.setSeed(fold);
        c.setReduceTrainInstances(true);
        c.setTrainProportion(0.7);
        c.setEstimateOwnPerformance(true);
        c.buildClassifier(train2);
        accuracy = ClassifierTools.accuracy(test2, c);

        System.out.println("CAWPE Subsample BOSS accuracy on " + dataset2 + " fold " + fold + " = " + accuracy + " numClassifiers = " + Arrays.toString(c.numClassifiers));

        //1-minute train-time contract with checkpointing, timed: univariate
        c = new cBOSS(false);
        c.setTrainTimeLimit(TimeUnit.MINUTES, 1);
        c.setCleanupCheckpointFiles(true);
        c.setCheckpointPath("D:\\");
        c.setSeed(fold);
        c.setEstimateOwnPerformance(true);
        long startTime = System.nanoTime();
        c.buildClassifier(train);
        long endTime = System.nanoTime() - startTime;
        accuracy = ClassifierTools.accuracy(test, c);

        System.out.println("Contract 1 Min Checkpoint BOSS accuracy on " + dataset + " fold " + fold + " = " + accuracy + " numClassifiers = " + Arrays.toString(c.numClassifiers) + " in " + endTime * 1e-9 + " seconds");

        //1-minute contract with checkpointing, timed: multivariate
        c = new cBOSS(false);
        c.setTrainTimeLimit(TimeUnit.MINUTES, 1);
        c.setCleanupCheckpointFiles(true);
        c.setCheckpointPath("D:\\");
        c.setSeed(fold);
        c.setEstimateOwnPerformance(true);
        long startTime2 = System.nanoTime();
        c.buildClassifier(train2);
        long endTime2 = System.nanoTime() - startTime2;
        accuracy = ClassifierTools.accuracy(test2, c);

        System.out.println("Contract 1 Min Checkpoint BOSS accuracy on " + dataset2 + " fold " + fold + " = " + accuracy + " numClassifiers = " + Arrays.toString(c.numClassifiers) + " in " + endTime2 * 1e-9 + " seconds");

        //Output 15/03/21
        /*
            CVAcc CAWPE BOSS accuracy on ItalyPowerDemand fold 0 = 0.923226433430515 numClassifiers = [50]
            CVAcc CAWPE BOSS accuracy on ERing fold 0 = 0.8777777777777778 numClassifiers = [50, 50, 50, 50]
            Bayesian CVAcc CAWPE BOSS accuracy on ItalyPowerDemand fold 0 = 0.9300291545189504 numClassifiers = [50]
            Bayesian CVAcc CAWPE BOSS accuracy on ERing fold 0 = 0.8777777777777778 numClassifiers = [50, 50, 50, 50]
            FastMax CVAcc BOSS accuracy on ItalyPowerDemand fold 0 = 0.8415937803692906 numClassifiers = [50]
            FastMax CVAcc BOSS accuracy on ERing fold 0 = 0.8888888888888888 numClassifiers = [50, 50, 50, 50]
            CAWPE Subsample BOSS accuracy on ItalyPowerDemand fold 0 = 0.9271137026239067 numClassifiers = [80]
            CAWPE Subsample BOSS accuracy on ERing fold 0 = 0.9 numClassifiers = [190, 190, 190, 190]
            Contract 1 Min Checkpoint BOSS accuracy on ItalyPowerDemand fold 0 = 0.6958211856171039 numClassifiers = [80] in 1.1385895000000001 seconds
            Contract 1 Min Checkpoint BOSS accuracy on ERing fold 0 = 0.5259259259259259 numClassifiers = [190, 190, 190, 190] in 13.5186788 seconds
        */
    }
}
| 53,497 | 37.158345 | 221 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/dictionary_based/bitword/BitWord.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.dictionary_based.bitword;
import java.io.Serializable;
/**
* Interface for BitWord classes
*
* @author Matthew Middlehurst
*/
public interface BitWord extends Comparable<BitWord>, Serializable {

    /** @return the packed word as a boxed number (Integer or Long depending on implementation) */
    Number getWord();

    /** @return the number of letters currently packed into the word */
    byte getLength();

    /** Replaces the packed bits with the given value; does not change the recorded length. */
    void setWord(Number word);

    /** Sets the recorded letter count; does not alter the packed bits. */
    void setLength(byte length);

    /** Appends a letter to the least-significant end of the word, incrementing the length. */
    void push(int letter);

    /** Removes {@code amount} letters from the least-significant end of the word. */
    void shorten(int amount);
}
| 1,157 | 30.297297 | 76 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/dictionary_based/bitword/BitWordInt.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.dictionary_based.bitword;
import java.io.Serializable;
import java.util.Arrays;
/**
* Provides a simple skeleton implementation for bit packed words, as a replacement for Java Strings.
*
* Currently only supports alphabet size <= 4, wordlength <= 16
*
* As of 2016/03/05 only incorporated into SFA/BOSS, TODO implement into SAX
*
* @author James Large
*/
public class BitWordInt implements BitWord {

    // NOTE(review): conventionally serialVersionUID is declared private static final.
    protected static final long serialVersionUID = 1L;

    /** Output formats understood by {@link #toString(PrintFormat)}. */
    public enum PrintFormat {
        RAW, //simple decimal integer value
        BINARY, //as 32 bit binary string
        LETTERS, //as char string, has unpacking cost
        STRING; //as string of actual characters
    }

    // Letters used when rendering the word as a character string.
    private static final String[] alphabet = { "a","b","c","d","e","f","g","h","i","j" };

    // Not referenced anywhere in this class; presumably historical.
    private static String[] alphabetSymbols = { "a","b","c","d" };

    // Packed word: BITS_PER_LETTER bits per letter, most recently pushed letter
    // in the least significant bits.
    private int word;
    // Number of letters currently packed into 'word'.
    private byte length;

    //ALPHABETSIZE CURRENTLY ASSUMED TO BE 4, THEREFORE 2 BITS PER LETTER, AND MAX WORD LENGTH 16
    public static final int WORD_SPACE = 32; //length of int
    public static final int BITS_PER_LETTER = 2;
    public static final int MAX_LENGTH = 16; //WORD_SPACE/BITS_PER_LETTER;

    //masks
    private static final int POP_MASK = 0b11;
    // Per-letter masks generated by buildMasks(); not referenced by the methods below.
    private static final int [] LETTER_MASKS = { //again, assumes alphabetsize = 4 still
        0b00000000000000000000000000000011,
        0b00000000000000000000000000001100,
        0b00000000000000000000000000110000,
        0b00000000000000000000000011000000,
        0b00000000000000000000001100000000,
        0b00000000000000000000110000000000,
        0b00000000000000000011000000000000,
        0b00000000000000001100000000000000,
        0b00000000000000110000000000000000,
        0b00000000000011000000000000000000,
        0b00000000001100000000000000000000,
        0b00000000110000000000000000000000,
        0b00000011000000000000000000000000,
        0b00001100000000000000000000000000,
        0b00110000000000000000000000000000,
        0b11000000000000000000000000000000
    };

    /** Creates an empty word (no letters). */
    public BitWordInt() {
        word = 0;
        length = 0;
    }

    /** Copies from any BitWord implementation; the packed value is truncated to 32 bits. */
    public BitWordInt(BitWord bw) {
        this.word = bw.getWord().intValue();
        this.length = bw.getLength();
    }

    /** Copy constructor. */
    public BitWordInt(BitWordInt bw) {
        this.word = bw.word;
        this.length = bw.length;
    }

    /**
     * Packs the given letters into a new word.
     * @throws Exception if more than MAX_LENGTH letters are supplied
     */
    public BitWordInt(int [] letters) throws Exception {
        setWord(letters);
    }

    public Number getWord() { return word; }
    public byte getLength() { return length; }

    public void setWord(Number word) { this.word = word.intValue(); }

    /**
     * Replaces the current contents with the given letters.
     * @throws Exception if more than MAX_LENGTH letters are supplied
     */
    public void setWord(int [] letters) throws Exception {
        if (letters.length > MAX_LENGTH)
            throw new Exception("requested word length exceeds max(" + MAX_LENGTH + "): " + letters.length);

        word = 0;
        length = (byte)letters.length;

        packAll(letters);
    }

    public void setLength(byte length) { this.length = length; }

    /** Appends a letter to the least-significant end and increments the length. */
    public void push(int letter) {
        word = (word << BITS_PER_LETTER) | letter;
        ++length;
    }

    /** Removes and returns the letter at the least-significant end. */
    public int pop() {
        int letter = word & POP_MASK;
        shorten(1);
        return letter;
    }

    /** Pushes each letter in order; letters[0] ends up most significant. */
    public void packAll(int [] letters) {
        for (int i = 0; i < letters.length; ++i)
            push(letters[i]);
    }

    /** Unpacks the word back into an int-per-letter array, earliest-pushed letter first. */
    public int[] unpackAll() {
        int [] letters = new int[length];

        int shift = WORD_SPACE-(length*BITS_PER_LETTER); //first shift, increment latter
        for (int i = length-1; i > -1; --i) {
            //left shift to left end to remove earlier letters, right shift to right end to remove latter
            letters[length-1-i] = (word << shift) >>> (WORD_SPACE-BITS_PER_LETTER);
            shift += BITS_PER_LETTER;
        }

        return letters;
    }

    /** Drops one Fourier coefficient's worth of letters (one real/imaginary pair). */
    public void shortenByFourierCoefficient() {
        shorten(2); //1 real/imag pair
    }

    /** Removes 'amount' letters from the least-significant end. */
    public void shorten(int amount) {
        length -= amount;
        word >>>= amount*BITS_PER_LETTER;
    }

    // Compares the raw packed values; ignores length.
    @Override
    public int compareTo(BitWord o) { return Long.compare(word, o.getWord().longValue()); }

    // Equality is defined via compareTo, i.e. on the packed value only.
    @Override
    public boolean equals(Object other) {
        if (other instanceof BitWord)
            return compareTo((BitWord)other) == 0;
        return false;
    }

    @Override
    public int hashCode() {
        return Integer.hashCode(word);
    }

    /** Renders the word as a character string using the static alphabet. */
    public String buildString() {
        int [] letters = unpackAll();

        StringBuilder word = new StringBuilder();
        for (int i = 0; i < letters.length; ++i)
            word.append(alphabet[letters[i]]);

        return word.toString();
    }

    @Override
    public String toString() {
        return Arrays.toString(unpackAll());
    }
    /** Renders the word in the requested {@link PrintFormat}. */
    public String toString(PrintFormat format) {
        switch (format) {
            case RAW:
                return String.valueOf(Integer.toString(word));
            case BINARY:
                return String.format("%"+WORD_SPACE+"s", Integer.toBinaryString(word)).replace(' ', '0');
            case LETTERS: {
                return Arrays.toString(unpackAll());
            }
            case STRING: {
                return buildString();
            }
            default:
                return "err"; //impossible with enum, but must have return
        }
    }

    /** Renders each letter as a character 'A'+letter, earliest-pushed letter first. */
    public String toStringUnigram() {
        long[] letters = new long[length];
        int shift = WORD_SPACE-(length*BITS_PER_LETTER);
        for (int i = length-1; i > -1; --i) {
            letters[length-1-i] = (word << shift) >>> (WORD_SPACE-BITS_PER_LETTER);
            shift += BITS_PER_LETTER;
        }

        StringBuilder str = new StringBuilder();
        for (int i = 0; i < length; i++){
            str.append((char)('A'+letters[i]));
        }
        return str.toString();
    }

    public static void main(String [] args) throws Exception {
        quickTest();
        //buildMasks();
    }

    // Prints the Java source for the LETTER_MASKS table above.
    private static void buildMasks() {
        for (int i = 0; i < 16; ++i) {
            System.out.print("0b");
            for (int j = 15; j > i; --j)
                System.out.print("00");
            System.out.print("11");
            for (int j = 0; j < i; ++ j)
                System.out.print("00");

            System.out.println(",");
        }
    }

    // Ad-hoc visual check of packing/unpacking and Fourier-coefficient shortening.
    private static void quickTest() throws Exception {
        int [] letters = {0,1,2,3,2,1,2,0,1};
        BitWordInt w = new BitWordInt(letters);
        System.out.println(Arrays.toString(letters));
//        System.out.println(w);
//        System.out.println(w.toString(PrintFormat.RAW));
//        System.out.println(w.toString(PrintFormat.BINARY));
        System.out.println(w.toString(PrintFormat.LETTERS));
        w.shortenByFourierCoefficient();
        System.out.println(w.toString(PrintFormat.LETTERS));

        System.out.println(" ");

        int [] letters2 = {0,1,2,3,2,1,2,0};
        BitWordInt w2 = new BitWordInt(letters2);
        System.out.println(Arrays.toString(letters2));
//        System.out.println(w2);
//        System.out.println(w2.toString(PrintFormat.RAW));
//        System.out.println(w2.toString(PrintFormat.BINARY));
        System.out.println(w2.toString(PrintFormat.LETTERS));
        w2.shortenByFourierCoefficient();
        System.out.println(w2.toString(PrintFormat.LETTERS));
    }
}
| 8,220 | 31.239216 | 108 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/dictionary_based/bitword/BitWordLong.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.dictionary_based.bitword;
import java.io.Serializable;
import java.util.Arrays;
/**
* Provides a simple skeleton implementation for bit packed words, as a replacement for Java Strings.
*
* Currently only supports alphabet size <= 4, wordlength <= 16
*
* Used by TDE, stripped down version of original BitWord class using a long to store the word.
*
* @author James Large, updated by Matthew Middlehurst
*/
public class BitWordLong implements BitWord {

    // NOTE(review): conventionally serialVersionUID is declared private static final.
    protected static final long serialVersionUID = 1L;

    /** Output formats understood by {@link #toString(PrintFormat)}. */
    public enum PrintFormat {
        RAW, //simple decimal integer value
        BINARY, //as 32 bit binary string
        LETTERS, //as char string, has unpacking cost
        STRING; //as string of actual characters
    }

    // Letters used when rendering the word as a character string.
    private static final String[] alphabet = { "a","b","c","d","e","f","g","h","i","j" };

    // Not referenced anywhere in this class; presumably historical.
    private static String[] alphabetSymbols = { "a","b","c","d" };

    // Packed word: BITS_PER_LETTER bits per letter, most recently pushed letter
    // in the least significant bits.
    private long word;
    // Number of letters currently packed into 'word'.
    private byte length;

    //ALPHABETSIZE CURRENTLY ASSUMED TO BE 4, THEREFORE 2 BITS PER LETTER, AND MAX WORD LENGTH 16
    public static final int WORD_SPACE = 64; //length of long
    public static final int BITS_PER_LETTER = 2;
    public static final int MAX_LENGTH = 32; //WORD_SPACE/BITS_PER_LETTER;

    //masks
    private static final int POP_MASK = 0b11;
    // Per-letter masks; not referenced by the methods below.
    private static final int [] LETTER_MASKS = { //again, assumes alphabetsize = 4 still
        0b00000000000000000000000000000011,
        0b00000000000000000000000000001100,
        0b00000000000000000000000000110000,
        0b00000000000000000000000011000000,
        0b00000000000000000000001100000000,
        0b00000000000000000000110000000000,
        0b00000000000000000011000000000000,
        0b00000000000000001100000000000000,
        0b00000000000000110000000000000000,
        0b00000000000011000000000000000000,
        0b00000000001100000000000000000000,
        0b00000000110000000000000000000000,
        0b00000011000000000000000000000000,
        0b00001100000000000000000000000000,
        0b00110000000000000000000000000000,
        0b11000000000000000000000000000000
    };

    /** Creates an empty word (no letters). */
    public BitWordLong() {
        word = 0L;
        length = 0;
    }

    /** Copies from any BitWord implementation. */
    public BitWordLong(BitWord bw) {
        this.word = bw.getWord().longValue();
        this.length = bw.getLength();
    }

    /** Copy constructor. */
    public BitWordLong(BitWordLong bw) {
        this.word = bw.word;
        this.length = bw.length;
    }

    /**
     * Packs the given letters into a new word.
     * @throws Exception if more than MAX_LENGTH letters are supplied
     */
    public BitWordLong(int [] letters) throws Exception {
        setWord(letters);
    }

    /**
     * Builds a bigram: bw1's bits in the upper 32 bits, bw2's in the lower 32.
     * BitWordInt values are widened unsigned so the sign bit is not smeared.
     */
    public BitWordLong(BitWord bw1, BitWord bw2){
        if (bw2 instanceof BitWordInt)
            word = (bw1.getWord().longValue() << 32) | Integer.toUnsignedLong(bw2.getWord().intValue());
        else
            word = (bw1.getWord().longValue() << 32) | bw2.getWord().longValue();
        length = 32;
    }

    public Number getWord() { return word; }
    public byte getLength() { return length; }

    public void setWord(Number word) { this.word = word.longValue(); }

    /**
     * Replaces the current contents with the given letters.
     * @throws Exception if more than MAX_LENGTH letters are supplied
     */
    public void setWord(int [] letters) throws Exception {
        if (letters.length > MAX_LENGTH)
            throw new Exception("requested word length exceeds max(" + MAX_LENGTH + "): " + letters.length);

        word = 0;
        length = (byte)letters.length;

        packAll(letters);
    }

    public void setLength(byte length) { this.length = length; }

    /** Appends a letter to the least-significant end and increments the length. */
    public void push(int letter) {
        word = (word << BITS_PER_LETTER) | letter;
        ++length;
    }

    /** Removes and returns the letter at the least-significant end. */
    public long pop() {
        long letter = word & POP_MASK;
        shorten(1);
        return letter;
    }

    /** Pushes each letter in order; letters[0] ends up most significant. */
    public void packAll(int [] letters) {
        for (int i = 0; i < letters.length; ++i)
            push(letters[i]);
    }

    /** Unpacks the word back into an int-per-letter array, earliest-pushed letter first. */
    public int[] unpackAll() {
        int[] letters = new int[length];

        int shift = WORD_SPACE-(length*BITS_PER_LETTER); //first shift, increment latter
        for (int i = length-1; i > -1; --i) {
            //left shift to left end to remove earlier letters, right shift to right end to remove latter
            letters[length-1-i] = (int)((word << shift) >>> (WORD_SPACE-BITS_PER_LETTER));
            shift += BITS_PER_LETTER;
        }

        return letters;
    }

    /** Drops one Fourier coefficient's worth of letters (one real/imaginary pair). */
    public void shortenByFourierCoefficient() {
        shorten(2); //1 real/imag pair
    }

    /** Removes 'amount' letters from the least-significant end. */
    public void shorten(int amount) {
        length -= amount;
        word = word >>> amount*BITS_PER_LETTER;
    }

    // Compares the raw packed values; ignores length.
    @Override
    public int compareTo(BitWord o) {
        return Long.compare(word, o.getWord().longValue());
    }

    // Equality is defined via compareTo, i.e. on the packed value only.
    @Override
    public boolean equals(Object other) {
        if (other instanceof BitWord)
            return compareTo((BitWord)other) == 0;
        return false;
    }

    @Override
    public int hashCode() {
        return Long.hashCode(word);
    }

    /** Renders the word as a character string using the static alphabet. */
    public String buildString() {
        int [] letters = unpackAll();

        StringBuilder word = new StringBuilder();
        for (int i = 0; i < letters.length; ++i)
            word.append(alphabet[letters[i]]);

        return word.toString();
    }

    @Override
    public String toString() {
        return Arrays.toString(unpackAll());
    }
    /** Renders the word in the requested {@link PrintFormat}. */
    public String toString(BitWordLong.PrintFormat format) {
        switch (format) {
            case RAW:
                return String.valueOf(Long.toString(word));
            case BINARY:
                return String.format("%"+WORD_SPACE+"s", Long.toBinaryString(word)).replace(' ', '0');
            case LETTERS: {
                return Arrays.toString(unpackAll());
            }
            case STRING: {
                return buildString();
            }
            default:
                return "err"; //impossible with enum, but must have return
        }
    }

    /** Renders each letter as a character 'A'+letter, earliest-pushed letter first. */
    public String toStringUnigram() {
        long[] letters = new long[length];
        int shift = WORD_SPACE-(length*BITS_PER_LETTER);
        for (int i = length-1; i > -1; --i) {
            letters[length-1-i] = (word << shift) >>> (WORD_SPACE-BITS_PER_LETTER);
            shift += BITS_PER_LETTER;
        }

        StringBuilder str = new StringBuilder();
        for (int i = 0; i < length; i++){
            str.append((char)('A'+letters[i]));
        }
        return str.toString();
    }

    /**
     * Renders a bigram word as "first+second", decoding the two halves of the
     * 64-bit word with different shifts (see the bigram constructor above).
     */
    public String toStringBigram() {
        long[] letters = new long[length];
        int shift = WORD_SPACE-(length*BITS_PER_LETTER);
        for (int i = length-1; i > -1; --i) {
            letters[length-1-i] = (word << shift) >>> (WORD_SPACE-BITS_PER_LETTER);
            shift += BITS_PER_LETTER;
        }

        long[] letters2 = new long[length];
        int shift2 = WORD_SPACE/2-(length*2);
        for (int i = length-1; i > -1; --i) {
            letters2[length-1-i] = (word << shift2) >>> (WORD_SPACE-BITS_PER_LETTER);
            shift2 += BITS_PER_LETTER;
        }

        StringBuilder str = new StringBuilder();
        for (int i = 0; i < length; i++){
            str.append((char)('A'+letters2[i]));
        }
        str.append("+");
        for (int i = 0; i < length; i++){
            str.append((char)('A'+letters[i]));
        }
        return str.toString();
    }

    // Ad-hoc visual check of pushing, bigram combination, and hash behaviour.
    public static void main(String [] args) throws Exception {
        int [] letters = {2,1,2,3,2,1,2,0,1,1,2,3,1,2,3,1};
        BitWordLong b = new BitWordLong();
        for (int i = 0; i < letters.length; i++){
            b.push(letters[i]);
            System.out.println(b.toString(BitWordLong.PrintFormat.BINARY));
        }
//        b.shorten(6);
//        System.out.println(b.toString(BitWordLong.PrintFormat.BINARY));
        b.word = (b.word << 32) | b.word;
        System.out.println(b.toString(BitWordLong.PrintFormat.BINARY));
        System.out.println();

        BitWordInt b2 = new BitWordInt();
        for (int i = 0; i < letters.length; i++){
            b2.push(letters[i]);
            System.out.println(b2.toString(BitWordInt.PrintFormat.BINARY));
        }

        BitWordLong b3 = new BitWordLong();
        System.out.println(b3.toString(BitWordLong.PrintFormat.BINARY));
        b3.word = (b2.getWord().longValue() << 32);
        System.out.println(b3.toString(BitWordLong.PrintFormat.BINARY));
        b3.word = b.word | Integer.toUnsignedLong(b2.getWord().intValue());
        System.out.println(b3.toString(BitWordLong.PrintFormat.BINARY));
        System.out.println();

        System.out.println(b.word);
        System.out.println(b2.getWord());
        System.out.println(b.compareTo(b2));
        System.out.println();

        System.out.println(Long.hashCode(10000L));
        System.out.println(Long.hashCode(10000000000001L));
        System.out.println(Long.hashCode(1316137241L));
    }
}
| 9,593 | 31.856164 | 108 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/dictionary_based/boss_variants/BOSSC45.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.dictionary_based.boss_variants;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.*;
import tsml.classifiers.dictionary_based.IndividualBOSS;
import utilities.InstanceTools;
import tsml.classifiers.SaveParameterInfo;
import weka.core.Capabilities;
import weka.classifiers.Classifier;
import weka.core.TechnicalInformation;
import java.util.Map.Entry;
import tsml.classifiers.dictionary_based.bitword.BitWordInt;
import utilities.ClassifierTools;
import experiments.data.DatasetLoading;
import java.util.concurrent.TimeUnit;
import tsml.classifiers.EnhancedAbstractClassifier;
import weka.classifiers.trees.J48;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
/**
* BOSS + C45 tree classifier with parameter search and ensembling, if parameters are
* known, use nested 'BOSSC45Individual' classifier and directly provide them.
*
* Intended use is with the default constructor, however can force the normalisation
* parameter to true/false by passing a boolean, e.g c = new BOSSEnsemble(true)
*
* Alphabetsize fixed to four
*
* @author James Large
*
*
* TEMPORARY WORK AROUND, CONSULT TODO: test cases may contain words not found anywhere
* in the train set, and therefore not found in the dictionary (i.e list of attributes
* in the instance/s). Currently words found in test instances that are not in the train set are
* IGNORED.
*
* Largely poor accuracy likely due to this
*
* BOSS implementation based on the algorithm described in getTechnicalInformation()
* C45 done using the WEKA implementation 'weka.classifiers.trees.J48'
*/
public class BOSSC45 extends EnhancedAbstractClassifier implements SaveParameterInfo {
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
result = new TechnicalInformation(TechnicalInformation.Type.ARTICLE);
result.setValue(TechnicalInformation.Field.AUTHOR, "P. Schafer");
result.setValue(TechnicalInformation.Field.TITLE, "The BOSS is concerned with time series classification in the presence of noise");
result.setValue(TechnicalInformation.Field.JOURNAL, "Data Mining and Knowledge Discovery");
result.setValue(TechnicalInformation.Field.VOLUME, "29");
result.setValue(TechnicalInformation.Field.NUMBER,"6");
result.setValue(TechnicalInformation.Field.PAGES, "1505-1530");
result.setValue(TechnicalInformation.Field.YEAR, "2015");
return result;
}
    // The final ensemble members, kept in arbitrary order; culled during training.
    private List<BOSSWindow> classifiers;

    // Folds used when estimating each member's train accuracy (capped at numInstances).
    private int numCVFolds = 10;

    // Members below correctThreshold * (best member's accuracy) are discarded.
    private final double correctThreshold = 0.92;
    private int maxEnsembleSize = 50;

    // Word lengths searched per window size, longest first (bags are shortened in place).
    private final Integer[] wordLengths = { 16, 14, 12, 10, 8 };
    private final int alphabetSize = 4;
    //private boolean norm;

    /** How individual ensemble members are (optionally) serialised during training. */
    public enum SerialiseOptions {
        //dont do any serialising, run as normal
        NONE,
        //serialise the final boss classifiers which made it into ensemble (does not serialise the entire BOSSEnsemble object)
        //slight runtime cost
        STORE,
        //serialise the final boss classifiers, and delete from main memory. reload each from ser file when needed in classification.
        //the most memory used at any one time is therefore ~2 individual boss classifiers during training.
        //massive runtime cost, order of magnitude
        STORE_LOAD
    };

    private SerialiseOptions serOption = SerialiseOptions.NONE;
    // Base directory for member .ser files; extended per-dataset in buildClassifier.
    private static String serFileLoc = "BOSSWindowSers\\";

    // Normalisation settings to search; set by the constructors.
    private boolean[] normOptions;
    /**
     * Providing a particular value for normalisation will force that option, if
     * whether to normalise should be a parameter to be searched, use default constructor
     *
     * @param normalise whether or not to normalise by dropping the first Fourier coefficient
     */
    public BOSSC45(boolean normalise) {
        super(CAN_ESTIMATE_OWN_PERFORMANCE);
        normOptions = new boolean[] { normalise };
    }
    /**
     * During buildClassifier(...), will search through normalisation as well as
     * window size and word length if no particular normalisation option is provided
     */
    public BOSSC45() {
        super(CAN_ESTIMATE_OWN_PERFORMANCE);
        normOptions = new boolean[] { true, false };
    }
public static class BOSSWindow implements Comparable<BOSSWindow>, Serializable {
private BOSSC45Individual classifier;
public double accuracy;
public String filename;
private static final long serialVersionUID = 2L;
public BOSSWindow(String filename) {
this.filename = filename;
}
public BOSSWindow(BOSSC45Individual classifer, double accuracy, String dataset) {
this.classifier = classifer;
this.accuracy = accuracy;
buildFileName(dataset);
}
public double classifyInstance(Instance inst) throws Exception {
return classifier.classifyInstance(inst);
}
public double classifyInstance(int test) throws Exception {
return classifier.classifyInstance(test);
}
private void buildFileName(String dataset) {
filename = serFileLoc + dataset + "_" + classifier.getWindowSize() + "_" + classifier.getWordLength() + "_" + classifier.getAlphabetSize() + "_" + classifier.isNorm() + ".ser";
}
public boolean storeAndClearClassifier() {
try {
ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(filename));
out.writeObject(this);
out.close();
clearClassifier();
return true;
}catch(IOException e) {
System.out.print("Error serialiszing to " + filename);
e.printStackTrace();
return false;
}
}
public boolean store() {
try {
ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(filename));
out.writeObject(this);
out.close();
return true;
}catch(IOException e) {
System.out.print("Error serialiszing to " + filename);
e.printStackTrace();
return false;
}
}
public void clearClassifier() {
classifier = null;
}
public boolean load() {
BOSSWindow bw = null;
try {
ObjectInputStream in = new ObjectInputStream(new FileInputStream(filename));
bw = (BOSSWindow) in.readObject();
in.close();
this.accuracy = bw.accuracy;
this.classifier = bw.classifier;
return true;
}catch(IOException i) {
System.out.print("Error deserialiszing from " + filename);
i.printStackTrace();
return false;
}catch(ClassNotFoundException c) {
System.out.println("BOSSWindow class not found");
c.printStackTrace();
return false;
}
}
public boolean deleteSerFile() {
try {
File f = new File(filename);
return f.delete();
} catch(SecurityException s) {
System.out.println("Unable to delete, access denied: " + filename);
s.printStackTrace();
return false;
}
}
/**
* @return { numIntervals(word length), alphabetSize, slidingWindowSize }
*/
public int[] getParameters() { return classifier.getParameters(); }
public int getWindowSize() { return classifier.getWindowSize(); }
public int getWordLength() { return classifier.getWordLength(); }
public int getAlphabetSize() { return classifier.getAlphabetSize(); }
public boolean isNorm() { return classifier.isNorm(); }
@Override
public int compareTo(BOSSWindow other) {
if (this.accuracy > other.accuracy)
return 1;
if (this.accuracy == other.accuracy)
return 0;
return -1;
}
}
@Override
public String getParameters() {
StringBuilder sb = new StringBuilder();
BOSSWindow first = classifiers.get(0);
sb.append("windowSize=").append(first.getWindowSize()).append("/wordLength=").append(first.getWordLength());
sb.append("/alphabetSize=").append(first.getAlphabetSize()).append("/norm=").append(first.isNorm());
for (int i = 1; i < classifiers.size(); ++i) {
BOSSWindow boss = classifiers.get(i);
sb.append(",windowSize=").append(boss.getWindowSize()).append("/wordLength=").append(boss.getWordLength());
sb.append("/alphabetSize=").append(boss.getAlphabetSize()).append("/norm=").append(boss.isNorm());
}
return sb.toString();
}
    /**
     * Number of folds used when this classifier is cross-validated externally:
     * one fold per train instance, i.e. leave-one-out.
     *
     * @param data the training data
     * @return data.numInstances()
     */
    @Override
    public int setNumberOfFolds(Instances data){
        return data.numInstances();
    }
/**
* @return { numIntervals(word length), alphabetSize, slidingWindowSize } for each BOSSWindow in this *built* classifier
*/
public int[][] getParametersValues() {
int[][] params = new int[classifiers.size()][];
int i = 0;
for (BOSSWindow boss : classifiers)
params[i++] = boss.getParameters();
return params;
}
    /** Sets whether/how ensemble members are serialised during training (see {@link SerialiseOptions}). */
    public void setSerOption(SerialiseOptions option) {
        serOption = option;
    }
    /** Sets the base directory for member .ser files. Note: this is a static (JVM-wide) setting. */
    public void setSerFileLoc(String path) {
        serFileLoc = path;
    }
    /**
     * Builds the BOSS+C45 ensemble. For each normalisation option and each candidate
     * sliding-window size, the best word length is chosen by CV accuracy on the train
     * bags; a candidate joins the ensemble only if its accuracy is within
     * correctThreshold of the current best, and the ensemble is continually pruned
     * back to maxEnsembleSize. Final trees are built on the full train data at the end.
     *
     * @param data train instances; class attribute must be the last attribute
     * @throws Exception if the class attribute is not last, or a member build fails
     */
    @Override
    public void buildClassifier(final Instances data) throws Exception {
        if (data.classIndex() != data.numAttributes()-1)
            throw new Exception("BOSSEnsemble_BuildClassifier: Class attribute not set as last attribute in dataset");
        //when serialising, members are written under a per-build timestamped subdirectory
        if (serOption == SerialiseOptions.STORE || serOption == SerialiseOptions.STORE_LOAD) {
            DateFormat dateFormat = new SimpleDateFormat("yyyyMMddHHmmss");
            Date date = new Date();
            serFileLoc += data.relationName() + "_" + dateFormat.format(date) + "\\";
            File f = new File(serFileLoc);
            if (!f.isDirectory())
                f.mkdirs();
        }
        //cannot have more CV folds than instances
        if (data.numInstances() < numCVFolds)
            numCVFolds = data.numInstances();
        classifiers = new LinkedList<BOSSWindow>();
        int numSeries = data.numInstances();
        int seriesLength = data.numAttributes()-1; //minus class attribute
        int minWindow = 10;
        int maxWindow = seriesLength;
        //int winInc = 1; //check every window size in range
        // //whats the max number of window sizes that should be searched through
        //double maxWindowSearches = Math.min(200, Math.sqrt(seriesLength));
        //search roughly a quarter of the possible window sizes, evenly spaced
        double maxWindowSearches = seriesLength/4.0;
        int winInc = (int)((maxWindow - minWindow) / maxWindowSearches);
        if (winInc < 1) winInc = 1;
        //keep track of current max window size accuracy, constantly check for correctthreshold to discard to save space
        double maxAcc = -1.0;
        double minMaxAcc = -1.0; //the acc of the worst member to make it into the final ensemble as it stands
        for (boolean normalise : normOptions) {
            for (int winSize = minWindow; winSize <= maxWindow; winSize += winInc) {
                //build bags only at this stage; the tree is built later, and only for final members
                BOSSC45Individual boss = new BOSSC45Individual(wordLengths[0], alphabetSize, winSize, normalise);
                boss.buildBags=true;
                boss.buildForest=false;
                boss.buildClassifier(data); //initial setup for this windowsize, with max word length
                BOSSC45Individual bestClassifierForWinSize = null;
                double bestAccForWinSize = -1.0;
                //find best word length for this window size
                for (Integer wordLen : wordLengths) {
                    boss = boss.buildShortenedBags(wordLen); //in first iteration, same lengths (wordLengths[0]), will do nothing
                    double acc = boss.findCVAcc(numCVFolds);
                    //>= favours shorter word lengths (searched later) on ties
                    if (acc >= bestAccForWinSize) {
                        bestAccForWinSize = acc;
                        bestClassifierForWinSize = boss;
                    }
                }
                //if not within correct threshold of the current max, dont bother storing at all
                if (makesItIntoEnsemble(bestAccForWinSize, maxAcc, minMaxAcc, classifiers.size())) {
                    BOSSWindow bw = new BOSSWindow(bestClassifierForWinSize, bestAccForWinSize, data.relationName());
                    bw.classifier.clean();
                    if (serOption == SerialiseOptions.STORE)
                        bw.store();
                    else if (serOption == SerialiseOptions.STORE_LOAD)
                        bw.storeAndClearClassifier();
                    classifiers.add(bw);
                    if (bestAccForWinSize > maxAcc) {
                        maxAcc = bestAccForWinSize;
                        //get rid of any extras that dont fall within the final max threshold
                        Iterator<BOSSWindow> it = classifiers.iterator();
                        while (it.hasNext()) {
                            BOSSWindow b = it.next();
                            if (b.accuracy < maxAcc * correctThreshold) {
                                if (serOption == SerialiseOptions.STORE || serOption == SerialiseOptions.STORE_LOAD)
                                    b.deleteSerFile();
                                it.remove();
                            }
                        }
                    }
                    while (classifiers.size() > maxEnsembleSize) {
                        //cull the 'worst of the best' until back under the max size
                        int minAccInd = (int)findMinEnsembleAcc()[0];
                        if (serOption == SerialiseOptions.STORE || serOption == SerialiseOptions.STORE_LOAD)
                            classifiers.get(minAccInd).deleteSerFile();
                        classifiers.remove(minAccInd);
                    }
                    minMaxAcc = findMinEnsembleAcc()[1]; //new 'worst of the best' acc
                }
            }
        }
        //final members known, now build the final trees on the full train data (rather than train/validation set)
        for (BOSSWindow window : classifiers)
            window.classifier.buildFullForest();
        if (getEstimateOwnPerformance())
            findEnsembleTrainAcc(data);
    }
//[0] = index, [1] = acc
private double[] findMinEnsembleAcc() {
double minAcc = Double.MIN_VALUE;
int minAccInd = 0;
for (int i = 0; i < classifiers.size(); ++i) {
double curacc = classifiers.get(i).accuracy;
if (curacc < minAcc) {
minAcc = curacc;
minAccInd = i;
}
}
return new double[] { minAccInd, minAcc };
}
private boolean makesItIntoEnsemble(double acc, double maxAcc, double minMaxAcc, int curEnsembleSize) {
if (acc >= maxAcc * correctThreshold) {
if (curEnsembleSize >= maxEnsembleSize)
return acc > minMaxAcc;
else
return true;
}
return false;
}
private double findEnsembleTrainAcc(Instances data) throws Exception {
trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
trainResults.setEstimatorName(getClassifierName());
trainResults.setDatasetName(data.relationName());
trainResults.setFoldID(seed);
trainResults.setSplit("train");
trainResults.setParas(getParameters());
double correct = 0;
for (int i = 0; i < data.numInstances(); ++i) {
long predTime = System.nanoTime();
//classify series i, while ignoring its corresponding histogram i
double[] probs = distributionForInstance(i, data.numClasses());
predTime = System.nanoTime() - predTime;
int maxClass = findIndexOfMax(probs, rand);
if (maxClass == data.get(i).classValue())
++correct;
trainResults.addPrediction(data.get(i).classValue(), probs, maxClass, predTime, "");
}
trainResults.finaliseResults();
double result = correct / data.numInstances();
return result;
}
/**
* Classify the train instance at index 'test', whilst ignoring the corresponding bags
* in each of the members of the ensemble, for use in CV of BOSSEnsemble
*/
public double classifyInstance(int test, int numclasses) throws Exception {
double[] dist = distributionForInstance(test, numclasses);
return findIndexOfMax(dist, rand);
}
public double[] distributionForInstance(int test, int numclasses) throws Exception {
double[] classHist = new double[numclasses];
//get votes from all windows
double sum = 0;
for (BOSSWindow classifier : classifiers) {
if (serOption == SerialiseOptions.STORE_LOAD)
classifier.load();
double classification = classifier.classifyInstance(test);
if (serOption == SerialiseOptions.STORE_LOAD)
classifier.clearClassifier();
classHist[(int)classification]++;
sum++;
}
if (sum != 0)
for (int i = 0; i < classHist.length; ++i)
classHist[i] /= sum;
return classHist;
}
@Override
public double classifyInstance(Instance instance) throws Exception {
double[] dist = distributionForInstance(instance);
return findIndexOfMax(dist, rand);
}
@Override
public double[] distributionForInstance(Instance instance) throws Exception {
double[] classHist = new double[instance.numClasses()];
//get votes from all windows
double sum = 0;
for (BOSSWindow classifier : classifiers) {
if (serOption == SerialiseOptions.STORE_LOAD)
classifier.load();
double classification = classifier.classifyInstance(instance);
if (serOption == SerialiseOptions.STORE_LOAD)
classifier.clearClassifier();
classHist[(int)classification]++;
sum++;
}
if (sum != 0)
for (int i = 0; i < classHist.length; ++i)
classHist[i] /= sum;
return classHist;
}
    //capability checking is not implemented for this classifier
    @Override
    public Capabilities getCapabilities() {
        throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
    }
public static void main(String[] args) throws Exception{
//Minimum working example
String dataset = "BeetleFly";
Instances train = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dataset+"\\"+dataset+"_TRAIN.arff");
Instances test = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dataset+"\\"+dataset+"_TEST.arff");
Classifier c = new BOSSC45();
c.buildClassifier(train);
double accuracy = ClassifierTools.accuracy(test, c);
System.out.println("BOSSC45 accuracy on " + dataset + " fold 0 = " + accuracy);
//Other examples/tests
// detailedFold0Test(dataset);
// resampleTest(dataset, 5);
}
public static void detailedFold0Test(String dset) {
System.out.println("BOSSC45 DetailedTest\n");
try {
Instances train = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dset+"\\"+dset+"_TRAIN.arff");
Instances test = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dset+"\\"+dset+"_TEST.arff");
System.out.println(train.relationName());
BOSSC45 boss = new BOSSC45();
//TRAINING
System.out.println("Training starting");
long start = System.nanoTime();
boss.buildClassifier(train);
double trainTime = (System.nanoTime() - start) / 1000000000.0; //seconds
System.out.println("Training done (" + trainTime + "s)");
//RESULTS OF TRAINING
System.out.println("Ensemble Size: " + boss.classifiers.size());
System.out.println("Param sets: ");
int[][] params = boss.getParametersValues();
for (int i = 0; i < params.length; ++i)
System.out.println(i + ": " + params[i][0] + " " + params[i][1] + " " + params[i][2] + " " + boss.classifiers.get(i).isNorm() + " " + boss.classifiers.get(i).accuracy);
//TESTING
System.out.println("\nTesting starting");
start = System.nanoTime();
double acc = ClassifierTools.accuracy(test, boss);
double testTime = (System.nanoTime() - start) / 1000000000.0; //seconds
System.out.println("Testing done (" + testTime + "s)");
System.out.println("\nACC: " + acc);
}
catch (Exception e) {
System.out.println(e);
e.printStackTrace();
}
}
public static void resampleTest(String dset, int resamples) throws Exception {
Instances train = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dset+"\\"+dset+"_TRAIN.arff");
Instances test = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dset+"\\"+dset+"_TEST.arff");
Classifier c = new BOSSC45();
//c.setCVPath("C:\\tempproject\\BOSSEnsembleCVtest.csv");
double [] accs = new double[resamples];
for(int i=0;i<resamples;i++){
Instances[] data=InstanceTools.resampleTrainAndTestInstances(train, test, i);
c.buildClassifier(data[0]);
accs[i]= ClassifierTools.accuracy(data[1], c);
if (i==0)
System.out.print(accs[i]);
else
System.out.print("," + accs[i]);
}
double mean = 0;
for(int i=0;i<resamples;i++)
mean += accs[i];
mean/=resamples;
System.out.println("\n\nBOSSEnsemble mean acc over " + resamples + " resamples: " + mean);
}
/**
* TEMPORARY WORK AROUND, CONSULT TODO: test cases may contain words not found anywhere
* in the train set, and therefore not found in the dictionary (i.e list of attributes
* in the instance/s). Currently words found in test instances that are not in the train set are
* IGNORED.
*
* Largely poor accuracy likely due to this
*
* @author James Large
*/
    public static class BOSSC45Individual extends IndividualBOSS {
        //whether buildClassifier should (re)build the word bags
        public boolean buildBags = true;
        //whether buildClassifier should also build the decision tree immediately
        public boolean buildForest = true;
        //the bags converted to Weka instances (one instance per train series, one attribute per word)
        public Instances bagInsts;
        //the C4.5 tree built over the bag instances
        public J48 tree = new J48();
        //class attribute of the original data, reused when building bag instances
        private Attribute classAttribute;
        /**
         * Builds an individual with the given BOSS parameters.
         */
        public BOSSC45Individual(int wordLength, int alphabetSize, int windowSize, boolean normalise) {
            super(wordLength, alphabetSize, windowSize, normalise);
        }
        /**
         * Shortened-word-length copy constructor; shares SFA words with the source.
         */
        public BOSSC45Individual(BOSSC45Individual boss, int wordLength) {
            super(boss, wordLength);
            classAttribute = boss.classAttribute;
        }
        /**
         * Converts the word bags into a Weka Instances object: one attribute per
         * distinct word found across ALL bags, plus the class attribute last;
         * one instance per bag with word counts as values (absent words = 0).
         */
        public Instances bagsToInstances() {
            //build attribute info, each bag may have different keys etc
            //need to build common vector of all keys found in all bags
            //this is the main source of memory problems, most bags will have many unique
            //keys of value 1 due to noise, thus FULL keyset is much larger than
            //each individuals bag's keyset
            ArrayList<Attribute> attInfo = new ArrayList<>();
            Set<String> wordsFound = new HashSet<>();
            for (Bag bag : bags)
                for (Entry<BitWordInt,Integer> entry : bag.entrySet())
                    wordsFound.add(entry.getKey().toString());
            for (String word : wordsFound)
                attInfo.add(new Attribute(word));
            //classvals must not be numeric...
            attInfo.add(classAttribute);
            //atts found, now populate all values
            Instances bagInsts = new Instances("", attInfo, bags.size());
            bagInsts.setClassIndex(attInfo.size()-1);
            int i = 0;
            for (Bag bag : bags) {
                //init all values to 0, + class value at the end
                double[] init = new double[attInfo.size()];
                init[init.length-1] = bag.getClassVal();
                bagInsts.add(new DenseInstance(1, init));
                for (Entry<BitWordInt,Integer> entry : bag.entrySet())
                    bagInsts.get(i).setValue(bagInsts.attribute(entry.getKey().toString()), entry.getValue());
                i++;
            }
            return bagInsts;
        }
        /**
         * Builds the bags and/or the tree depending on the buildBags/buildForest
         * flags, which the ensemble uses to defer tree building until a member is
         * confirmed for the final ensemble.
         */
        @Override
        public void buildClassifier(Instances data) throws Exception {
            classAttribute = data.classAttribute();
            if (buildBags)
                super.buildClassifier(data);
            if (buildForest)
                buildFullForest();
            //accomodates some speed ups when using cv in ensemble
        }
        /**
         * Builds the C4.5 tree over the (lazily converted) bag instances.
         */
        public void buildFullForest() throws Exception {
            //first pass, building normal bags (ArrayList<Bag>)
            //then converting to instances for use in rotf
            //TODO potentially build bags straight into instances form, (mem/very slight speed saving)
            //however the mem savings are made anyway when clean() is called after training
            //and would require a big overhaul
            if (bagInsts == null)
                bagInsts = bagsToInstances();
            if (tree == null)
                tree = new J48();
            tree.buildClassifier(bagInsts);
        }
        /**
         * Cross-validated accuracy of the tree over the bag instances.
         */
        public double findCVAcc(int numFolds) throws Exception {
            if (bagInsts == null)
                bagInsts = bagsToInstances();
            if (tree == null)
                tree = new J48();
            return ClassifierTools.crossValidationWithStats(tree, bagInsts, numFolds)[0][0];
        }
        /**
         * Returns a copy of this individual with bags rebuilt from the stored SFA
         * words at a SHORTER word length. Word lengths can only decrease, since
         * shortening is done by truncating existing words.
         *
         * @throws Exception if newWordLength is greater than the current length or below 2
         */
        @Override
        public BOSSC45Individual buildShortenedBags(int newWordLength) throws Exception {
            if (newWordLength == wordLength) //case of first iteration of word length search in ensemble
                return this;
            if (newWordLength > wordLength)
                throw new Exception("Cannot incrementally INCREASE word length, current:"+wordLength+", requested:"+newWordLength);
            if (newWordLength < 2)
                throw new Exception("Invalid wordlength requested, current:"+wordLength+", requested:"+newWordLength);
            BOSSC45Individual newBoss = new BOSSC45Individual(this, newWordLength);
            //build hists with new word length from SFA words, and copy over the class values of original insts
            for (int i = 0; i < bags.size(); ++i) {
                Bag newBag = createBagFromWords(newWordLength, SFAwords[i]);
                newBag.setClassVal(bags.get(i).getClassVal());
                newBoss.bags.add(newBag);
            }
            return newBoss;
        }
        /**
         * Frees memory after the word-length search; bagInsts is deliberately kept
         * so the final tree can still be built if this member survives pruning.
         */
        @Override
        public void clean() {
            //null out things that are not needed after training to save memory
            super.clean();
            bags = null;
            tree = null;
            //bagInsts = null; //needed to build full forest if this makes it into the final ensemble
        }
        /**
         * Transforms the instance into a bag, converts it to a Weka instance over
         * the TRAIN dictionary (words unseen in training are silently dropped —
         * see the class-level TODO), and classifies it with the tree.
         */
        @Override
        public double classifyInstance(Instance instance) throws Exception {
            Bag testBag = BOSSTransform(instance);
            //convert bag to instance
            double[] init = new double[bagInsts.numAttributes()];
            init[init.length-1] = testBag.getClassVal();
            //TEMPORARILY create it on the end of the train insts to easily copy over the attribute data.
            bagInsts.add(new DenseInstance(1, init));
            for (Entry<BitWordInt,Integer> entry : testBag.entrySet()) {
                Attribute att = bagInsts.attribute(entry.getKey().toString());
                if (att != null)
                    bagInsts.get(bagInsts.size()-1).setValue(att, entry.getValue());
            }
            Instance testInst = bagInsts.remove(bagInsts.size()-1);
            return tree.classifyInstance(testInst);
        }
        //hacky work around to allow for use of ClassifierTools.
        /**
         * Same conversion as classifyInstance, but returns the tree's class distribution.
         */
        @Override
        public double[] distributionForInstance(Instance instance) throws Exception {
            Bag testBag = BOSSTransform(instance);
            //convert bag to instance
            double[] init = new double[bagInsts.numAttributes()];
            init[init.length-1] = testBag.getClassVal();
            //TEMPORARILY create it on the end of the train insts to easily copy over the attribute data.
            bagInsts.add(new DenseInstance(1, init));
            for (Entry<BitWordInt,Integer> entry : testBag.entrySet()) {
                Attribute att = bagInsts.attribute(entry.getKey().toString());
                if (att != null)
                    bagInsts.get(bagInsts.numInstances()-1).setValue(att, entry.getValue());
            }
            Instance testInst = bagInsts.remove(bagInsts.size()-1);
            return tree.distributionForInstance(testInst);
        }
    }
}
/*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.dictionary_based.boss_variants;
import fileIO.OutFile;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import tsml.classifiers.dictionary_based.bitword.BitWord;
import tsml.classifiers.dictionary_based.bitword.BitWordInt;
import utilities.InstanceTools;
import tsml.classifiers.SaveParameterInfo;
import weka.core.TechnicalInformation;
import utilities.generic_storage.ComparablePair;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map.Entry;
import utilities.ClassifierTools;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.classifiers.Classifier;
import experiments.data.DatasetLoading;
import java.util.concurrent.TimeUnit;
import tsml.classifiers.EnhancedAbstractClassifier;
/**
* BOSSSpatialPyramids_BD classifier that force the use of boss distance. This is to be removed
* and the functionality to select distance functions (correctly..) incorporated into
* BOSSSpatialPyramids
*
* BOSSSpatialPyramids classifier with parameter search and ensembling, if parameters are known,
* use nested 'BOSSSpatialPyramidsIndividual' classifier and directly provide them.
*
* Intended use is with the default constructor, however can force the normalisation
* parameter to true/false by passing a boolean, e.g c = new BOSSSpatialPyramids(true)
*
* Params: normalise? (i.e should first fourier coefficient(mean value) be discarded)
* Alphabetsize fixed to four
*
* @author James Large
*
* Base algorithm information found in BOSS.java
* Spatial Pyramids based on the algorithm described in getTechnicalInformation()
*/
public class BOSSSpatialPyramids_BD extends EnhancedAbstractClassifier implements SaveParameterInfo {
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
result = new TechnicalInformation(TechnicalInformation.Type.ARTICLE);
result.setValue(TechnicalInformation.Field.AUTHOR, "Lazebnik, Svetlana and Schmid, Cordelia and Ponce, Jean");
result.setValue(TechnicalInformation.Field.TITLE, "Beyond bags of features: Spatial pyramid matching for recognizing natural scene categories");
result.setValue(TechnicalInformation.Field.BOOKTITLE, "Computer Vision and Pattern Recognition, 2006 IEEE Computer Society Conference on");
result.setValue(TechnicalInformation.Field.VOLUME, "2");
result.setValue(TechnicalInformation.Field.PAGES, "2169--2178");
result.setValue(TechnicalInformation.Field.YEAR, "2006");
return result;
}
    //the members of the built ensemble
    private List<BOSSWindow> classifiers;
    //a member must be within this fraction of the best member's train accuracy to be kept
    private final double correctThreshold = 0.92;
    // private int maxEnsembleSize = Integer.MAX_VALUE;
    //hard cap on the number of ensemble members
    private int maxEnsembleSize = 100;
    //word lengths searched for each window size, longest first (bags are shortened incrementally)
    private final Integer[] wordLengths = { 16, 14, 12, 10, 8 };
    //spatial pyramid depths searched per member
    private final Integer[] levels = { 1, 2, 3 };
    //SFA alphabet size is fixed to four, as in the original BOSS
    private final int alphabetSize = 4;
    public enum SerialiseOptions {
        //dont do any serialising, run as normal
        NONE,
        //serialise the final boss classifiers which made it into ensemble (does not serialise the entire BOSSEnsembleSP_Redo object)
        //slight runtime cost
        STORE,
        //serialise the final boss classifiers, and delete from main memory. reload each from ser file when needed in classification.
        //the most memory used at any one time is therefore ~2 individual boss classifiers during training.
        //massive runtime cost, order of magnitude
        STORE_LOAD
    };
    //serialisation behaviour for ensemble members, NONE by default
    private SerialiseOptions serOption = SerialiseOptions.NONE;
    //NOTE(review): static and mutated during buildClassifier, so concurrent builds of
    //multiple instances would share and append to this path — confirm single-use intent
    private static String serFileLoc = "BOSSWindowSers\\";
    //normalisation options searched during buildClassifier; fixed by the boolean constructor
    private boolean[] normOptions;
/**
* Providing a particular value for normalisation will force that option, if
* whether to normalise should be a parameter to be searched, use default constructor
*
* @param normalise whether or not to normalise by dropping the first Fourier coefficient
*/
public BOSSSpatialPyramids_BD(boolean normalise) {
super(CAN_ESTIMATE_OWN_PERFORMANCE);
normOptions = new boolean[] { normalise };
}
/**
* During buildClassifier(...), will search through normalisation as well as
* window size and word length if no particular normalisation option is provided
*/
public BOSSSpatialPyramids_BD() {
super(CAN_ESTIMATE_OWN_PERFORMANCE);
normOptions = new boolean[] { true, false };
}
    /**
     * Wraps one ensemble member (an individual classifier built at a particular
     * window size) together with its train accuracy, and handles optional
     * serialisation of the member to/from disk.
     */
    public static class BOSSWindow implements Comparable<BOSSWindow>, Serializable {
        //null while the member is serialised out under STORE_LOAD
        private BOSSSpatialPyramidsIndividual classifier;
        //train accuracy of this member, used for ensemble pruning
        public double accuracy;
        //path of this member's .ser file
        public String filename;
        private static final long serialVersionUID = 2L;
        public BOSSWindow(String filename) {
            this.filename = filename;
        }
        public BOSSWindow(BOSSSpatialPyramidsIndividual classifer, double accuracy, String dataset) {
            this.classifier = classifer;
            this.accuracy = accuracy;
            buildFileName(dataset);
        }
        public double classifyInstance(Instance inst) throws Exception {
            return classifier.classifyInstance(inst);
        }
        //classify train instance by index (leave-one-out over the stored bags)
        public double classifyInstance(int test) throws Exception {
            return classifier.classifyInstance(test);
        }
        //filename encodes dataset and the member's parameters, so it is unique per member
        private void buildFileName(String dataset) {
            filename = serFileLoc + dataset + "_" + classifier.windowSize + "_" + classifier.wordLength + "_" + classifier.alphabetSize + "_" + classifier.norm + ".ser";
        }
        //serialise this wrapper then free the classifier from memory (STORE_LOAD mode)
        public boolean storeAndClearClassifier() {
            try {
                ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(filename));
                out.writeObject(this);
                out.close();
                clearClassifier();
                return true;
            }catch(IOException e) {
                System.out.print("Error serialiszing to " + filename);
                e.printStackTrace();
                return false;
            }
        }
        //serialise this wrapper but keep the classifier in memory (STORE mode)
        public boolean store() {
            try {
                ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(filename));
                out.writeObject(this);
                out.close();
                return true;
            }catch(IOException e) {
                System.out.print("Error serialiszing to " + filename);
                e.printStackTrace();
                return false;
            }
        }
        public void clearClassifier() {
            classifier = null;
        }
        //reload the serialised member from disk, restoring accuracy and classifier
        public boolean load() {
            BOSSWindow bw = null;
            try {
                ObjectInputStream in = new ObjectInputStream(new FileInputStream(filename));
                bw = (BOSSWindow) in.readObject();
                in.close();
                this.accuracy = bw.accuracy;
                this.classifier = bw.classifier;
                return true;
            }catch(IOException i) {
                System.out.print("Error deserialiszing from " + filename);
                i.printStackTrace();
                return false;
            }catch(ClassNotFoundException c) {
                System.out.println("BOSSWindow class not found");
                c.printStackTrace();
                return false;
            }
        }
        //remove this member's .ser file when it is culled from the ensemble
        public boolean deleteSerFile() {
            try {
                File f = new File(filename);
                return f.delete();
            } catch(SecurityException s) {
                System.out.println("Unable to delete, access denied: " + filename);
                s.printStackTrace();
                return false;
            }
        }
        /**
         * @return { numIntervals(word length), alphabetSize, slidingWindowSize }
         */
        public int[] getParameters() { return classifier.getParameters(); }
        public int getWindowSize() { return classifier.getWindowSize(); }
        public int getWordLength() { return classifier.getWordLength(); }
        public int getAlphabetSize() { return classifier.getAlphabetSize(); }
        public boolean isNorm() { return classifier.isNorm(); }
        public double getLevelWeighting() { return classifier.getLevelWeighting(); }
        public int getLevels() { return classifier.getLevels(); }
        //orders members by train accuracy (ascending)
        @Override
        public int compareTo(BOSSWindow other) {
            if (this.accuracy > other.accuracy)
                return 1;
            if (this.accuracy == other.accuracy)
                return 0;
            return -1;
        }
    }
@Override
public String getParameters() {
StringBuilder sb = new StringBuilder();
BOSSWindow first = classifiers.get(0);
sb.append("windowSize=").append(first.getWindowSize()).append("/wordLength=").append(first.getWordLength());
sb.append("/alphabetSize=").append(first.getAlphabetSize()).append("/norm=").append(first.isNorm());
for (int i = 1; i < classifiers.size(); ++i) {
BOSSWindow boss = classifiers.get(i);
sb.append(",windowSize=").append(boss.getWindowSize()).append("/wordLength=").append(boss.getWordLength());
sb.append("/alphabetSize=").append(boss.getAlphabetSize()).append("/norm=").append(boss.isNorm());
}
return sb.toString();
}
@Override
public int setNumberOfFolds(Instances data){
return data.numInstances();
}
/**
* @return { numIntervals(word length), alphabetSize, slidingWindowSize } for each BOSSWindow in this *built* classifier
*/
public int[][] getParametersValues() {
int[][] params = new int[classifiers.size()][];
int i = 0;
for (BOSSWindow boss : classifiers)
params[i++] = boss.getParameters();
return params;
}
public void setSerOption(SerialiseOptions option) {
serOption = option;
}
public void setSerFileLoc(String path) {
serFileLoc = path;
}
    /**
     * Builds the spatial-pyramid BOSS ensemble. For each normalisation option and
     * window size, the best word length is found first (at 1 pyramid level), then
     * the best pyramid depth; a candidate joins the ensemble only if its LOO train
     * accuracy is within correctThreshold of the current best, with continual
     * pruning back to maxEnsembleSize.
     *
     * @param data train instances; class attribute must be the last attribute
     * @throws Exception if the class attribute is not last, or a member build fails
     */
    @Override
    public void buildClassifier(final Instances data) throws Exception {
        if (data.classIndex() != data.numAttributes()-1)
            throw new Exception("BOSSEnsembleSP_BuildClassifier: Class attribute not set as last attribute in dataset");
        //when serialising, members are written under a per-build timestamped subdirectory
        if (serOption == SerialiseOptions.STORE || serOption == SerialiseOptions.STORE_LOAD) {
            DateFormat dateFormat = new SimpleDateFormat("yyyyMMddHHmmss");
            Date date = new Date();
            serFileLoc += data.relationName() + "_" + dateFormat.format(date) + "\\";
            File f = new File(serFileLoc);
            if (!f.isDirectory())
                f.mkdirs();
        }
        classifiers = new LinkedList<BOSSWindow>();
        int numSeries = data.numInstances();
        int seriesLength = data.numAttributes()-1; //minus class attribute
        int minWindow = 10;
        int maxWindow = seriesLength;
        //int winInc = 1; //check every window size in range
        // //whats the max number of window sizes that should be searched through
        //double maxWindowSearches = Math.min(200, Math.sqrt(seriesLength));
        //search roughly a quarter of the possible window sizes, evenly spaced
        double maxWindowSearches = seriesLength/4.0;
        int winInc = (int)((maxWindow - minWindow) / maxWindowSearches);
        if (winInc < 1) winInc = 1;
        //keep track of current max window size accuracy, constantly check for correctthreshold to discard to save space
        double maxAcc = -1.0;
        //the acc of the worst member to make it into the final ensemble as it stands
        double minMaxAcc = -1.0;
        for (boolean normalise : normOptions) {
            for (int winSize = minWindow; winSize <= maxWindow; winSize += winInc) {
                BOSSSpatialPyramidsIndividual boss = new BOSSSpatialPyramidsIndividual(wordLengths[0], alphabetSize, winSize, normalise, levels[0]); //1 level, find best 'normal' boss classifier
                boss.buildClassifier(data); //initial setup for this windowsize, with max word length
                BOSSSpatialPyramidsIndividual bestClassifierForWinSize = null;
                double bestAccForWinSize = -1.0;
                //find best word length for this window size
                for (Integer wordLen : wordLengths) {
                    boss = boss.buildShortenedSPBags(wordLen); //in first iteration, same lengths (wordLengths[0]), will do nothing
                    int correct = 0;
                    for (int i = 0; i < numSeries; ++i) {
                        double c = boss.classifyInstance(i); //classify series i, while ignoring its corresponding histogram i
                        if (c == data.get(i).classValue())
                            ++correct;
                    }
                    double acc = (double)correct/(double)numSeries;
                    //>= favours shorter word lengths (searched later) on ties
                    if (acc >= bestAccForWinSize) {
                        bestAccForWinSize = acc;
                        bestClassifierForWinSize = boss;
                    }
                }
                //best 'normal' boss classifier found, now find the best number of levels
                //effectively determining whether the feature this member/classifier specialises in is
                //local or global
                int bestLevels = bestClassifierForWinSize.getLevels();
                for (int l = 1; l < levels.length; ++l) { //skip first, already found acc for it before
                    bestClassifierForWinSize.changeNumLevels(levels[l]);
                    int correct = 0;
                    for (int i = 0; i < numSeries; ++i) {
                        double c = bestClassifierForWinSize.classifyInstance(i); //classify series i, while ignoring its corresponding histogram i
                        if (c == data.get(i).classValue())
                            ++correct;
                    }
                    double acc = (double)correct/(double)numSeries;
                    if (acc > bestAccForWinSize) { //only store if >, not >= (favours lower levels = less space)
                        bestAccForWinSize = acc;
                        bestLevels = levels[l];
                    }
                }
                if (makesItIntoEnsemble(bestAccForWinSize, maxAcc, minMaxAcc, classifiers.size())) {
                    bestClassifierForWinSize.changeNumLevels(bestLevels);
                    BOSSWindow bw = new BOSSWindow(bestClassifierForWinSize, bestAccForWinSize, data.relationName());
                    bw.classifier.clean();
                    if (serOption == SerialiseOptions.STORE)
                        bw.store();
                    else if (serOption == SerialiseOptions.STORE_LOAD)
                        bw.storeAndClearClassifier();
                    classifiers.add(bw);
                    if (bestAccForWinSize > maxAcc) {
                        maxAcc = bestAccForWinSize;
                        //get rid of any extras that dont fall within the final max threshold
                        Iterator<BOSSWindow> it = classifiers.iterator();
                        while (it.hasNext()) {
                            BOSSWindow b = it.next();
                            if (b.accuracy < maxAcc * correctThreshold) {
                                if (serOption == SerialiseOptions.STORE || serOption == SerialiseOptions.STORE_LOAD)
                                    b.deleteSerFile();
                                it.remove();
                            }
                        }
                    }
                    while (classifiers.size() > maxEnsembleSize) {
                        //cull the 'worst of the best' until back under the max size
                        int minAccInd = (int)findMinEnsembleAcc()[0];
                        if (serOption == SerialiseOptions.STORE || serOption == SerialiseOptions.STORE_LOAD)
                            classifiers.get(minAccInd).deleteSerFile();
                        classifiers.remove(minAccInd);
                    }
                    minMaxAcc = findMinEnsembleAcc()[1]; //new 'worst of the best' acc
                }
            }
        }
        if (getEstimateOwnPerformance())
            findEnsembleTrainAcc(data);
    }
//[0] = index, [1] = acc
private double[] findMinEnsembleAcc() {
double minAcc = Double.MIN_VALUE;
int minAccInd = 0;
for (int i = 0; i < classifiers.size(); ++i) {
double curacc = classifiers.get(i).accuracy;
if (curacc < minAcc) {
minAcc = curacc;
minAccInd = i;
}
}
return new double[] { minAccInd, minAcc };
}
private boolean makesItIntoEnsemble(double acc, double maxAcc, double minMaxAcc, int curEnsembleSize) {
if (acc >= maxAcc * correctThreshold) {
if (curEnsembleSize >= maxEnsembleSize)
return acc > minMaxAcc;
else
return true;
}
return false;
}
private double findEnsembleTrainAcc(Instances data) throws Exception {
trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
trainResults.setEstimatorName(getClassifierName());
trainResults.setDatasetName(data.relationName());
trainResults.setFoldID(seed);
trainResults.setSplit("train");
trainResults.setParas(getParameters());
double correct = 0;
for (int i = 0; i < data.numInstances(); ++i) {
long predTime = System.nanoTime();
//classify series i, while ignoring its corresponding histogram i
double[] probs = distributionForInstance(i, data.numClasses());
predTime = System.nanoTime() - predTime;
int maxClass = findIndexOfMax(probs, rand);
if (maxClass == data.get(i).classValue())
++correct;
trainResults.addPrediction(data.get(i).classValue(), probs, maxClass, predTime, "");
}
trainResults.finaliseResults();
double result = correct / data.numInstances();
return result;
}
/**
* Classify the train instance at index 'test', whilst ignoring the corresponding bags
* in each of the members of the ensemble, for use in CV of BOSSEnsembleSP_Redo
*/
public double classifyInstance(int test, int numclasses) throws Exception {
double[] dist = distributionForInstance(test, numclasses);
return findIndexOfMax(dist, rand);
}
public double[] distributionForInstance(int test, int numclasses) throws Exception {
double[] classHist = new double[numclasses];
//get votes from all windows
double sum = 0;
for (BOSSWindow classifier : classifiers) {
if (serOption == SerialiseOptions.STORE_LOAD)
classifier.load();
double classification = classifier.classifyInstance(test);
if (serOption == SerialiseOptions.STORE_LOAD)
classifier.clearClassifier();
classHist[(int)classification]++;
sum++;
}
if (sum != 0)
for (int i = 0; i < classHist.length; ++i)
classHist[i] /= sum;
return classHist;
}
@Override
public double classifyInstance(Instance instance) throws Exception {
double[] dist = distributionForInstance(instance);
return findIndexOfMax(dist, rand);
}
@Override
public double[] distributionForInstance(Instance instance) throws Exception {
double[] classHist = new double[instance.numClasses()];
double sum = 0;
//get votes from all windows
for (BOSSWindow classifier : classifiers) {
if (serOption == SerialiseOptions.STORE_LOAD)
classifier.load();
double classification = classifier.classifyInstance(instance);
if (serOption == SerialiseOptions.STORE_LOAD)
classifier.clearClassifier();
classHist[(int)classification]++;
sum++;
}
if (sum != 0)
for (int i = 0; i < classHist.length; ++i)
classHist[i] /= sum;
return classHist;
}
    //capability checking is not implemented for this classifier
    @Override
    public Capabilities getCapabilities() {
        throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
    }
public static void bossspImageMakingCode() throws Exception {
int levels = 3;
int quadrants = (int)(Math.pow(2, levels)) - 1;
BOSSSpatialPyramids_BD boss = new BOSSSpatialPyramids_BD(false);
// String dset = "Lightning7";
String dset = "BeetleFly";
Instances train = DatasetLoading.loadDataNullable(dset+"_TRAIN.arff");
boss.buildClassifier(train);
//finding an example bag with 3 levels
BOSSSpatialPyramidsIndividual.SPBag bag = null;
for (BOSSWindow bw : boss.classifiers)
if (bw.getLevels() == levels)
bag = bw.classifier.bags.get(0);
if (bag == null)
throw new Exception("fuck");
//separating out the levels
BOSSSpatialPyramidsIndividual.SPBag[] bagquadrants = new BOSSSpatialPyramidsIndividual.SPBag[quadrants];
for (int i =0; i < quadrants; ++i) {
bagquadrants[i] = (BOSSSpatialPyramidsIndividual.SPBag) bag.clone();
Iterator<Entry<ComparablePair<BitWord, Integer>, Double>> iter = bagquadrants[i].entrySet().iterator();
while (iter.hasNext()) {
Entry<ComparablePair<BitWord, Integer>, Double> entry = iter.next();
if (entry.getKey().var2 != i)
iter.remove();
}
}
//building the dense histograms back up... holy shit
double[][] hists = new double[quadrants][bagquadrants[0].size()];
int wordindex = 0;
for (Entry<ComparablePair<BitWord, Integer>, Double> level0entry : bagquadrants[0].entrySet()) {
hists[0][wordindex] = level0entry.getValue();
for (int i =1; i < quadrants; ++i)
for (Entry<ComparablePair<BitWord, Integer>, Double> entry : bagquadrants[i].entrySet())
if (entry.getKey().var1 == level0entry.getKey().var1)
hists[i][wordindex] = entry.getValue();
wordindex++;
}
//filewriting
OutFile out = new OutFile(dset+"hists.csv");
// headers
for (Entry<ComparablePair<BitWord, Integer>, Double> entry : bag.entrySet())
if (entry.getKey().var2 == 0) //lowest level
out.writeString("," + ((BitWordInt)entry.getKey().var1).buildString());
out.writeLine("");
// values
for (double[] hist : hists) {
for (double d : hist)
out.writeString("," + d);
out.writeLine("");
out.writeLine("");
out.writeLine("");
}
out.closeFile();
}
public static void main(String[] args) throws Exception{
// //Minimum working example
// String dataset = "ItalyPowerDemand";
//// Instances train = ClassifierTools.loadDataThrowable("C:\\TSC Problems\\"+dataset+"\\"+dataset+"_TRAIN.arff");
//// Instances test = ClassifierTools.loadDataThrowable("C:\\TSC Problems\\"+dataset+"\\"+dataset+"_TEST.arff");
////
//// Classifier c = new BOSSSpatialPyramids_BD();
//// c.buildClassifier(train);
//// double accuracy = ClassifierTools.accuracy(test, c);
////
//// System.out.println("BOSSEnsembleSP accuracy on " + dataset + " fold 0 = " + accuracy);
//
// //Other examples/tests
//// detailedFold0Test(dataset);
// resampleTest(dataset, 25);
bossspImageMakingCode();
}
    /**
     * Verbose single-fold experiment: trains on {dset}_TRAIN.arff, prints the
     * selected ensemble members' parameters and train accuracies, then reports
     * test accuracy and timings. Reads from the hard-coded C:\TSC Problems path.
     *
     * @param dset dataset name (directory and file prefix)
     */
    public static void detailedFold0Test(String dset) {
        System.out.println("BOSSEnsembleSPDetailedTest\n");
        try {
            Instances train = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dset+"\\"+dset+"_TRAIN.arff");
            Instances test = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dset+"\\"+dset+"_TEST.arff");
            System.out.println(train.relationName());
            BOSSSpatialPyramids_BD boss = new BOSSSpatialPyramids_BD();
            //TRAINING
            System.out.println("Training starting");
            long start = System.nanoTime();
            boss.buildClassifier(train);
            double trainTime = (System.nanoTime() - start) / 1000000000.0; //seconds
            System.out.println("Training done (" + trainTime + "s)");
            //RESULTS OF TRAINING: one line per retained member
            System.out.println("Ensemble Size: " + boss.classifiers.size());
            System.out.println("Param sets: ");
            int[][] params = boss.getParametersValues();
            for (int i = 0; i < params.length; ++i)
                System.out.println(i + ": " + params[i][0] + " " + params[i][1] + " " + params[i][2] + " " + boss.classifiers.get(i).isNorm() + " " + boss.classifiers.get(i).getLevels() + " " + boss.classifiers.get(i).accuracy);
            //TESTING
            System.out.println("\nTesting starting");
            start = System.nanoTime();
            double acc = ClassifierTools.accuracy(test, boss);
            double testTime = (System.nanoTime() - start) / 1000000000.0; //seconds
            System.out.println("Testing done (" + testTime + "s)");
            System.out.println("\nACC: " + acc);
        }
        catch (Exception e) {
            System.out.println(e);
            e.printStackTrace();
        }
    }
public static void resampleTest(String dset, int resamples) throws Exception {
Instances train = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dset+"\\"+dset+"_TRAIN.arff");
Instances test = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dset+"\\"+dset+"_TEST.arff");
Classifier c = new BOSSSpatialPyramids_BD();
//c.setCVPath("C:\\tempproject\\BOSSEnsembleCVtest.csv");
double [] accs = new double[resamples];
for(int i=0;i<resamples;i++){
Instances[] data=InstanceTools.resampleTrainAndTestInstances(train, test, i);
c.buildClassifier(data[0]);
accs[i]= ClassifierTools.accuracy(data[1], c);
if (i==0)
System.out.print(accs[i]);
else
System.out.print("," + accs[i]);
}
double mean = 0;
for(int i=0;i<resamples;i++)
mean += accs[i];
mean/=resamples;
System.out.println("\n\nBOSSEnsembleSP mean acc over " + resamples + " resamples: " + mean);
}
    /**
     * BOSSSpatialPyramidsIndividual classifier to be used with known parameters,
     * for boss with parameter search, use BOSSSpatialPyramids.
     *
     * Params: wordLength, alphabetSize, windowLength, normalise?, levels
     *
     * @author James Large.
     */
    public static class BOSSSpatialPyramidsIndividual implements Classifier, Serializable {
        protected BitWordInt[][] SFAwords; //all sfa words found in original buildClassifier(), no numerosity reduction/shortening applied
        public ArrayList<SPBag> bags; //histograms of words of the current wordlength with numerosity reduction applied (if selected)
        protected double[/*letterindex*/][/*breakpointsforletter*/] breakpoints; //MCB discretisation boundaries, learned in buildClassifier
        protected double inverseSqrtWindowSize; //cached 1/sqrt(windowSize), used to normalise DFT coefficients
        protected int windowSize; //sliding window length
        protected int wordLength; //number of letters (kept dft coefficients) per SFA word
        protected int alphabetSize; //number of discretisation bins per letter
        protected boolean norm; //if true, the first fourier coefficient is skipped
        protected int levels = 0; //spatial pyramid depth
        protected double levelWeighting = 0.5; //per-level decay applied to histogram counts
        protected int seriesLength; //set from the train data in buildClassifier
        protected boolean numerosityReduction = true; //collapse runs of identical consecutive words
        protected static final long serialVersionUID = 1L;
        /**
         * @param wordLength number of letters (kept dft coefficients) per SFA word
         * @param alphabetSize number of discretisation bins per letter
         * @param windowSize sliding window length
         * @param normalise if true, skip the first fourier coefficient of each window
         * @param levels spatial pyramid depth
         */
        public BOSSSpatialPyramidsIndividual(int wordLength, int alphabetSize, int windowSize, boolean normalise, int levels) {
            this.wordLength = wordLength;
            this.alphabetSize = alphabetSize;
            this.windowSize = windowSize;
            this.inverseSqrtWindowSize = 1.0 / Math.sqrt(windowSize); //cached for dft normalisation
            this.norm = normalise;
            this.levels = levels;
        }
        /**
         * Used when shortening histograms: copies 'meta' data over, but with shorter
         * word length; the actual shortening of bags happens separately.
         * Note: SFAwords and breakpoints are shared by reference with the source
         * classifier, not copied.
         */
        public BOSSSpatialPyramidsIndividual(BOSSSpatialPyramidsIndividual boss, int wordLength) {
            this.wordLength = wordLength;
            this.windowSize = boss.windowSize;
            this.inverseSqrtWindowSize = boss.inverseSqrtWindowSize;
            this.alphabetSize = boss.alphabetSize;
            this.norm = boss.norm;
            this.numerosityReduction = boss.numerosityReduction;
            //this.alphabet = boss.alphabet;
            this.SFAwords = boss.SFAwords;
            this.breakpoints = boss.breakpoints;
            this.levelWeighting = boss.levelWeighting;
            this.levels = boss.levels;
            this.seriesLength = boss.seriesLength;
            bags = new ArrayList<>(boss.bags.size()); //bags themselves are rebuilt by the caller
        }
//map of <word, level> => count
public static class SPBag extends HashMap<ComparablePair<BitWord, Integer>, Double> {
double classVal;
public SPBag() {
super();
}
public SPBag(int classValue) {
super();
classVal = classValue;
}
public double getClassVal() { return classVal; }
public void setClassVal(double classVal) { this.classVal = classVal; }
}
        //simple accessors for the build parameters
        public int getWindowSize() { return windowSize; }
        public int getWordLength() { return wordLength; }
        public int getAlphabetSize() { return alphabetSize; }
        public boolean isNorm() { return norm; }
        public int getLevels() { return levels; }
        public double getLevelWeighting() { return levelWeighting; }
        /**
         * @return { numIntervals(word length), alphabetSize, slidingWindowSize }
         * (note: the normalise flag and levels are NOT included, despite being
         * build parameters)
         */
        public int[] getParameters() {
            return new int[] { wordLength, alphabetSize, windowSize };
        }

        /** Frees the raw SFA words once no further shortening is needed, to save memory. */
        public void clean() {
            SFAwords = null;
        }
protected double[][] slidingWindow(double[] data) {
int numWindows = data.length-windowSize+1;
double[][] subSequences = new double[numWindows][windowSize];
for (int windowStart = 0; windowStart < numWindows; ++windowStart) {
//copy the elements windowStart to windowStart+windowSize from data into
//the subsequence matrix at row windowStart
System.arraycopy(data,windowStart,subSequences[windowStart],0,windowSize);
}
return subSequences;
}
protected double[][] performDFT(double[][] windows) {
double[][] dfts = new double[windows.length][wordLength];
for (int i = 0; i < windows.length; ++i) {
dfts[i] = DFT(windows[i]);
}
return dfts;
}
protected double stdDev(double[] series) {
double sum = 0.0;
double squareSum = 0.0;
for (int i = 0; i < windowSize; i++) {
sum += series[i];
squareSum += series[i]*series[i];
}
double mean = sum / series.length;
double variance = squareSum / series.length - mean*mean;
return variance > 0 ? Math.sqrt(variance) : 1.0;
}
/**
* Performs DFT but calculates only wordLength/2 coefficients instead of the
* full transform, and skips the first coefficient if it is to be normalised
*
* @return double[] size wordLength, { real1, imag1, ... realwl/2, imagwl/2 }
*/
protected double[] DFT(double[] series) {
//taken from FFT.java but
//return just a double[] size n, { real1, imag1, ... realn/2, imagn/2 }
//instead of Complex[] size n/2
int n=series.length;
int outputLength = wordLength/2;
int start = (norm ? 1 : 0);
//normalize the disjoint windows and sliding windows by dividing them by their standard deviation
//all Fourier coefficients are divided by sqrt(windowSize)
double normalisingFactor = inverseSqrtWindowSize / stdDev(series);
double[] dft=new double[outputLength*2];
for (int k = start; k < start + outputLength; k++) { // For each output element
float sumreal = 0;
float sumimag = 0;
for (int t = 0; t < n; t++) { // For each input element
sumreal += series[t]*Math.cos(2*Math.PI * t * k / n);
sumimag += -series[t]*Math.sin(2*Math.PI * t * k / n);
}
dft[(k-start)*2] = sumreal * normalisingFactor;
dft[(k-start)*2+1] = sumimag * normalisingFactor;
}
return dft;
}
private double[] DFTunnormed(double[] series) {
//taken from FFT.java but
//return just a double[] size n, { real1, imag1, ... realn/2, imagn/2 }
//instead of Complex[] size n/2
int n=series.length;
int outputLength = wordLength/2;
int start = (norm ? 1 : 0);
//normalize the disjoint windows and sliding windows by dividing them by their standard deviation
//all Fourier coefficients are divided by sqrt(windowSize)
double[] dft = new double[outputLength*2];
double twoPi = 2*Math.PI / n;
for (int k = start; k < start + outputLength; k++) { // For each output element
float sumreal = 0;
float sumimag = 0;
for (int t = 0; t < n; t++) { // For each input element
sumreal += series[t]*Math.cos(twoPi * t * k);
sumimag += -series[t]*Math.sin(twoPi * t * k);
}
dft[(k-start)*2] = sumreal;
dft[(k-start)*2+1] = sumimag;
}
return dft;
}
private double[] normalizeDFT(double[] dft, double std) {
double normalisingFactor = (std > 0? 1.0 / std : 1.0) * inverseSqrtWindowSize;
for (int i = 0; i < dft.length; i++) {
dft[i] *= normalisingFactor;
}
return dft;
}
        /**
         * Momentary Fourier Transform: computes the truncated DFT of every sliding
         * window of the series, deriving window t's coefficients incrementally from
         * window t-1's (O(wordLength) per window) rather than recomputing each DFT.
         * Window 0 is seeded with a direct DFTunnormed call; each result is then
         * normalised by that window's standard deviation.
         *
         * @return one row of (up to) wordLength normalised coefficients per window
         */
        private double[][] performMFT(double[] series) {
            // ignore DC value?
            int startOffset = norm ? 2 : 0;
            int l = wordLength;
            l = l + l % 2; // make it even
            double[] phis = new double[l];
            //phase factors e^(-2*pi*i*k/windowSize) per coefficient, as (real, imag) pairs
            for (int u = 0; u < phis.length; u += 2) {
                double uHalve = -(u + startOffset) / 2;
                phis[u] = realephi(uHalve, windowSize);
                phis[u + 1] = complexephi(uHalve, windowSize);
            }
            // means and stddev for each sliding window
            int end = Math.max(1, series.length - windowSize + 1);
            double[] means = new double[end];
            double[] stds = new double[end];
            calcIncreamentalMeanStddev(windowSize, series, means, stds);
            // holds the DFT of each sliding window
            double[][] transformed = new double[end][];
            double[] mftData = null;
            for (int t = 0; t < end; t++) {
                // use the MFT: slide the window by one sample and rotate by the phase factor
                if (t > 0) {
                    for (int k = 0; k < l; k += 2) {
                        double real1 = (mftData[k] + series[t + windowSize - 1] - series[t - 1]);
                        double imag1 = (mftData[k + 1]);
                        double real = complexMulReal(real1, imag1, phis[k], phis[k + 1]);
                        double imag = complexMulImag(real1, imag1, phis[k], phis[k + 1]);
                        mftData[k] = real;
                        mftData[k + 1] = imag;
                    }
                } // use the DFT for the first offset
                else {
                    mftData = Arrays.copyOf(series, windowSize);
                    mftData = DFTunnormed(mftData);
                }
                // normalization for lower bounding
                transformed[t] = normalizeDFT(Arrays.copyOf(mftData, l), stds[t]);
            }
            return transformed;
        }
        /**
         * Fills means/stds with the mean and population standard deviation of every
         * length-windowLength sliding window of series, maintained incrementally by
         * adding the entering sample and removing the leaving one per step.
         * (Name typo "Increamental" kept, as callers use it.)
         *
         * @param means output array, one value per window; must have length series.length-windowLength+1
         * @param stds output array, same length as means
         */
        private void calcIncreamentalMeanStddev(int windowLength, double[] series, double[] means, double[] stds) {
            double sum = 0;
            double squareSum = 0;
            // it is faster to multiply than to divide
            double rWindowLength = 1.0 / (double) windowLength;
            double[] tsData = series;
            for (int ww = 0; ww < windowLength; ww++) {
                sum += tsData[ww];
                squareSum += tsData[ww] * tsData[ww];
            }
            means[0] = sum * rWindowLength;
            double buf = squareSum * rWindowLength - means[0] * means[0];
            stds[0] = buf > 0 ? Math.sqrt(buf) : 0;
            //slide: update running sum/squareSum with the sample entering and leaving the window
            for (int w = 1, end = tsData.length - windowLength + 1; w < end; w++) {
                sum += tsData[w + windowLength - 1] - tsData[w - 1];
                means[w] = sum * rWindowLength;
                squareSum += tsData[w + windowLength - 1] * tsData[w + windowLength - 1] - tsData[w - 1] * tsData[w - 1];
                buf = squareSum * rWindowLength - means[w] * means[w];
                stds[w] = buf > 0 ? Math.sqrt(buf) : 0; //clamp tiny negative variance from rounding
            }
        }
private static double complexMulReal(double r1, double im1, double r2, double im2) {
return r1 * r2 - im1 * im2;
}
private static double complexMulImag(double r1, double im1, double r2, double im2) {
return r1 * im2 + r2 * im1;
}
private static double realephi(double u, double M) {
return Math.cos(2 * Math.PI * u / M);
}
private static double complexephi(double u, double M) {
return -Math.sin(2 * Math.PI * u / M);
}
protected double[][] disjointWindows(double [] data) {
int amount = (int)Math.ceil(data.length/(double)windowSize);
double[][] subSequences = new double[amount][windowSize];
for (int win = 0; win < amount; ++win) {
int offset = Math.min(win*windowSize, data.length-windowSize);
//copy the elements windowStart to windowStart+windowSize from data into
//the subsequence matrix at position windowStart
System.arraycopy(data,offset,subSequences[win],0,windowSize);
}
return subSequences;
}
        /**
         * Multiple Coefficient Binning: learns, for each dft coefficient position,
         * alphabetSize-1 equi-depth breakpoints from the coefficients of all
         * disjoint windows of all train instances. Sets and returns this.breakpoints.
         */
        protected double[][] MCB(Instances data) {
            double[][][] dfts = new double[data.numInstances()][][];
            int sample = 0;
            for (Instance inst : data) {
                dfts[sample++] = performDFT(disjointWindows(toArrayNoClass(inst))); //approximation
            }
            int numInsts = dfts.length;
            int numWindowsPerInst = dfts[0].length;
            int totalNumWindows = numInsts*numWindowsPerInst;
            breakpoints = new double[wordLength][alphabetSize];
            for (int letter = 0; letter < wordLength; ++letter) { //for each dft coeff
                //extract this column from all windows in all instances
                double[] column = new double[totalNumWindows];
                for (int inst = 0; inst < numInsts; ++inst)
                    for (int window = 0; window < numWindowsPerInst; ++window) {
                        //rounding dft coefficients to reduce noise
                        column[(inst * numWindowsPerInst) + window] = Math.round(dfts[inst][window][letter]*100.0)/100.0;
                    }
                //sort, and run through to find breakpoints for equi-depth bins
                Arrays.sort(column);
                double binIndex = 0;
                double targetBinDepth = (double)totalNumWindows / (double)alphabetSize;
                for (int bp = 0; bp < alphabetSize-1; ++bp) {
                    binIndex += targetBinDepth;
                    breakpoints[letter][bp] = column[(int)binIndex];
                }
                breakpoints[letter][alphabetSize-1] = Double.MAX_VALUE; //last one can always = infinity
            }
            return breakpoints;
        }
        /**
         * Builds a brand new boss bag from the passed fourier transformed data, rather than from
         * looking up existing transforms from earlier builds.
         *
         * To be used e.g. to transform new test instances. Applies numerosity
         * reduction (if enabled) and the pyramid level weights before returning.
         */
        protected SPBag createSPBagSingle(double[][] dfts) {
            SPBag bag = new SPBag();
            BitWordInt lastWord = new BitWordInt();
            int wInd = 0; //current window index
            int trivialMatchCount = 0; //length of the current run of identical words
            for (double[] d : dfts) {
                BitWordInt word = createWord(d);
                //add to bag, unless num reduction applies
                if (numerosityReduction && word.equals(lastWord)) {
                    ++trivialMatchCount;
                    ++wInd;
                }
                else {
                    //if a run of equivalent words, those words essentially representing the same
                    //elongated pattern. still apply numerosity reduction, however use the central
                    //time position of the elongated pattern to represent its position
                    addWordToPyramid(word, wInd - (trivialMatchCount/2), bag);
                    lastWord = word;
                    trivialMatchCount = 0;
                    ++wInd;
                }
            }
            applyPyramidWeights(bag);
            return bag;
        }
protected BitWordInt createWord(double[] dft) {
BitWordInt word = new BitWordInt();
for (int l = 0; l < wordLength; ++l) {//for each letter
for (int bp = 0; bp < alphabetSize; ++bp) {//run through breakpoints until right one found
if (dft[l] <= breakpoints[l][bp]) {
word.push(bp); //add corresponding letter to word
break;
}
}
}
return word;
}
/**
* Assumes class index, if present, is last
* @return data of passed instance in a double array with the class value removed if present
*/
protected static double[] toArrayNoClass(Instance inst) {
int length = inst.numAttributes();
if (inst.classIndex() >= 0)
--length;
double[] data = new double[length];
for (int i=0, j=0; i < inst.numAttributes(); ++i)
if (inst.classIndex() != i)
data[j++] = inst.value(i);
return data;
}
/**
* @return BOSSSpatialPyramidsTransform-ed bag, built using current parameters
*/
public SPBag BOSSSpatialPyramidsTransform(Instance inst) {
double[][] mfts = performMFT(toArrayNoClass(inst)); //approximation
SPBag bag2 = createSPBagSingle(mfts); //discretisation/bagging
bag2.setClassVal(inst.classValue());
return bag2;
}
        /**
         * Shortens all bags in this BOSSSpatialPyramids_Redo instance (histograms) to the newWordLength;
         * if wordlengths are the same, this instance is returned UNCHANGED.
         *
         * @param newWordLength wordLength to shorten to; must be in [2, wordLength]
         * @return new boss classifier with newWordLength, or this classifier if wordlengths are same
         * @throws Exception if newWordLength is greater than the current word length or below 2
         */
        public BOSSSpatialPyramidsIndividual buildShortenedSPBags(int newWordLength) throws Exception {
            if (newWordLength == wordLength) //case of first iteration of word length search in ensemble
                return this;
            if (newWordLength > wordLength)
                throw new Exception("Cannot incrementally INCREASE word length, current:"+wordLength+", requested:"+newWordLength);
            if (newWordLength < 2)
                throw new Exception("Invalid wordlength requested, current:"+wordLength+", requested:"+newWordLength);
            //new classifier shares SFAwords/breakpoints with this one, only bags are rebuilt
            BOSSSpatialPyramidsIndividual newBoss = new BOSSSpatialPyramidsIndividual(this, newWordLength);
            //build hists with new word length from SFA words, and copy over the class values of original insts
            for (int i = 0; i < bags.size(); ++i) {
                SPBag newSPBag = createSPBagFromWords(newWordLength, SFAwords[i], true);
                newSPBag.setClassVal(bags.get(i).getClassVal());
                newBoss.bags.add(newSPBag);
            }
            return newBoss;
        }
protected SPBag shortenSPBag(int newWordLength, int bagIndex) {
SPBag newSPBag = new SPBag();
for (BitWordInt word : SFAwords[bagIndex]) {
BitWordInt shortWord = new BitWordInt(word);
shortWord.shortenByFourierCoefficient();
Double val = newSPBag.get(shortWord);
if (val == null)
val = 0.0;
newSPBag.put(new ComparablePair<BitWord, Integer>(shortWord, 0), val + 1.0);
}
return newSPBag;
}
        /**
         * Builds a bag from the set of words for a pre-transformed series of a given wordlength.
         * Applies numerosity reduction (if enabled) and pyramid weighting, as in createSPBagSingle.
         *
         * @param wordLengthSearching if true, length of each SFAwords word assumed to be 16,
         * and need to shorten it to whatever actual value needed in this particular version of the
         * classifier. if false, this is a standalone classifier with pre-defined wordlength (etc),
         * and therefore sfawords are that particular length already, no need to shorten
         */
        protected SPBag createSPBagFromWords(int thisWordLength, BitWordInt[] words, boolean wordLengthSearching) {
            SPBag bag = new SPBag();
            BitWordInt lastWord = new BitWordInt();
            int wInd = 0; //current window index
            int trivialMatchCount = 0; //keeps track of how many words have been the same so far
            for (BitWordInt w : words) {
                BitWordInt word = new BitWordInt(w);
                if (wordLengthSearching)
                    word.shorten(16-thisWordLength); //TODO hack, word.length=16=maxwordlength, wordLength of 'this' BOSSSpatialPyramids instance unreliable, length of SFAwords = maxlength
                //add to bag, unless num reduction applies
                if (numerosityReduction && word.equals(lastWord)) {
                    ++trivialMatchCount;
                    ++wInd;
                }
                else {
                    //if a run of equivalent words, those words essentially representing the same
                    //elongated pattern. still apply numerosity reduction, however use the central
                    //time position to represent its position
                    addWordToPyramid(word, wInd - (trivialMatchCount/2), bag);
                    lastWord = word;
                    trivialMatchCount = 0;
                    ++wInd;
                }
            }
            applyPyramidWeights(bag);
            return bag;
        }
protected void changeNumLevels(int newLevels) {
//curently, simply remaking bags from words
//alternatively: un-weight all bags, add(run through SFAwords again)/remove levels, re-weight all
if (newLevels == this.levels)
return;
this.levels = newLevels;
for (int inst = 0; inst < bags.size(); ++inst) {
SPBag bag = createSPBagFromWords(wordLength, SFAwords[inst], true); //rebuild bag
bag.setClassVal(bags.get(inst).classVal);
bags.set(inst, bag); //overwrite old
}
}
protected void applyPyramidWeights(SPBag bag) {
for (Entry<ComparablePair<BitWord, Integer>, Double> ent : bag.entrySet()) {
//find level that this quadrant is on
int quadrant = ent.getKey().var2;
int qEnd = 0;
int level = 0;
while (qEnd < quadrant) {
int numQuadrants = (int)Math.pow(2, ++level);
qEnd+=numQuadrants;
}
double val = ent.getValue() * (Math.pow(levelWeighting, levels-level-1)); //weighting ^ (levels - level)
bag.put(ent.getKey(), val);
}
}
        /**
         * Adds one occurrence of word to the bag at every pyramid level, keyed by
         * the quadrant its (window-centre) time position falls into at that level.
         *
         * @param wInd window index of the word within the series
         */
        protected void addWordToPyramid(BitWordInt word, int wInd, SPBag bag) {
            int qStart = 0; //for this level, whats the start index for quadrants
            //e.g level 0 = 0
            //    level 1 = 1
            //    level 2 = 3
            for (int l = 0; l < levels; ++l) {
                //need to do the cell finding thing in the regular grid
                int numQuadrants = (int)Math.pow(2, l);
                int quadrantSize = seriesLength / numQuadrants;
                int pos = wInd + (windowSize/2); //use the middle of the window as its position
                int quadrant = qStart + (pos/quadrantSize);
                ComparablePair<BitWord, Integer> key = new ComparablePair<>(word, quadrant);
                Double val = bag.get(key);
                if (val == null)
                    val = 0.0;
                bag.put(key, ++val); //increment this word's count in this quadrant
                qStart += numQuadrants;
            }
        }
protected BitWordInt[] createSFAwords(Instance inst) throws Exception {
double[][] dfts2 = performMFT(toArrayNoClass(inst)); //approximation
BitWordInt[] words2 = new BitWordInt[dfts2.length];
for (int window = 0; window < dfts2.length; ++window)
words2[window] = createWord(dfts2[window]);//discretisation
return words2;
}
        /**
         * Trains the classifier: learns MCB breakpoints from the data, then builds
         * and stores the SFA words and spatial-pyramid bag for every train instance.
         *
         * @throws Exception if the class attribute is not the last attribute
         */
        @Override
        public void buildClassifier(Instances data) throws Exception {
            if (data.classIndex() != data.numAttributes()-1)
                throw new Exception("BOSSSpatialPyramids_BuildClassifier: Class attribute not set as last attribute in dataset");
            seriesLength = data.numAttributes()-1;
            breakpoints = MCB(data); //breakpoints to be used for making sfa words for train AND test data
            SFAwords = new BitWordInt[data.numInstances()][];
            bags = new ArrayList<>(data.numInstances());
            for (int inst = 0; inst < data.numInstances(); ++inst) {
                SFAwords[inst] = createSFAwords(data.get(inst));
                SPBag bag = createSPBagFromWords(wordLength, SFAwords[inst], false);
                bag.setClassVal(data.get(inst).classValue());
                bags.add(bag);
            }
        }
/**
* Computes BOSSSpatialPyramids distance between two bags d(test, train), is NON-SYMETRIC operation, ie d(a,b) != d(b,a)
* @return distance FROM instA TO instB
*/
public double BOSSSpatialPyramidsDistance(SPBag instA, SPBag instB) {
double dist = 0.0;
//find dist only from values in instA
for (Entry<ComparablePair<BitWord, Integer>, Double> entry : instA.entrySet()) {
Double valA = entry.getValue();
Double valB = instB.get(entry.getKey());
if (valB == null)
valB = 0.0;
dist += (valA-valB)*(valA-valB);
}
return dist;
}
        /**
         * Computes BOSSSpatialPyramids distance between two bags d(test, train), is NON-SYMETRIC operation, ie d(a,b) != d(b,a).
         *
         * Quits early if the dist-so-far is greater than bestDist (assumed is in fact the dist still squared), and returns Double.MAX_VALUE
         *
         * @param bestDist best (squared) distance found so far, used as the early-abandon threshold
         * @return distance FROM instA TO instB, or Double.MAX_VALUE if it would be greater than bestDist
         */
        public double BOSSSpatialPyramidsDistance(SPBag instA, SPBag instB, double bestDist) {
            double dist = 0.0;
            //find dist only from values in instA
            for (Entry<ComparablePair<BitWord, Integer>, Double> entry : instA.entrySet()) {
                Double valA = entry.getValue();
                Double valB = instB.get(entry.getKey());
                if (valB == null)
                    valB = 0.0;
                dist += (valA-valB)*(valA-valB);
                if (dist > bestDist) //early abandon: cannot be the new nearest neighbour
                    return Double.MAX_VALUE;
            }
            return dist;
        }
@Override
public double classifyInstance(Instance instance) throws Exception {
SPBag testSPBag = BOSSSpatialPyramidsTransform(instance);
double bestDist = Double.MAX_VALUE;
double nn = -1.0;
for (int i = 0; i < bags.size(); ++i) {
double dist = BOSSSpatialPyramidsDistance(testSPBag, bags.get(i), bestDist);
if (dist < bestDist) {
bestDist = dist;
nn = bags.get(i).getClassVal();
}
}
return nn;
}
/**
* Used within BOSSSpatialPyramidsEnsemble as part of a leave-one-out crossvalidation, to skip having to rebuild
* the classifier every time (since the n histograms would be identical each time anyway), therefore this classifies
* the instance at the index passed while ignoring its own corresponding histogram
*
* @param test index of instance to classify
* @return classification
*/
public double classifyInstance(int test) {
double bestDist = Double.MAX_VALUE;
double nn = -1.0;
SPBag testSPBag = bags.get(test);
for (int i = 0; i < bags.size(); ++i) {
if (i == test) //skip 'this' one, leave-one-out
continue;
double dist = BOSSSpatialPyramidsDistance(testSPBag, bags.get(i), bestDist);
if (dist < bestDist) {
bestDist = dist;
nn = bags.get(i).getClassVal();
}
}
return nn;
}
        /**
         * Not implemented; classification is via 1-NN in classifyInstance.
         * @throws UnsupportedOperationException always
         */
        @Override
        public double[] distributionForInstance(Instance instance) throws Exception {
            throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
        }

        /**
         * Not implemented.
         * @throws UnsupportedOperationException always
         */
        @Override
        public Capabilities getCapabilities() {
            throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
        }
public static void detailedFold0Test(String dset) {
System.out.println("BOSSSpatialPyramidsIndividual DetailedTest\n");
try {
Instances train = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dset+"\\"+dset+"_TRAIN.arff");
Instances test = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dset+"\\"+dset+"_TEST.arff");
System.out.println(train.relationName());
int windowSize = 10;
int alphabetSize = 4;
int wordLength = 58;
int levels = 2;
boolean norm = true;
BOSSSpatialPyramidsIndividual boss = new BOSSSpatialPyramidsIndividual(windowSize, alphabetSize, wordLength, norm, levels);
System.out.println(boss.getWordLength() + " " + boss.getAlphabetSize() + " " + boss.getWindowSize() + " " + boss.isNorm());
System.out.println("Training starting");
long start = System.nanoTime();
boss.buildClassifier(train);
double trainTime = (System.nanoTime() - start) / 1000000000.0; //seconds
System.out.println("Training done (" + trainTime + "s)");
System.out.println("Breakpoints: ");
for (int i = 0; i < boss.breakpoints.length; i++) {
System.out.print("Letter " + i + ": ");
for (int j = 0; j < boss.breakpoints[i].length; j++) {
System.out.print(boss.breakpoints[i][j] + " ");
}
System.out.println("");
}
System.out.println("\nTesting starting");
start = System.nanoTime();
double acc = ClassifierTools.accuracy(test, boss);
double testTime = (System.nanoTime() - start) / 1000000000.0; //seconds
System.out.println("Testing done (" + testTime + "s)");
System.out.println("\nACC: " + acc);
}
catch (Exception e) {
System.out.println(e);
e.printStackTrace();
}
}
}
} | 59,224 | 40.185675 | 228 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/dictionary_based/boss_variants/BoTSWEnsemble.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.dictionary_based.boss_variants;
import java.util.Iterator;
import java.util.LinkedList;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import utilities.ClassifierTools;
import utilities.InstanceTools;
import tsml.classifiers.SaveParameterInfo;
import utilities.Timer;
import weka.classifiers.Classifier;
import weka.classifiers.functions.LibSVM;
import experiments.data.DatasetLoading;
import java.util.concurrent.TimeUnit;
import tsml.classifiers.EnhancedAbstractClassifier;
import weka.clusterers.SimpleKMeans;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.SelectedTag;
import weka.core.TechnicalInformation;
/**
* BoTSW classifier with parameter search and ensembling, if parameters are known,
* use 'BoTSW.java' classifier and directly provide them.
*
* Will use euclidean distance by default. Can use histogram intersection by calling
* setDistanceFunction(...) prior to the call to buildClassifier()
*
* If svm wanted, call setUseSVM(true). Precise SVM implementation/accuracy could not be recreated,
* likely due to differences in implementation between java/WEKA c++/CV, and likewise there is a
* small difference in kmeans, epsilon value ignored
*
* Structure:
* buildClassifier() contains ensemble building and training
* classifyInstance() classifies a single test instance via majority vote of all ensemble members
*
* in the nested class BoTSW:
* buildClassifier() trains a BoTSW classifier with a given parameter set
* classifyInstance() classifies a single test instance with a given parameter set
*
* @author James Large
*
* Implementation based on the algorithm described in getTechnicalInformation()
*/
public class BoTSWEnsemble extends EnhancedAbstractClassifier implements SaveParameterInfo {
    /** Default constructor; flags that this ensemble can estimate its own train performance. */
    public BoTSWEnsemble() {
        super(CAN_ESTIMATE_OWN_PERFORMANCE);
    }
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
result = new TechnicalInformation(TechnicalInformation.Type.ARTICLE);
result.setValue(TechnicalInformation.Field.AUTHOR, "Bailly, Adeline and Malinowski, Simon and Tavenard, Romain and Guyet, Thomas and Chapel, Laetitia");
result.setValue(TechnicalInformation.Field.TITLE, "Bag-of-Temporal-SIFT-Words for Time Series Classification");
result.setValue(TechnicalInformation.Field.JOURNAL, "ECML/PKDD Workshop on Advanced Analytics and Learning on Temporal Data");
result.setValue(TechnicalInformation.Field.YEAR, "2015");
return result;
}
    private List<BoTSWWindow> classifiers; //retained ensemble members, built in buildClassifier
    private final double correctThreshold = 0.92; //NOTE(review): presumably the accuracy-retention threshold for members - confirm in buildClassifier
    private int maxEnsembleSize = Integer.MAX_VALUE; //cap on retained members, settable via setMaxEnsembleSize
    private final Integer[] n_bRanges = { 4, 8, 12, 16, 20 }; //parameter grid for n_b
    private final Integer[] aRanges = { 4, 8 }; //parameter grid for a
    private final Integer[] kRanges = { 32, 64, 128, 256, 512, 1024 }; //parameter grid for codebook size k
    private final Integer[] csvmRanges = {1, 10, 100}; //not currently used, using 1NN
    private BoTSW.DistFunction dist = BoTSW.DistFunction.EUCLIDEAN_DISTANCE; //histogram distance, settable pre-build
    private Instances train; //train data, stored by buildClassifier
    private double ensembleCvAcc = -1; //cached ensemble crossvalidation accuracy, -1 until computed
public static class BoTSWWindow implements Comparable<BoTSWWindow> {
private BoTSW classifier;
public double accuracy;
private static final long serialVersionUID = 2L;
public BoTSWWindow(BoTSW classifer, double accuracy, String dataset) {
this.classifier = classifer;
this.accuracy = accuracy;
}
public double classifyInstance(Instance inst) throws Exception {
return classifier.classifyInstance(inst);
}
public double classifyInstance(int test) throws Exception {
return classifier.classifyInstance(test);
}
public void clearClassifier() {
classifier = null;
}
/**
* @return { numIntervals(word length), alphabetSize, slidingWindowSize }
*/
public String getParameters() { return classifier.getParameters(); }
public int[] getParametersValues() { return classifier.getParametersValues(); }
public int getNB() { return classifier.params.n_b; }
public int getA() { return classifier.params.a; }
public int getK() { return classifier.params.k; }
@Override
public int compareTo(BoTSWWindow other) {
if (this.accuracy > other.accuracy)
return 1;
if (this.accuracy == other.accuracy)
return 0;
return -1;
}
}
@Override
public String getParameters() {
StringBuilder sb = new StringBuilder();
BoTSWWindow first = classifiers.get(0);
sb.append(first.getParameters());
for (int i = 1; i < classifiers.size(); ++i) {
BoTSWWindow botsw = classifiers.get(i);
sb.append(",").append(botsw.getParameters());
}
return sb.toString();
}
    /** Leave-one-out CV: the number of folds equals the number of train instances. */
    @Override
    public int setNumberOfFolds(Instances data){
        return data.numInstances();
    }
    /**
     * @return { numIntervals(word length), alphabetSize, slidingWindowSize } for each BoTSWWindow in this *built* classifier
     */
    public int[][] getParametersValues() {
        int[][] params = new int[classifiers.size()][];
        int i = 0;
        for (BoTSWWindow botsw : classifiers)
            params[i++] = botsw.getParametersValues();
        return params;
    }
    /** Caps how many members the ensemble may retain (default unlimited). */
    public void setMaxEnsembleSize(int max) {
        maxEnsembleSize = max;
    }
    /** Sets the histogram distance used by every member; must be set before buildClassifier. */
    public void setDistanceFunction(BoTSW.DistFunction dist) {
        this.dist = dist;
    }
    /**
     * Builds the ensemble: grid-searches (n_b, a, k) BoTSW parameter combinations,
     * scoring each by leave-one-out CV on the train data, and retains every member
     * whose accuracy is within correctThreshold of the best (up to maxEnsembleSize).
     * Keypoint/gaussian data and feature descriptions are cached and reused across
     * parameter settings to avoid recomputation.
     * @throws Exception if the class attribute is not the last attribute
     */
    @Override
    public void buildClassifier(final Instances data) throws Exception {
        //Timer.PRINT = true; //timer will ignore print request by default, similar behaviour to NDEBUG
        this.train=data;
        if (data.classIndex() != data.numAttributes()-1)
            throw new Exception("BOSSEnsemble_BuildClassifier: Class attribute not set as last attribute in dataset");
        classifiers = new LinkedList<BoTSWWindow>();
        int numSeries = data.numInstances();
        //keep track of current max window size accuracy, constantly check for correctthreshold to discard to save space
        double maxAcc = -1.0;
        double minMaxAcc = -1.0; //the acc of the worst member to make it into the final ensemble as it stands
        boolean firstBuild = true;
        BoTSW.FeatureDiscoveryData[] fdData = null; //keypoint location and guassian of series data
        for (Integer n_b : n_bRanges) {
            for (Integer a : aRanges) {
                if (n_b*a > data.numAttributes()-1)
                    continue; //series not long enough to provide suffient gradient data for these params
                BoTSW botsw = new BoTSW(n_b, a, kRanges[0]);
                botsw.setSearchingForK(true);
                if (firstBuild) {
                    botsw.buildClassifier(data); //initial setup for these params
                    fdData = botsw.fdData;
                    firstBuild = false; //save the guassian and keypoint data for all series,
                    //these are same regardless of (the searched) parameters for a given dataset,
                    //so only compute once
                }
                else {
                    botsw.giveFeatureDiscoveryData(fdData);
                    botsw.buildClassifier(data);
                }
                //save the feature data (dependent on n_b and a) for reuse when searching for value of k
                Instances featureData = new Instances(botsw.clusterData); //constructor creates fresh copy
                boolean firstk = true;
                for (Integer k : kRanges) {
                    if (firstk) //of this loop
                        firstk = false; //do nothing here, next loop go to the else
                    else {
                        botsw = new BoTSW(n_b, a, k);
                        botsw.setSearchingForK(true);
                        botsw.giveFeatureData(featureData);
                        botsw.buildClassifier(data);
                        //classifier now does not need to extract/describes features again
                        //simply clusters with new value of k
                    }
                    botsw.setDistanceFunction(dist);
                    //leave-one-out CV accuracy of this parameter combination
                    int correct = 0;
                    for (int i = 0; i < numSeries; ++i) {
                        double c = botsw.classifyInstance(i); //classify series i, while ignoring its corresponding histogram i
                        if (c == data.get(i).classValue())
                            ++correct;
                    }
                    double acc = (double)correct/(double)numSeries;
                    //if not within correct threshold of the current max, dont bother storing at all
                    if (makesItIntoEnsemble(acc, maxAcc, minMaxAcc, classifiers.size())) {
                        BoTSWWindow bw = new BoTSWWindow(botsw, acc, data.relationName());
                        //bw.classifier.clean();
                        classifiers.add(bw);
                        if (acc > maxAcc) {
                            maxAcc = acc;
                            //get rid of any extras that dont fall within the new max threshold
                            Iterator<BoTSWWindow> it = classifiers.iterator();
                            while (it.hasNext()) {
                                BoTSWWindow b = it.next();
                                if (b.accuracy < maxAcc * correctThreshold)
                                    it.remove();
                            }
                        }
                        while (classifiers.size() > maxEnsembleSize) {
                            //cull the 'worst of the best' until back under the max size
                            int minAccInd = (int)findMinEnsembleAcc()[0];
                            classifiers.remove(minAccInd);
                        }
                        minMaxAcc = findMinEnsembleAcc()[1]; //new 'worst of the best' acc
                    }
                }
            }
        }
        if (getEstimateOwnPerformance())
            ensembleCvAcc = findEnsembleTrainAcc(data);
    }
//[0] = index, [1] = acc
private double[] findMinEnsembleAcc() {
double minAcc = Double.MIN_VALUE;
int minAccInd = 0;
for (int i = 0; i < classifiers.size(); ++i) {
double curacc = classifiers.get(i).accuracy;
if (curacc < minAcc) {
minAcc = curacc;
minAccInd = i;
}
}
return new double[] { minAccInd, minAcc };
}
private boolean makesItIntoEnsemble(double acc, double maxAcc, double minMaxAcc, int curEnsembleSize) {
if (acc >= maxAcc * correctThreshold) {
if (curEnsembleSize >= maxEnsembleSize)
return acc > minMaxAcc;
else
return true;
}
return false;
}
    /**
     * Leave-one-out CV accuracy of the whole ensemble on the train data;
     * also populates trainResults with per-instance predictions and timings.
     */
    private double findEnsembleTrainAcc(Instances data) throws Exception {
        trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
        trainResults.setEstimatorName(getClassifierName());
        trainResults.setDatasetName(data.relationName());
        trainResults.setFoldID(seed);
        trainResults.setSplit("train");
        trainResults.setParas(getParameters());
        double correct = 0;
        for (int i = 0; i < data.numInstances(); ++i) {
            long predTime = System.nanoTime();
            //classify series i, while ignoring its corresponding histogram i
            double[] probs = distributionForInstance(i, data.numClasses());
            predTime = System.nanoTime() - predTime;
            int maxClass = findIndexOfMax(probs, rand); // ties broken randomly
            if (maxClass == data.get(i).classValue())
                ++correct;
            trainResults.addPrediction(data.get(i).classValue(), probs, maxClass, predTime, "");
        }
        trainResults.finaliseResults();
        double result = correct / data.numInstances();
        return result;
    }
public double getEnsembleCvAcc(){
if(ensembleCvAcc>=0){
return this.ensembleCvAcc;
}
try{
return this.findEnsembleTrainAcc(train);
}catch(Exception e){
e.printStackTrace();
}
return -1;
}
    /**
     * Classify the train instance at index 'test', whilst ignoring the corresponding bags
     * in each of the members of the ensemble, for use in CV of BoTSWEnsemble
     */
    public double classifyInstance(int test, int numclasses) throws Exception {
        double[] dist = distributionForInstance(test, numclasses);
        return findIndexOfMax(dist, rand);
    }
    /** Normalised vote histogram over classes for train instance 'test' (leave-one-out). */
    public double[] distributionForInstance(int test, int numclasses) throws Exception {
        double[] classHist = new double[numclasses];
        //get votes from all windows
        double sum = 0;
        for (BoTSWWindow classifier : classifiers) {
            double classification = classifier.classifyInstance(test);
            classHist[(int)classification]++;
            sum++;
        }
        if (sum != 0)
            for (int i = 0; i < classHist.length; ++i)
                classHist[i] /= sum;
        return classHist;
    }
@Override
public double classifyInstance(Instance instance) throws Exception {
double[] dist = distributionForInstance(instance);
return findIndexOfMax(dist, rand);
}
@Override
public double[] distributionForInstance(Instance instance) throws Exception {
double[] classHist = new double[instance.numClasses()];
//get votes from all windows
double sum = 0;
for (BoTSWWindow classifier : classifiers) {
double classification = classifier.classifyInstance(instance);
classHist[(int)classification]++;
sum++;
}
if (sum != 0)
for (int i = 0; i < classHist.length; ++i)
classHist[i] /= sum;
return classHist;
}
    /** Not implemented for this classifier. */
    @Override
    public Capabilities getCapabilities() {
        throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
    }
public static void main(String[] args) throws Exception{
//Minimum working example
String dataset = "ItalyPowerDemand";
Instances train = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dataset+"\\"+dataset+"_TRAIN.arff");
Instances test = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dataset+"\\"+dataset+"_TEST.arff");
Classifier c = new BoTSWEnsemble();
((BoTSWEnsemble)c).dist = BoTSW.DistFunction.BOSS_DISTANCE;
c.buildClassifier(train);
double accuracy = ClassifierTools.accuracy(test, c);
System.out.println("BoTSWEnsemble accuracy on " + dataset + " fold 0 = " + accuracy);
//Other examples/tests
// detailedFold0Test(dataset);
// resampleTest(dataset, 25);
}
    /** Verbose single-fold run: trains, prints ensemble composition, then tests with timings. */
    public static void detailedFold0Test(String dset) {
        System.out.println("BoTSWEnsemble DetailedTest\n");
        try {
            Instances train = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dset+"\\"+dset+"_TRAIN.arff");
            Instances test = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dset+"\\"+dset+"_TEST.arff");
            System.out.println(train.relationName());
            BoTSWEnsemble botsw = new BoTSWEnsemble();
            //TRAINING
            System.out.println("Training starting");
            long start = System.nanoTime();
            botsw.buildClassifier(train);
            double trainTime = (System.nanoTime() - start) / 1000000000.0; //seconds
            System.out.println("Training done (" + trainTime + "s)");
            //RESULTS OF TRAINING: each retained member's parameters and CV accuracy
            System.out.println("Ensemble Size: " + botsw.classifiers.size());
            System.out.println("Param sets: ");
            int count = 0;
            for (BoTSWWindow window : botsw.classifiers)
                System.out.println(count++ + ": " + window.getNB() + " " + window.getA() + " " + window.getK() + " " + window.accuracy);
            //TESTING
            System.out.println("\nTesting starting");
            start = System.nanoTime();
            double acc = ClassifierTools.accuracy(test, botsw);
            double testTime = (System.nanoTime() - start) / 1000000000.0; //seconds
            System.out.println("Testing done (" + testTime + "s)");
            System.out.println("\nACC: " + acc);
        }
        catch (Exception e) {
            System.out.println(e);
            e.printStackTrace();
        }
    }
public static void resampleTest(String dset, int resamples) throws Exception {
Instances train = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dset+"\\"+dset+"_TRAIN.arff");
Instances test = DatasetLoading.loadDataNullable("C:\\TSC Problems\\"+dset+"\\"+dset+"_TEST.arff");
Classifier c = new BoTSWEnsemble();
//c.setCVPath("C:\\tempproject\\BOSSEnsembleCVtest.csv");
double [] accs = new double[resamples];
for(int i=0;i<resamples;i++){
Instances[] data=InstanceTools.resampleTrainAndTestInstances(train, test, i);
c.buildClassifier(data[0]);
accs[i]= ClassifierTools.accuracy(data[1], c);
if (i==0)
System.out.print(accs[i]);
else
System.out.print("," + accs[i]);
}
double mean = 0;
for(int i=0;i<resamples;i++)
mean += accs[i];
mean/=resamples;
System.out.println("\n\nBoTSWEnsemble mean acc over " + resamples + " resamples: " + mean);
}
/**
* Bag of Temporal SIFT Words classifier to be used with known parameters,
* for botsw with parameter search, use BoTSWEnsemble.
*
* Will use euclidean distance by default. Can use histogram intersection by calling
* setDistanceFunction(...) prior to the call to buildClassifier()
*
* If svm wanted, call setUseSVM(true). Precise SVM implementation/accuracy could not be recreated,
* likely due to differences in implementation between java/WEKA c++/CV, and likewise
* for kmeans, epsilon value ignored
*
* Params: n_b, a, k, c_svm(if using svm)
*
* @author James Large
*
* Implementation based on the algorithm described in getTechnicalInformation() and original code
*/
public static class BoTSW implements Classifier, Serializable, SaveParameterInfo{
        public SimpleKMeans kmeans = null;   // clustering of feature descriptors into k 'words'
        public LibSVM svm = null;            // only trained when useSVM is set
        public Params params;
        public BoTSW_Bag[] bags;             // one normalised word histogram per train series
        public FeatureDiscoveryData[] fdData; // per-series keypoints and gaussian-filtered data
        private boolean useSVM = false;
        private boolean clusteringDataPreBuilt = false;  // feature descriptors injected externally
        private boolean preprocessDataPreBuilt = false;  // keypoint/gaussian data injected externally
        private boolean searchingForK = false;           // reduces k-means restarts during param search
        DistFunction distFunc = DistFunction.EUCLIDEAN_DISTANCE;
        public enum DistFunction { //TODO replace with a distance function interface and move to more general package
            EUCLIDEAN_DISTANCE,
            HISTOGRAM_INTERSECTION,
            BOSS_DISTANCE
        }
        //keeping these as members so that when header info is found in training, can be reused for testing
        public Instances clusterData;
        public Instances bagData;
        private String trainCVPath; //SaveCVAccuracy
        private boolean trainCV=false;
        /** Default parameters: the smallest value of each searched range. */
        public BoTSW() {
            params = new Params(4, 4, 32, 1); //default smallest of each
        }
        public BoTSW(int n_b, int a, int k) {
            params = new Params(n_b, a, k);
        }
        public BoTSW(int n_b, int a, int k, int c_svm) {
            params = new Params(n_b, a, k, c_svm);
        }
        /** @return "n_b=../a=../k=.." string describing this member's parameters */
        @Override
        public String getParameters() {
            StringBuilder sb = new StringBuilder();
            sb.append("n_b=").append(params.n_b).append("/a=").append(params.a);
            sb.append("/k=").append(params.k);
            return sb.toString();
        }
        /**
         * Parameter set for one BoTSW: each keypoint is described with n_b blocks of
         * a points; features are clustered into k words; c_svm is the SVM cost
         * (only used when the SVM classifier is enabled).
         */
        public static class Params {
            //set (or searched for) by user
            public int n_b;
            public int a;
            public int k;
            public double c_svm;
            public int denseSampleRate = 0; // keypoint sampling stride, 0 = derive from series length
            public int n_sc; //calculated via series length
            //fixed
            public final double k_sc = 1.257013374521; // scale step between successive gaussians
            public final double sigma = 1.6;           // base gaussian width
            //normalise booleans omitted, assuming SSR/l2 norm
            public Params(int n_b, int a, int k) {
                this.n_b = n_b;
                this.a = a;
                this.k = k;
                this.c_svm = 1;
                this.n_sc = 0;
            }
            public Params(int n_b, int a, int k, double c_svm) {
                this.n_b = n_b;
                this.a = a;
                this.k = k;
                this.c_svm = c_svm;
                this.n_sc = 0;
            }
            public Params(int n_b, int a, int k, double c_svm, int n_sc) {
                this.n_b = n_b;
                this.a = a;
                this.k = k;
                this.c_svm = c_svm;
                this.n_sc = n_sc;
            }
            /** Derives (and caps) the number of scales from the series length. */
            public int calcNumScales(int seriesLength) {
                int max_sc = (int)(Math.log(0.125 * seriesLength / sigma) / Math.log(k_sc));
                if(n_sc == 0 || n_sc > max_sc)
                    n_sc = max_sc;
                return n_sc;
            }
            //num features for handoutlines as example... == 622307000
            //num doubles with max n_b and a == 9969120000
            //min memory requirement for that == 76058 MB
            //therefore in Params.calcDenseSampleRate(), set sample rate to m/100
            /** Keypoint sampling stride: one keypoint per seriesLength/100 points (minimum 1). */
            public int calcDenseSampleRate(int seriesLength) {
    //            denseSampleRate = (int)((Math.log(seriesLength-200)) / 2);
                denseSampleRate = seriesLength / 100;
                if (denseSampleRate < 1) denseSampleRate = 1;
                return denseSampleRate;
            }
            public void setDenseSampleRate(int rate) {
                denseSampleRate = rate;
            }
            @Override
            public String toString() {
                return n_b + "_" + a + "_" + k;
            }
        }
        /** Per-series cache of detected keypoints plus gaussian-filtered series data. */
        public static class FeatureDiscoveryData {
            public ArrayList<KeyPoint> keypoints;
            public GuassianData gdata;
            public FeatureDiscoveryData(ArrayList<KeyPoint> keypoints, GuassianData gdata) {
                this.keypoints = keypoints;
                this.gdata = gdata;
            }
        }
        /** Gaussian-filtered versions of a series at each scale and their differences (DoGs).
         *  (Class name keeps the original 'Guassian' spelling for API compatibility.) */
        public static class GuassianData {
            public double[][] guassSeries;
            public double[][] DoGs;
        }
        /** A (time, scale) location at which a feature descriptor is computed. */
        public static class KeyPoint {
            public int time;
            public int scale;
            public KeyPoint(int time, int scale) {
                this.time = time;
                this.scale = scale;
            }
        }
        /** A series' normalised word histogram together with its class label. */
        public static class BoTSW_Bag {
            double[] hist;
            double classValue;
            public BoTSW_Bag(double[] hist, double classValue) {
                this.hist = hist;
                this.classValue = classValue;
            }
        }
        /** Sets the histogram distance used in 1-NN classification. */
        public void setDistanceFunction(DistFunction d) {
            distFunc = d;
        }
        /** When searching for k, fewer k-means restarts are used to save time. */
        public void setSearchingForK(boolean b) {
            searchingForK = b;
        }
        /** Classify with a LibSVM over the bags instead of 1-NN. */
        public void setUseSVM(boolean use) {
            useSVM = use;
        }
        /** @return { n_b, a, k } */
        public int[] getParametersValues() {
            return new int[] {params.n_b, params.a, params.k};
        }
        /** Injects precomputed keypoint/gaussian data so preprocessing is skipped at build time. */
        public void giveFeatureDiscoveryData(FeatureDiscoveryData[] data) {
            fdData = data;
            preprocessDataPreBuilt = true;
        }
        /** Injects precomputed feature descriptors so only clustering remains at build time. */
        public void giveFeatureData(Instances features) {
            clusterData = features;
            clusteringDataPreBuilt = true;
            preprocessDataPreBuilt = true;
        }
@Override
public void buildClassifier(Instances data) throws Exception {
data = new Instances(data);
if (params.n_sc == 0) //has not already been set
params.calcNumScales(data.numAttributes()-1);
if (params.denseSampleRate == 0)
params.calcDenseSampleRate(data.numAttributes()-1);
if (!preprocessDataPreBuilt) {
//build the guassian and keypoint location data if not already built
fdData = new FeatureDiscoveryData[data.numInstances()];
for (int i = 0; i < data.numInstances(); ++i) {
GuassianData gdata = findDoGs(toArrayNoClass(data.get(i)), params.sigma, params.k_sc, params.n_sc);
ArrayList<KeyPoint> keypoints = findDenseKeypoints(gdata);
fdData[i] = new FeatureDiscoveryData(keypoints, gdata);
}
}
if (!clusteringDataPreBuilt) {
//describe the keypoints found before using the current parameter settings
//n_b and a, may have already been done during parameter search if now just
//searching for values of k
double[][][] features = new double[data.numInstances()][][];
for (int i = 0; i < data.numInstances(); ++i)
features[i] = describeKeyPoints(fdData[i].gdata.guassSeries, fdData[i].keypoints);
//stuff features into instances format
ArrayList<Attribute> atts = new ArrayList<>();
assert(features[0][0].length == params.n_b*2);
for (int i = 0; i < features[0][0].length; ++i)
atts.add(new Attribute(""+i));
clusterData = new Instances("ClusterInfo", atts, features.length * features[0].length);
for (int i = 0; i < features.length; ++i)
for (int j = 0; j < features[i].length; ++j)
clusterData.add(new DenseInstance(1, features[i][j]));
}
//cluster the feature descriptions that have been found/provided
int maxIterations = 10;
int numAttempts = searchingForK ? 2 : 10;
double epsilon = 0.001; //not used currently, simplekmeans does not support, and is non trivial to implement
//defines the minimum change in centroids before stopping the iteration, has potential implications for runtime,
//however final clasification is extremely unlikely to change
double bestCompactness = Double.MAX_VALUE; //custom implemented
for (int i = 0; i < numAttempts; ++i) {
SimpleKMeans t_kmeans = new SimpleKMeans();
t_kmeans.setMaxIterations(maxIterations);
t_kmeans.setInitializeUsingKMeansPlusPlusMethod(true);
t_kmeans.setSeed(i);
t_kmeans.setNumClusters(params.k);
t_kmeans.setPreserveInstancesOrder(true); //needed to call .getAssignments()
t_kmeans.buildClusterer(clusterData);
if (numAttempts > 1) {
double compactness = compactnessOfClustering(t_kmeans, clusterData);
if (compactness < bestCompactness)
kmeans = t_kmeans;
}
else
kmeans = t_kmeans;
}
int [] assignments = kmeans.getAssignments(); //final assignments of each FEATURE
//build histograms
bags = new BoTSW_Bag[data.numInstances()];
int featsPerSeries = clusterData.numInstances() / data.numInstances();
int feat = 0;
for (int i = 0; i < data.numInstances(); ++i) {
double[] hist = new double[params.k];
for (int j = 0; j < featsPerSeries; ++j)
++hist[assignments[feat++]];
hist = normaliseHistogramSSR(hist);
hist = normaliseHistograml2(hist);
bags[i] = new BoTSW_Bag(hist, data.get(i).classValue());
}
//CODE FOR USING SVM FOR CORRECTNESS TESTING PURPOSES, DOES NOT REPRODUCE AUTHORS RESULTS EXACTLY (DISCREPANCIES IN SVM IMPLEMENTATIONS)
if (useSVM) {
Timer svmTimer = new Timer("\t\t\ttrainingsvm");
//stuff back into instances
ArrayList<Attribute> bagatts = new ArrayList<>();
for (int i = 0; i < params.k; ++i)
bagatts.add(new Attribute(""+i));
List<String> classVals = new ArrayList<>(data.numClasses());
for (int i = 0; i < data.numClasses(); ++i)
classVals.add(""+i);
bagatts.add(new Attribute("classVal", classVals));
bagData = new Instances("Bags", bagatts, data.numInstances());
bagData.setClassIndex(bagData.numAttributes()-1);
for (int i = 0; i < bags.length; ++i) {
double[] inst = new double[params.k+1];
for (int j = 0; j < params.k; ++j) {
inst[j] = bags[i].hist[j];
}
inst[inst.length-1] = bags[i].classValue;
bagData.add(new DenseInstance(1, inst));
}
//train svm, as close to original in-code params as i can seem to get
svm = new LibSVM();
svm.setCost(params.c_svm);
svm.setCoef0(0);
svm.setEps(0.001);
svm.setGamma(0.5);
svm.setKernelType(new SelectedTag(LibSVM.KERNELTYPE_LINEAR, LibSVM.TAGS_KERNELTYPE));
svm.setDegree(3);
svm.setNu(0.5);
svm.setShrinking(true);
svm.setCacheSize(200);
svm.setProbabilityEstimates(false);
svm.buildClassifier(bagData);
svmTimer.printlnTimeSoFar();
}
}
public static double compactnessOfClustering(SimpleKMeans kmeans, Instances input) throws Exception {
Instances centroids = kmeans.getClusterCentroids();
int[] assignments = kmeans.getAssignments();
double totalSqDist = 0.0;
for (int i = 0; i < assignments.length; ++i) {
Instance sample = input.get(i);
Instance centroid = centroids.get(assignments[i]);
for (int j = 0; j < sample.numAttributes(); ++j)
totalSqDist += (sample.value(j) - centroid.value(j)) * (sample.value(j) - centroid.value(j));
}
return totalSqDist;
}
        /**
         * Normalises the histogram by its squared l2 norm.
         * NOTE(review): despite the name, this divides by the SUM of squares rather
         * than its square root, so the result is not a unit l2 vector. Train and
         * test bags are normalised identically, so 1-NN comparisons remain
         * self-consistent — confirm against the original authors' code before changing.
         */
        double[] normaliseHistograml2(double[] hist) {
            double n = 0.;
            for(int x=0; x<hist.length; ++x)
                n += hist[x] * hist[x];
            for(int i = 0; i < hist.length; ++i)
                hist[i] /= n;
            return hist;
        }
        /** Signed-square-root normalisation (in place): each count replaced by its sqrt. */
        double[] normaliseHistogramSSR(double[] hist) {
            for (int j = 0; j < hist.length; ++j)
                hist[j] = Math.sqrt(hist[j]);
            return hist;
        }
        /** Finds keypoints in a raw series and returns one 2*n_b-dimensional gradient
         *  descriptor per keypoint. Used at test time (train time reuses cached data). */
        double[][] extractFeatures(double[] series) throws Exception {
            GuassianData gdata = findDoGs(series, params.sigma, params.k_sc, params.n_sc);
            ArrayList<KeyPoint> keypoints = findDenseKeypoints(gdata);
            return describeKeyPoints(gdata.guassSeries, keypoints);
        }
        /**
         * SIFT-style keypoint description: gaussian-weighted gradients over n_b blocks
         * of a points around each keypoint, split into sums of negative and positive
         * gradients (two values per block => 2*n_b features per keypoint).
         */
        double[][] describeKeyPoints(double[][] series, ArrayList<KeyPoint> keypoints) throws Exception {
            //setup
            int i, j, mx, tm, sc;
            int n_b = params.n_b, a = params.a; //just for readability
            int halfn_b = n_b/2;
            double[] gfilter;
            gfilter = gaussian((double)halfn_b*a, n_b*a);
            //compute gradients across series (central differences, one-sided at ends)
            double[][] globalGradients = new double[series.length][];
            for(i=0; i<series.length; ++i) {
                globalGradients[i] = new double[series[i].length];
                globalGradients[i][0] = series[i][1] - series[i][0];
                for(j=1; j < series[i].length-1; ++j)
                    globalGradients[i][j] = (series[i][j+1] - series[i][j-1]) * 0.5;
                globalGradients[i][j] = series[i][j] - series[i][j-1];
            }
            //compute gradients within each block of each keypoint
            double[][] localGradients = new double[keypoints.size()][];
            for(i = 0; i < localGradients.length; ++i)
                localGradients[i] = new double[a*n_b+1];
            for(i=0; i<localGradients.length; ++i) {
                sc = keypoints.get(i).scale;
                tm = keypoints.get(i).time - a*halfn_b;
                mx = keypoints.get(i).time + a*halfn_b; //TODO BUG DEBUG
                //original code had (- 1), i found that this code gave index out of bound error on line
                //' localGradients[i][j] = gfilter[j] * globalGradients[sc][tm+j]; '
                //with tm+j being the cuplrit
                //authors could not reproduce error in c++ code, unsure of cause
                //effect of change is minimal
                if(tm > 0 && mx < series[0].length)
                    for(j=0; j<=a*n_b; ++j)
                        localGradients[i][j] = gfilter[j] * globalGradients[sc][tm+j];
                else // If near extrema, only take the in-range positions; rest stay 0
                    for(j=0; j<=a*n_b; ++j)
                        if( (tm+j) < series[0].length && (tm+j) > 0 )
                            localGradients[i][j] = gfilter[j] * globalGradients[sc][tm+j];
                //deleting mid element (the keypoint position itself)
                double[] temp = new double[localGradients[i].length-1];
                int mid = (int)(localGradients[i].length/2);
                for (j = 0; j < mid; ++j)
                    temp[j] = localGradients[i][j];
                for (j = mid+1; j < localGradients[i].length; ++j)
                    temp[j-1] = localGradients[i][j];
                localGradients[i] = temp;
            }
            //sum local gradients to form final features: per block, sum of negative
            //gradients (abs) at 2*j and sum of positive gradients at 2*j+1
            double[][] features = new double[keypoints.size()][];
            for(i = 0; i < features.length; ++i)
                features[i] = new double[2*n_b];
            for(i=0; i<features.length; ++i)
                for(j=0; j<n_b; ++j)
                    for(mx=0; mx<a; ++mx)
                        if(localGradients[i][j*a+mx] < 0)
                            features[i][2*j] -= localGradients[i][j*a+mx];
                        else
                            features[i][2*j+1] += localGradients[i][j*a+mx];
            return features;
        }
        /**
         * Generates keypoints at regular (denseSampleRate) time steps across each
         * scale, rather than by extrema detection.
         * NOTE(review): scales iterate 1 .. DoGs.length-2 inclusive, skipping the
         * first and last scale — presumably intentional (border scales); confirm.
         */
        public ArrayList<KeyPoint> findDenseKeypoints(GuassianData gdata) {
            int scales = gdata.DoGs.length -1;
            int times = gdata.DoGs[0].length;
            int pointsPerScale = times/params.denseSampleRate;
            ArrayList<KeyPoint> keypoints = new ArrayList<>(scales*pointsPerScale);
            for(int scale = 1; scale < scales; scale++)
                for(int time = 0; time < times; time+=params.denseSampleRate)
                    keypoints.add(new KeyPoint(time, scale));
            return keypoints;
        }
        /**
         * Convolves the series with a gaussian filter of the given sigma.
         * NOTE(review): out-of-range indices are handled via Math.abs / end-offset
         * indexing — this appears to reflect the series at its borders; confirm
         * against the original C++ implementation before relying on exact border
         * behaviour.
         */
        public double[] applyGuassian(double[] ts, double sigma) {
            double[] r = new double[ts.length];
            double[] vg = gaussian(sigma);
            int i, j, k, m;
            int dec;
            dec = (int) ((vg.length + 1) * 0.5); // offset so the filter is centred on i
            for(i=0; i<ts.length; ++i) {
                k = i-dec;
                m = 1;
                for(j=0; j<vg.length; ++j) {
                    if(Math.abs(++k) < ts.length)
                        r[i] += (vg[j] * ts[Math.abs(k)]);
                    else
                        r[i] += (vg[j] * ts[ts.length-(++m)]);
                }
            }
            return r;
        }
        /** Gaussian filter values sampled at integer offsets over +/- 4*sigma. */
        public double[] gaussian(double sigma) {
            int qs = (int) (4* sigma);
            double[] vg = new double[1 + 2*qs];
            int x, y = -1;
            for(x=-qs; x<=qs; ++x)
                vg[++y] = Math.exp(-1. * x * x / (2.*sigma*sigma) ) / ( Math.sqrt(2. * Math.PI) * sigma);
            return vg;
        }
public double[] gaussian(double sigma, int length) {
double[] vg;
if(length % 2 == 1)
vg = new double[length];
else
vg = new double[length+1];
int x, l = vg.length/2;
for(x=1; x<=l; ++x) {
vg[l-x] = Math.exp(-1. * x * x / (2.*sigma*sigma) ) / ( Math.sqrt(2. * Math.PI) * sigma);
vg[l+x] = Math.exp(-1. * x * x / (2.*sigma*sigma) ) / ( Math.sqrt(2. * Math.PI) * sigma);
}
vg[l] = 1. / ( Math.sqrt(2. * Math.PI) * sigma);
double max = vg[0];
for (int i = 1; i < vg.length; ++i)
if (vg[i] > max)
max = vg[i];
for(int i = 0; i < vg.length; ++i)
vg[i] /= max;
return vg;
}
        /**
         * Computes gaussian-filtered versions of the series at successive scales
         * (sigma * k_sc^j) and their pairwise differences (Difference-of-Gaussians).
         */
        public GuassianData findDoGs(double[] ts, double sigma, double k_sc, int n_sc) {
            int size = ts.length;
            //for the guassaion filtered series [0] and DoGs [1]
            GuassianData res = new GuassianData();
            res.guassSeries = new double[n_sc+3][];
            res.DoGs = new double[n_sc+2][];
            for(int i =0; i<res.DoGs.length; ++i)
                res.DoGs[i] = new double[size];
            int i, j;
            // first pair: one scale below and at the base sigma
            res.guassSeries[0] = applyGuassian(ts, sigma / k_sc);
            res.guassSeries[1] = applyGuassian(ts, sigma);
            for(i=0; i<size; ++i)
                res.DoGs[0][i] = res.guassSeries[1][i] - res.guassSeries[0][i];
            // RANG NORMAUX — remaining scales, each DoG = next scale minus current
            for(j=1; j<res.DoGs.length; ++j) {
                res.guassSeries[j+1] = applyGuassian(ts, Math.pow(k_sc, j) * sigma);
                for(i=0; i<size; ++i)
                    res.DoGs[j][i] = res.guassSeries[j+1][i] - res.guassSeries[j][i];
            }
            return res;
        }
        /**
         * Copies the instance's attribute values into a double array, skipping the
         * class attribute (at whatever index it sits) if one is set.
         * @return data of passed instance in a double array with the class value removed if present
         */
        protected static double[] toArrayNoClass(Instance inst) {
            int length = inst.numAttributes();
            if (inst.classIndex() >= 0)
                --length;
            double[] data = new double[length];
            for (int i=0, j=0; i < inst.numAttributes(); ++i)
                if (inst.classIndex() != i)
                    data[j++] = inst.value(i);
            return data;
        }
        /** Clears bulky train-time Instances (headers kept) to reduce memory use. */
        public void clean() {
            if (clusterData != null)
                clusterData.clear(); //keeps header info
            if (bagData != null)
                bagData.clear();
        }
@Override
public double classifyInstance(Instance instnc) throws Exception {
if(useSVM)
return classifyInstanceSVM(instnc);
BoTSW_Bag testBag = buildTestBag(instnc);
double bestDist = Double.MAX_VALUE;
double nn = -1.0;
//find dist FROM testBag TO all trainBags
for (int i = 0; i < bags.length; ++i) {
double dist = distance(testBag, bags[i], bestDist);
if (dist < bestDist) {
bestDist = dist;
nn = bags[i].classValue;
}
}
return nn;
}
        /**
         * Used within ensemble as part of a leave-one-out crossvalidation, to skip having to rebuild
         * the classifier every time (since the n histograms would be identical each time anyway), therefore this classifies
         * the instance at the index passed while ignoring its own corresponding histogram
         *
         * @param test index of instance to classify
         * @return classification
         * @throws Exception if the SVM path is enabled (only supported for 1-NN)
         */
        public double classifyInstance(int test) throws Exception {
            if(useSVM)
                throw new Exception("sped-up loo cv not possible with svm");
            BoTSW_Bag testBag = bags[test];
            double bestDist = Double.MAX_VALUE;
            double nn = -1.0;
            //find dist FROM testBag TO all trainBags
            for (int i = 0; i < bags.length; ++i) {
                if (i == test) //skip 'this' one, leave-one-out
                    continue;
                double dist = distance(testBag, bags[i], bestDist);
                if (dist < bestDist) {
                    bestDist = dist;
                    nn = bags[i].classValue;
                }
            }
            return nn;
        }
protected double distance(BoTSW_Bag instA, BoTSW_Bag instB, double bestDist) throws Exception {
switch (distFunc) {
case EUCLIDEAN_DISTANCE:
return euclidean(instA, instB, bestDist);
case HISTOGRAM_INTERSECTION:
return histIntersection(instA, instB);
case BOSS_DISTANCE:
return bossDistance(instA, instB, bestDist);
default:
throw new Exception("No distance function set");
}
}
        /** Early-abandoning squared euclidean distance between two histograms. */
        protected double euclidean(BoTSW_Bag instA, BoTSW_Bag instB, double bestDist) {
            double dist = 0.0;
            for (int i = 0; i < instA.hist.length; ++i) {
                double valA = instA.hist[i];
                double valB = instB.hist[i];
                dist += (valA-valB)*(valA-valB);
                if (dist > bestDist) // early abandon once the running sum exceeds the best so far
                    return Double.MAX_VALUE;
            }
            return dist;
        }
        /**
         * BOSS-style asymmetric distance: like squared euclidean but entries that are
         * zero in instA's histogram are skipped, so only words present in the query
         * contribute. Early-abandons against bestDist.
         */
        protected double bossDistance(BoTSW_Bag instA, BoTSW_Bag instB, double bestDist) {
            double dist = 0.0;
            //find dist only from values in instA
            for (int i = 0; i < instA.hist.length; ++i) {
                double valA = instA.hist[i];
                if (instA.hist[i] == 0)
                    continue;
                double valB = instB.hist[i];
                dist += (valA-valB)*(valA-valB);
                if (dist > bestDist)
                    return Double.MAX_VALUE;
            }
            return dist;
        }
        /**
         * Negated histogram-intersection similarity, so smaller is better and it can
         * share the minimisation code path with the other distances.
         */
        protected double histIntersection(BoTSW_Bag instA, BoTSW_Bag instB) {
            double sim = 0.0;
            for (int i = 0; i < instA.hist.length; ++i) {
                double valA = instA.hist[i];
                double valB = instB.hist[i];
                sim += Math.min(valA, valB);
            }
            //keep it as a value minimisation problem just for ease of code
            return -sim;
        }
public double classifyInstanceSVM(Instance instnc) throws Exception {
double[] dist = distributionForInstanceSVM(instnc);
int maxi = 0;
double max = dist[maxi];
for (int i = 1; i < dist.length; ++i)
if (dist[i] > max) {
max = dist[i];
maxi = i;
}
return (double)maxi;
}
        /** Extracts features from an unseen instance, assigns each to its nearest
         *  learned cluster, and returns the normalised word histogram. */
        public BoTSW_Bag buildTestBag(Instance instnc) throws Exception {
            double[][] features = extractFeatures(toArrayNoClass(instnc));
            //cluster/form histograms
            Instances testFeatures = new Instances(clusterData, features.length);
            double[] hist = new double[params.k];
            for (int i = 0; i < features.length; ++i) {
                testFeatures.add(new DenseInstance(1, features[i]));
                int cluster = kmeans.clusterInstance(testFeatures.get(i));
                ++hist[cluster];
            }
            hist = normaliseHistogramSSR(hist);
            hist = normaliseHistograml2(hist);
            return new BoTSW_Bag(hist, instnc.classValue());
        }
        /** Only supported when useSVM is set; 1-NN yields a hard classification only. */
        @Override
        public double[] distributionForInstance(Instance instnc) throws Exception {
            if (useSVM)
                return distributionForInstanceSVM(instnc);
            else
                throw new UnsupportedOperationException("Not supported yet for non-svm classification."); //To change body of generated methods, choose Tools | Templates.
        }
        /** Builds the test bag, wraps it in the bagData header, and queries the SVM. */
        public double[] distributionForInstanceSVM(Instance instnc) throws Exception {
            BoTSW_Bag testBag = buildTestBag(instnc);
            //classify
            Instances testBagData = new Instances(bagData, 1);
            double[] inst = new double[params.k+1];
            for (int j = 0; j < params.k; ++j)
                inst[j] = testBag.hist[j];
            inst[inst.length-1] = testBag.classValue;
            testBagData.add(new DenseInstance(1, inst));
            return svm.distributionForInstance(testBagData.get(0));
        }
        /** Not implemented for this classifier. */
        @Override
        public Capabilities getCapabilities() {
            throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
        }
}
}
/*
* Copyright (C) 2019 Chang Wei Tan
*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.distance_based;
import experiments.data.DatasetLoading;
import tsml.classifiers.legacy.elastic_ensemble.fast_elastic_ensemble.utils.SequenceStatsCache;
import tsml.classifiers.legacy.elastic_ensemble.Efficient1NN;
import weka.core.Instance;
import weka.core.Instances;
import java.io.File;
import java.util.ArrayList;
import java.util.Random;
import java.util.Scanner;
/**
* This is a method introduced in the paper
* FastEE: Fast Ensembles of Elastic Distances for Time Series Classification.
* It builds an approximate nearest neighbour table that can be used to learn the optimal parameter for each distance
* measure without compromising much on the classification accuracy, hence faster.
* Please refer to the paper for more details about this table.
*
* @author Chang Wei (chang.tan@monash.edu)
*/
public class ApproxElasticEnsemble extends FastElasticEnsemble {
private int nSamples;
    /** Defaults nSamples to 2; see the FastEE paper for the role of this parameter. */
    public ApproxElasticEnsemble() {
        setApproxSamples(2);
    }
    // Sets the number of samples used when building the approximate NN table.
    // NOTE(review): exact semantics of nSamples are defined in the FastEE paper — confirm there.
    public void setApproxSamples(int nSamples) {
        this.nSamples = nSamples;
    }
    /**
     * Builds the ensemble. Either reloads previously-written per-constituent
     * training results from disk (buildFromFile) or runs an approximate
     * parameter search (nSamples instances per parameter) for each constituent.
     *
     * @param train the training data; a reference is kept (not copied)
     * @throws Exception if buildFromFile is set and a constituent's results file is missing
     */
    @Override
    public void buildClassifier(Instances train) throws Exception {
        long t1= System.nanoTime();
        this.train = train;
        this.derTrain = null;
        usesDer = false;
        // one constituent 1-NN classifier per requested distance measure
        this.classifiers = new Efficient1NN[this.classifiersToUse.length];
        this.cvAccs = new double[classifiers.length];
        this.cvPreds = new double[classifiers.length][this.train.numInstances()];
        for (int c = 0; c < classifiers.length; c++) {
            classifiers[c] = getClassifier(this.classifiersToUse[c]);
            if (isDerivative(this.classifiersToUse[c])) {
                usesDer = true;
            }
        }
        // derivative-transformed copy of the data is only built if some constituent needs it
        if (usesDer) {
            this.derTrain = df.transform(train);
        }
        if (buildFromFile) {
            // reload paramId, cv accuracy and cv predictions from each constituent's trainFold csv
            File existingTrainOut;
            Scanner scan;
            int paramId;
            double cvAcc;
            for (int c = 0; c < classifiers.length; c++) {
                existingTrainOut = new File(this.resultsDir + classifiersToUse[c] +
                        "/Predictions/" + datasetName + "/trainFold" + this.resampleId + ".csv");
                if (!existingTrainOut.exists()) {
                    throw new Exception("Error: training file doesn't exist for " + existingTrainOut.getAbsolutePath());
                }
                scan = new Scanner(existingTrainOut);
                scan.useDelimiter("\n");
                scan.next();//header
                paramId = Integer.parseInt(scan.next().trim().split(",")[0]);
                cvAcc = Double.parseDouble(scan.next().trim().split(",")[0]);
                for (int i = 0; i < train.numInstances(); i++) {
                    this.cvPreds[c][i] = Double.parseDouble(scan.next().split(",")[1]);
                }
                scan.close();
                // fixed-parameter measures (e.g. full-window DTW) have no paramId to restore
                if (isDerivative(classifiersToUse[c])) {
                    if (!isFixedParam(classifiersToUse[c])) {
                        classifiers[c].setParamsFromParamId(derTrain, paramId);
                    }
                    classifiers[c].buildClassifier(derTrain);
                } else {
                    if (!isFixedParam(classifiersToUse[c])) {
                        classifiers[c].setParamsFromParamId(train, paramId);
                    }
                    classifiers[c].buildClassifier(train);
                }
                cvAccs[c] = cvAcc;
            }
        } else {
            double[] cvAccAndPreds;
            for (int c = 0; c < classifiers.length; c++) {
                if (writeToFile) {
                    classifiers[c].setFileWritingOn(this.resultsDir, this.datasetName, this.resampleId);
                }
                // fixed-parameter measures get a full LOOCV; the rest use the approximate search
                if (isFixedParam(classifiersToUse[c])) {
                    if (isDerivative(classifiersToUse[c])) {
                        cvAccAndPreds = classifiers[c].loocv(derTrain);
                    } else {
                        cvAccAndPreds = classifiers[c].loocv(train);
                    }
                } else if (isDerivative(classifiersToUse[c])) {
                    cvAccAndPreds = classifiers[c].approxParameterSearch(derTrain, nSamples);
                } else {
                    cvAccAndPreds = classifiers[c].approxParameterSearch(train, nSamples);
                }
                // cvAccAndPreds[0] is the accuracy; predictions follow from index 1
                cvAccs[c] = cvAccAndPreds[0];
                // NOTE(review): only the first nSamples predictions are copied, even in
                // the full-loocv (fixed-param) branch where predictions exist for every
                // training instance - confirm whether this truncation is intended.
                for (int i = 0; i < nSamples; i++) {
                    this.cvPreds[c][i] = cvAccAndPreds[i + 1];
                }
            }
            /*
            if (this.writeEnsembleTrainingFile) {
                StringBuilder output = new StringBuilder();
                double[] ensembleCvPreds = this.getTrainPreds();
                output.append(train.relationName()).append(",FastEE,train\n");
                output.append(this.getParameters()).append("\n");
                output.append(this.getTrainAcc()).append("\n");
                for (int i = 0; i < train.numInstances(); i++) {
                    output.append(train.instance(i).classValue()).append(",").append(ensembleCvPreds[i]).append("\n");
                }
                FileWriter fullTrain = new FileWriter(this.ensembleTrainFilePathAndName);
                fullTrain.append(output);
                fullTrain.close();
            }
            */
        }
        trainResults.setBuildTime(System.nanoTime() - t1);
    }
    /**
     * Classifies a query using a cvAcc-weighted vote over the constituents,
     * with lower-bound pruning supplied via the precomputed stats cache.
     *
     * @param instance the query series
     * @param queryIndex index of the query in the cache
     * @param cache precomputed sequence statistics used for lower-bounding
     * @return predicted class value
     * @throws Exception if the classifier has not been built
     */
    // classify instance with lower bounds
    public double classifyInstance(final Instance instance, final int queryIndex, final SequenceStatsCache cache) throws Exception {
        if (classifiers == null) {
            throw new Exception("Error: classifier not built");
        }
        Instance derIns = null;
        // build the derivative version of the query only if some constituent needs it
        if (this.usesDer) {
            Instances temp = new Instances(derTrain, 1);
            temp.add(instance);
            temp = df.transform(temp);
            derIns = temp.instance(0);
        }
        double bsfVote = -1;
        double[] classTotals = new double[train.numClasses()];
        ArrayList<Double> bsfClassVal = null;
        double pred;
        this.previousPredictions = new double[this.classifiers.length];
        for (int c = 0; c < classifiers.length; c++) {
            if (isDerivative(classifiersToUse[c])) {
                pred = classifiers[c].classifyInstance(derTrain, derIns, queryIndex, cache);
            } else {
                pred = classifiers[c].classifyInstance(train, instance, queryIndex, cache);
            }
            previousPredictions[c] = pred;
            try {
                // each constituent's vote is weighted by its training cv accuracy
                classTotals[(int) pred] += cvAccs[c];
            } catch (Exception e) {
                System.out.println("cv accs " + cvAccs.length);
                System.out.println(pred);
                throw e;
            }
            // track the current best class total, keeping all tied classes
            if (classTotals[(int) pred] > bsfVote) {
                bsfClassVal = new ArrayList<>();
                bsfClassVal.add(pred);
                bsfVote = classTotals[(int) pred];
            } else if (classTotals[(int) pred] == bsfVote) {
                bsfClassVal.add(pred);
            }
        }
        if (bsfClassVal.size() > 1) {
            // NOTE(review): a new Random(46) per call makes tie-breaking deterministic
            // for a given tie size rather than random - presumably for reproducibility;
            // confirm before "fixing" as predictions would change.
            return bsfClassVal.get(new Random(46).nextInt(bsfClassVal.size()));
        }
        return bsfClassVal.get(0);
    }
public static void main(String[] args) throws Exception {
ApproxElasticEnsemble ee = new ApproxElasticEnsemble();
Instances train = DatasetLoading.loadDataNullable("C:/Users/cwtan/workspace/Dataset/TSC_Problems/ArrowHead/ArrowHead_TRAIN");
Instances test = DatasetLoading.loadDataNullable("C:/Users/cwtan/workspace/Dataset/TSC_Problems/ArrowHead/ArrowHead_TEST");
ee.buildClassifier(train);
SequenceStatsCache cache = new SequenceStatsCache(test, test.numAttributes() - 1);
System.out.println("Train Acc: " + ee.getTrainAcc());
int correct = 0;
for (int i = 0; i < test.numInstances(); i++) {
double actual = test.instance(i).classValue();
double pred = ee.classifyInstance(test.instance(i), i, cache);
if (actual == pred) {
correct++;
}
}
System.out.println("Test Acc: " + (double) correct / test.numInstances());
System.out.println("Test Acc -- correct: " + correct + "/" + test.numInstances());
}
}
| 9,146 | 38.769565 | 133 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/DD_DTW.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.distance_based;
import java.text.DecimalFormat;
import java.util.concurrent.TimeUnit;
import utilities.InstanceTools;
import machine_learning.classifiers.kNN;
import evaluation.storage.ClassifierResults;
import experiments.data.DatasetLoading;
import tsml.classifiers.SaveParameterInfo;
import weka.core.DenseInstance;
import weka.core.EuclideanDistance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.TechnicalInformation;
import tsml.classifiers.legacy.elastic_ensemble.distance_functions.DTW_DistanceBasic;
import weka.core.neighboursearch.PerformanceStats;
/**
*
* @author Jason Lines (j.lines@uea.ac.uk)
*
* Implementation of:
* Gorecki, Tomasz, and Maciej Luczak.
* Using derivatives in time series classification.
* Data Mining and Knowledge Discovery 26.2 (2013): 310-331.
*
*/
/*
=========================================================================================================================
BRIEF DESCRIPTION:
=========================================================================================================================
The classifier works by using a weighted combination of the raw data representation and derivatives with either
Euclidean Distance or full-window DTW. The idea is that the distance of using the distance measure on the raw
data is weighted using parameter a, and the distance using derivative-transformed data is weighted using a
parameter b. These two params are in the range of 0-1, and the sum of a+b is always 1 (i.e. binary split in the
weighting between the two classifiers. Therefore a = 1, b = 0 is equivilent to just using DTW, and a=0, b=1
is equivilent to just using derivative DTW.
The author's propose using a single parameter alpha to weight these two components by using it to derive values of
a and b. However, in our experiments this approach does not seem to reproduce the published results. However, simply
searching from a = 0 to 1 and b = 1 to 0 in increments of 0.01 (101 param options) appears to reproduce results
=========================================================================================================================
HOW TO USE:
=========================================================================================================================
The class extends the kNN class, so classifier functionality is included. Three parameters should be set:
1. whether the classifier uses ED or DTW (default is ED unless set) (handled by enum in constructor)
2. values for a and b (defaults to a=1 and b=0, equiv to just ED or DTW)
The params a and b can be set explicitly through a mutator. However, if not specified, the buildClassifier method
performs the LOOCV procedure outlined in the original paper to find the values of a and b using the training data.
=========================================================================================================================
RECREATING RESULTS:
=========================================================================================================================
See method recreateResultsTable()
Data dir of the TSC problems, DATA_DIR, must be set to match local implementation
=========================================================================================================================
RELATED CLASSES:
=========================================================================================================================
Next iteration:
NNTranformWeighting.java
*/
public class DD_DTW extends kNN implements SaveParameterInfo{
protected ClassifierResults res =new ClassifierResults();
@Override
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
result = new TechnicalInformation(TechnicalInformation.Type.ARTICLE);
result.setValue(TechnicalInformation.Field.AUTHOR, "T. Gorecki and M. Luczak");
result.setValue(TechnicalInformation.Field.TITLE, "Using derivatives in time series classification");
result.setValue(TechnicalInformation.Field.JOURNAL, "Data Mining and Knowledge Discovery");
result.setValue(TechnicalInformation.Field.VOLUME, "26");
result.setValue(TechnicalInformation.Field.NUMBER,"2");
result.setValue(TechnicalInformation.Field.PAGES, "310-331");
result.setValue(TechnicalInformation.Field.YEAR, "2015");
return result;
}
public static final String DATA_DIR = "C:/Temp/Dropbox/TSC Problems/";
public static final double[] ALPHAS = {
//<editor-fold defaultstate="collapsed" desc="alpha values">
1,
1.01,
1.02,
1.03,
1.04,
1.05,
1.06,
1.07,
1.08,
1.09,
1.1,
1.11,
1.12,
1.13,
1.14,
1.15,
1.16,
1.17,
1.18,
1.19,
1.2,
1.21,
1.22,
1.23,
1.24,
1.25,
1.26,
1.27,
1.28,
1.29,
1.3,
1.31,
1.32,
1.33,
1.34,
1.35,
1.36,
1.37,
1.38,
1.39,
1.4,
1.41,
1.42,
1.43,
1.44,
1.45,
1.46,
1.47,
1.48,
1.49,
1.5,
1.51,
1.52,
1.53,
1.54,
1.55,
1.56,
1.57
//</editor-fold>
};
public static final String[] GORECKI_DATASETS = {
//<editor-fold defaultstate="collapsed" desc="Datasets from the paper">
"fiftywords", // 450,455,270,50
"Adiac", // 390,391,176,37
"Beef", // 30,30,470,5
"CBF", // 30,900,128,3
"Coffee", // 28,28,286,2
"FaceAll", // 560,1690,131,14
"FaceFour", // 24,88,350,4
"fish", // 175,175,463,7
"GunPoint", // 50,150,150,2
"Lightning2", // 60,61,637,2
"Lightning7", // 70,73,319,7
"OliveOil", // 30,30,570,4
"OSULeaf", // 200,242,427,6
"SwedishLeaf", // 500,625,128,15
"SyntheticControl", // 300,300,60,6
"Trace", // 100,100,275,4
"TwoPatterns", // 1000,4000,128,4
"wafer", // 1000,6164,152,2
"yoga"// 300,3000,426,2
//</editor-fold>
};
protected GoreckiDerivativesEuclideanDistance distanceFunction;
protected boolean paramsSet;
protected boolean sampleForCV=false;
protected double prop;
    /**
     * Enables/disables subsampling of the training set during cross-validation.
     *
     * @param b whether to subsample
     * @param p intended sampling proportion
     */
    // NOTE(review): prop is stored but not read anywhere visible here - the CV
    // code subsamples with a hard-coded 1/10 - confirm whether p should be used.
    public void sampleForCV(boolean b, double p){
        sampleForCV=b;
        prop=p;
    }
public enum DistanceType{EUCLIDEAN, DTW};
    // NOTE(review): despite the class docs (and the old comment here) saying the
    // default is Euclidean distance, this no-arg constructor actually uses the
    // DTW-based measure; pass DistanceType.EUCLIDEAN explicitly for ED.
    public DD_DTW(){
        super();
        this.distanceFunction = new GoreckiDerivativesDTW();
        this.paramsSet = false;
    }
public DD_DTW(DistanceType distType){
super();
if(distType==DistanceType.EUCLIDEAN){
this.distanceFunction = new GoreckiDerivativesEuclideanDistance();
}else{
this.distanceFunction = new GoreckiDerivativesDTW();
}
this.paramsSet = false;
}
public void setAandB(double a, double b){
this.distanceFunction.a = a;
this.distanceFunction.b = b;
this.paramsSet = true;
}
    /**
     * Builds the 1-NN classifier. If a and b were not set explicitly, they are
     * first found by LOOCV on the training data (as in the original paper).
     *
     * @param train the training data
     * @throws Exception propagated from the underlying kNN build
     */
    @Override
    public void buildClassifier(Instances train) throws Exception {
        long startTime=System.nanoTime();
        if(!paramsSet){
            // LOOCV over a=0..1, b=1..0 in 0.01 steps to find the weights
            this.distanceFunction.crossValidateForAandB(train);
            paramsSet=true;
        }
        this.setDistanceFunction(this.distanceFunction);
        super.buildClassifier(train);
        try {
            res.setTimeUnit(TimeUnit.NANOSECONDS);
            res.setBuildTime(System.nanoTime()-startTime);
        } catch (Exception e) {
            // superclass signature prevents rethrowing here, so just report it
            System.err.println("Inheritance preventing me from throwing this error...");
            System.err.println(e);
        }
    }
@Override
public String getParameters() {
return "BuildTime,"+res.getBuildTime()+",a,"+distanceFunction.a+",b,"+distanceFunction.b;
}
public static class GoreckiDerivativesEuclideanDistance extends EuclideanDistance{
protected double alpha;
protected double a;
protected double b;
public boolean sampleTrain=true; //Change back to default to false
        /**
         * Default measure: a=1, b=0, i.e. plain ED with no derivative component.
         */
        public GoreckiDerivativesEuclideanDistance(){
            this.a = 1;
            this.b = 0;
            this.alpha = -1;
            // defaults to no derivative input
        }
        /**
         * Learns a and b from the training data by LOOCV over the 0..1 grid.
         */
        public GoreckiDerivativesEuclideanDistance(Instances train){
            // this is what the paper suggests they use, but doesn't reproduce results.
            //this.crossValidateForAlpha(train);
            // when cv'ing for a = 0:0.01:1 and b = 1:-0.01:0 results can be reproduced though, so use that
            this.crossValidateForAandB(train);
        }
        /**
         * Derives the weights from a single alpha parameter as in the paper:
         * a = cos(alpha), b = sin(alpha).
         */
        public GoreckiDerivativesEuclideanDistance(double alpha){
            this.alpha = alpha;
            this.a = Math.cos(alpha);
            this.b = Math.sin(alpha);
        }
public GoreckiDerivativesEuclideanDistance(double a, double b){
this.alpha = alpha;
this.a = Math.cos(alpha);
this.b = Math.sin(alpha);
}
        // Convenience overloads: both delegate to distance(Instance, Instance, double).
        @Override
        public double distance(Instance one, Instance two){
            return this.distance(one, two, Double.MAX_VALUE);
        }
        // stats parameter is ignored; no performance statistics are collected
        @Override
        public double distance(Instance one, Instance two, double cutoff, PerformanceStats stats){
            return this.distance(one,two,cutoff);
        }
@Override
public double distance(Instance first, Instance second, double cutoff){
double dist = 0;
double dirDist = 0;
int classPenalty = 0;
if(first.classIndex()>0){
classPenalty=1;
}
double firstDir, secondDir;
for(int i = 0; i < first.numAttributes()-classPenalty; i++){
dist+= ((first.value(i)-second.value(i))*(first.value(i)-second.value(i)));
// one less for derivatives, since we don't want to include the class value!
// could skip the first instead of last, but this makes more sense for earlier early abandon
if(i < first.numAttributes()-classPenalty-1){
firstDir = first.value(i+1)-first.value(i);
secondDir = second.value(i+1)-second.value(i);
dirDist+= ((firstDir-secondDir)*(firstDir-secondDir));
}
}
return(a*Math.sqrt(dist)+b*Math.sqrt(dirDist));
}
        /**
         * Returns the two unweighted components {ED(raw), ED(first differences)}
         * so callers can apply many different a/b weightings to one computation.
         */
        public double[] getNonScaledDistances(Instance first, Instance second){
            double dist = 0;
            double dirDist = 0;
            // exclude the class attribute from the distance when one is present
            int classPenalty = 0;
            if(first.classIndex()>0){
                classPenalty=1;
            }
            double firstDir, secondDir;
            for(int i = 0; i < first.numAttributes()-classPenalty; i++){
                dist+= ((first.value(i)-second.value(i))*(first.value(i)-second.value(i)));
                // derivative series is one element shorter than the raw series
                if(i < first.numAttributes()-classPenalty-1){
                    firstDir = first.value(i+1)-first.value(i);
                    secondDir = second.value(i+1)-second.value(i);
                    dirDist+= ((firstDir-secondDir)*(firstDir-secondDir));
                }
            }
            return new double[]{Math.sqrt(dist),Math.sqrt(dirDist)};
        }
        // implemented to mirror the original MATLAB implementation described in the paper (with appropriate modifications)
        /**
         * LOOCV 1-NN over the fixed ALPHAS grid; sets this.alpha/a/b to the best
         * setting and returns the corresponding training accuracy.
         */
        public double crossValidateForAlpha(Instances tr){
            Instances train=tr;
            if(sampleTrain){
                // NOTE(review): the subsample is assigned to tr but every later
                // statement reads train, so sampling currently has no effect -
                // confirm whether train = InstanceTools.subSample(...) was intended.
                tr=InstanceTools.subSample(tr, tr.numInstances()/10, 0);
            }
            double[] labels = new double[train.numInstances()];
            for(int i = 0; i < train.numInstances(); i++){
                labels[i] = train.instance(i).classValue();
            }
            // precompute the a/b weights for every candidate alpha
            double[] a = new double[ALPHAS.length];
            double[] b = new double[ALPHAS.length];
            for(int alphaId = 0; alphaId < ALPHAS.length; alphaId++){
                a[alphaId] = Math.cos(ALPHAS[alphaId]);
                b[alphaId] = Math.sin(ALPHAS[alphaId]);
            }
            int n = train.numInstances();
            int k = ALPHAS.length;
            int[] mistakes = new int[k];
//
//            // need to get the derivatives (MATLAB code uses internal diff function instead)
//            Instances dTrain = new GoreckiDerivativesDistance.GoreckiDerivativeFilter().process(train);
            double[] D;   // best-so-far distance per candidate alpha
            double[] L;   // label of the current nearest neighbour per candidate alpha
            double[] d;
            double dist;
            double dDist;
            double[] individualDistances;
            for(int i = 0; i < n; i++){
                D = new double[k];
                L = new double[k];
                for(int j = 0; j < k; j++){
                    D[j]=Double.MAX_VALUE;
                }
                for(int j = 0; j < n; j++){
                    if(i==j){
                        continue;
                    }
                    // compute the two unweighted components once, then weight them
                    // for every candidate alpha (can't vectorise as in MATLAB)
                    individualDistances = this.getNonScaledDistances(train.instance(i), train.instance(j));
                    // have to be a bit different here, since we can't vectorise in Java
//                    dist = distanceFunction.distance(train.instance(i), train.instance(j));
//                    dDist = distanceFunction.distance(dTrain.instance(i), dTrain.instance(j));
                    dist = individualDistances[0];
                    dDist = individualDistances[1];
                    d = new double[k];
                    for(int alphaId = 0; alphaId < k; alphaId++){
                        d[alphaId] = a[alphaId]*dist+b[alphaId]*dDist;
                        if(d[alphaId] < D[alphaId]){
                            D[alphaId]=d[alphaId];
                            L[alphaId]=labels[j];
                        }
                    }
                }
                // a mistake is recorded when the 1-NN label disagrees with the true label
                for(int alphaId = 0; alphaId < k; alphaId++){
                    if(L[alphaId]!=labels[i]){
                        mistakes[alphaId]++;
                    }
                }
            }
            // pick the alpha with the fewest LOOCV mistakes (ties -> first/smallest alpha)
            int bsfMistakes = Integer.MAX_VALUE;
            int bsfAlphaId = -1;
            for(int alpha = 0; alpha < k; alpha++){
                if(mistakes[alpha] < bsfMistakes){
                    bsfMistakes = mistakes[alpha];
                    bsfAlphaId = alpha;
                }
            }
            this.alpha = ALPHAS[bsfAlphaId];
            this.a = Math.cos(this.alpha);
            this.b = Math.sin(this.alpha);
//            System.out.println("bestAlphaId,"+bsfAlphaId);
            return (double)(train.numInstances()-bsfMistakes)/train.numInstances();
        }
        // changed to now return the predictions of the best alpha parameter
        /**
         * LOOCV 1-NN over a = 1..0 and b = 0..1 in steps of 0.01 (101 options);
         * sets this.a/b to the best weighting and returns the LOOCV predictions
         * made under that weighting (one per training instance).
         */
        public double[] crossValidateForAandB(Instances tr){
            Instances train=tr;
            if(sampleTrain){
                // NOTE(review): as in crossValidateForAlpha, the subsample is
                // assigned to tr but never used (all later code reads train), so
                // sampling currently has no effect - confirm the intended fix.
                tr=InstanceTools.subSample(tr, tr.numInstances()/10, 0);
            }
            double[] labels = new double[train.numInstances()];
            for(int i = 0; i < train.numInstances(); i++){
                labels[i] = train.instance(i).classValue();
            }
            // candidate weightings: a descends 1->0 while b ascends 0->1
            double[] a = new double[101];
            double[] b = new double[101];
            for(int alphaId = 0; alphaId <= 100; alphaId++){
                a[alphaId] = (double)(100-alphaId)/100;
                b[alphaId] = (double)alphaId/100;
            }
            int n = train.numInstances();
            int k = a.length;
            int[] mistakes = new int[k];
            double[] D;   // best-so-far distance per candidate weighting
            double[] L;   // nearest-neighbour label per candidate weighting
            double[] d;
            double dist;
            double dDist;
            double[][] LforAll = new double[n][];  // per-instance NN labels, kept to return predictions
            double[] individualDistances;
            for(int i = 0; i < n; i++){
                D = new double[k];
                L = new double[k];
                for(int j = 0; j < k; j++){
                    D[j]=Double.MAX_VALUE;
                }
                for(int j = 0; j < n; j++){
                    if(i==j){
                        continue;
                    }
                    // compute the two unweighted components once, weight them for all 101 options
                    individualDistances = this.getNonScaledDistances(train.instance(i), train.instance(j));
                    dist = individualDistances[0];
                    dDist = individualDistances[1];
                    d = new double[k];
                    for(int alphaId = 0; alphaId < k; alphaId++){
                        d[alphaId] = a[alphaId]*dist+b[alphaId]*dDist;
                        if(d[alphaId] < D[alphaId]){
                            D[alphaId]=d[alphaId];
                            L[alphaId]=labels[j];
                        }
                    }
                }
                for(int alphaId = 0; alphaId < k; alphaId++){
                    if(L[alphaId]!=labels[i]){
                        mistakes[alphaId]++;
                    }
                }
                LforAll[i] = L;
            }
            // pick the weighting with the fewest LOOCV mistakes (ties -> largest a)
            int bsfMistakes = Integer.MAX_VALUE;
            int bsfAlphaId = -1;
            for(int alpha = 0; alpha < k; alpha++){
                if(mistakes[alpha] < bsfMistakes){
                    bsfMistakes = mistakes[alpha];
                    bsfAlphaId = alpha;
                }
            }
            this.alpha = -1;  // alpha not meaningful when a/b chosen directly
            this.a = a[bsfAlphaId];
            this.b = b[bsfAlphaId];
            double[] bestAlphaPredictions = new double[train.numInstances()];
            for(int i = 0; i < bestAlphaPredictions.length; i++){
                bestAlphaPredictions[i] = LforAll[i][bsfAlphaId];
            }
            return bestAlphaPredictions;
        }
        /** @return the learned/assigned weight of the raw-data component */
        public double getA() {
            return a;
        }
        /** @return the learned/assigned weight of the derivative component */
        public double getB() {
            return b;
        }
}
    /**
     * DTW variant of the weighted measure: a*DTW(x,y) + b*DTW(x',y'), where x'
     * is the Gorecki-style first-difference series. Weight handling (a, b,
     * alpha, cross-validation) is inherited from the Euclidean parent class.
     */
    public static class GoreckiDerivativesDTW extends GoreckiDerivativesEuclideanDistance{
        public GoreckiDerivativesDTW(){
            super();
        }
        public GoreckiDerivativesDTW(Instances train){
            super(train);
        }
        public GoreckiDerivativesDTW(double alpha){
            super(alpha);
        }
        public GoreckiDerivativesDTW(double a, double b){
            super(a,b);
        }
        @Override
        public double distance(Instance one, Instance two){
            return this.distance(one, two, Double.MAX_VALUE);
        }
        @Override
        public double distance(Instance one, Instance two, double cutoff, PerformanceStats stats){
            return this.distance(one,two,cutoff);
        }
        @Override
        public double distance(Instance first, Instance second, double cutoff){
            // weight the two precomputed DTW components; cutoff is not used
            double[] distances = getNonScaledDistances(first, second);
            return a*distances[0]+b*distances[1];
        }
        /**
         * Returns {sqrt(DTW(raw)), sqrt(DTW(first differences))} unweighted.
         */
        public double[] getNonScaledDistances(Instance first, Instance second){
            double dist = 0;
            double derDist = 0;
//            DTW dtw = new DTW();
            DTW_DistanceBasic dtw = new DTW_DistanceBasic();
            // NOTE(review): classPenalty is computed but never used in this method
            int classPenalty = 0;
            if(first.classIndex()>0){
                classPenalty=1;
            }
            // build the derivative versions of both series via the filter
            GoreckiDerivativeFilter filter = new GoreckiDerivativeFilter();
            Instances temp = new Instances(first.dataset(),0);
            temp.add(first);
            temp.add(second);
            try{
                temp = filter.process(temp);
            }catch(Exception e){
                e.printStackTrace();
                return null;
            }
            dist = dtw.distance(first, second);
            derDist = dtw.distance(temp.get(0), temp.get(1), Double.MAX_VALUE);
            return new double[]{Math.sqrt(dist),Math.sqrt(derDist)};
        }
    }
    // They calculate derivatives differently to the transform we have (which matches Keogh et al.'s DDTW implementation).
    // Derivatives are built into the new distance measures, but this is needed to recreate the derivative Euclidean/DTW comparison results.
    /**
     * Batch filter producing Gorecki-style first differences:
     * out[i] = in[i+1] - in[i], so the output is one attribute shorter than the
     * input (the class attribute is preserved as the last attribute).
     */
    private static class GoreckiDerivativeFilter extends weka.filters.SimpleBatchFilter{
        @Override
        public String globalInfo() {
            throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
        }
        @Override
        protected Instances determineOutputFormat(Instances inputFormat) throws Exception {
            // drop one attribute (derivative series is one shorter) and rename the rest
            Instances output = new Instances(inputFormat,0);
            output.deleteAttributeAt(0);
            output.setRelationName("goreckiDerivative_"+output.relationName());
            for(int a = 0; a < output.numAttributes()-1; a++){
                output.renameAttribute(a, "derivative_"+a);
            }
            return output;
        }
        @Override
        public Instances process(Instances instances) throws Exception {
            Instances output = determineOutputFormat(instances);
            Instance thisInstance;
            Instance toAdd;
            double der;
            for(int i = 0; i < instances.numInstances(); i++){
                thisInstance = instances.get(i);
                toAdd = new DenseInstance(output.numAttributes());
                // attributes 0..n-2 hold the series; the last input attribute is the class
                for(int a = 1; a < instances.numAttributes()-1; a++){
                    der = thisInstance.value(a)-thisInstance.value(a-1);
                    toAdd.setValue(a-1, der);
                }
                toAdd.setValue(output.numAttributes()-1, thisInstance.classValue());
                output.add(toAdd);
            }
            return output;
        }
    }
    /**
     * Recreates the paper's results table on the default train/test splits
     * (seed 0 means no resampling).
     */
    public static void recreateResultsTable() throws Exception{
        recreateResultsTable(0);
    }
public static void recreateResultsTable(int seed) throws Exception{
String[] datasets = GORECKI_DATASETS;
String dataDir = "C:/Temp/Dropbox/TSC Problems/";
Instances train, test, dTrain, dTest;
EuclideanDistance ed;
kNN knn;
int correct;
double acc, err;
DecimalFormat df = new DecimalFormat("##.##");
// important - use the correct one! Gorecki uses different derivatives to Keogh
GoreckiDerivativeFilter derFilter = new GoreckiDerivativeFilter();
StringBuilder st = new StringBuilder();
System.out.println("Dataset,ED,DED,DD_ED,DTW,DDTW,DD_DTW");
for(String dataset:datasets){
System.out.print(dataset+",");
train = DatasetLoading.loadDataNullable(dataDir+dataset+"/"+dataset+"_TRAIN");
test = DatasetLoading.loadDataNullable(dataDir+dataset+"/"+dataset+"_TEST");
// instance resampling happens here, seed of 0 means that the standard train/test split is used
if(seed!=0){
Instances[] temp = InstanceTools.resampleTrainAndTestInstances(train, test, seed);
train = temp[0];
test = temp[1];
}
dTrain = derFilter.process(train);
dTest = derFilter.process(test);
// ED
// ed = new GoreckiEuclideanDistance();
ed = new EuclideanDistance();
ed.setDontNormalize(true);
knn = new kNN(ed);
correct = getCorrect(knn, train, test);
acc = (double)correct/test.numInstances();
err = (1-acc)*100;
System.out.print(df.format(err)+",");
// DED
ed = new EuclideanDistance();
knn = new kNN(ed);
correct = getCorrect(knn, dTrain, dTest);
acc = (double)correct/test.numInstances();
err = (1-acc)*100;
System.out.print(df.format(err)+",");
//DD_ED
DD_DTW dd_ed = new DD_DTW(DistanceType.EUCLIDEAN);
correct = getCorrect(dd_ed, train, test);
acc = (double)correct/test.numInstances();
err = (1-acc)*100;
System.out.print(df.format(err)+",");
//DTW
DTW_DistanceBasic dtw = new DTW_DistanceBasic();
knn = new kNN(dtw);
correct = getCorrect(knn, train, test);
acc = (double)correct/test.numInstances();
err = (1-acc)*100;
System.out.print(df.format(err)+",");
// DDTW
DTW_DistanceBasic dDtw = new DTW_DistanceBasic();
knn = new kNN(dDtw);
correct = getCorrect(knn, dTrain, dTest);
acc = (double)correct/test.numInstances();
err = (1-acc)*100;
System.out.print(df.format(err)+",");
// DDDTW
DD_DTW dd_dtw = new DD_DTW(DistanceType.DTW);
correct = getCorrect(dd_dtw, train, test);
acc = (double)correct/test.numInstances();
err = (1-acc)*100;
System.out.println(df.format(err));
}
}
public static void main(String[] args){
// option 1: simple example of the classifier
// option 2: recreate the results from the original published work
int option = 1;
try{
if(option==1){
String dataName = "ItalyPowerDemand";
Instances train = DatasetLoading.loadDataNullable(DATA_DIR+dataName+"/"+dataName+"_TRAIN");
Instances test = DatasetLoading.loadDataNullable(DATA_DIR+dataName+"/"+dataName+"_TEST");
// create the classifier, using DTW as the distance function as an example
DD_DTW nndw = new DD_DTW(DistanceType.DTW);;
// params a and b have not been explicitly set, so buildClassifier will cv to find them
nndw.buildClassifier(train);
int correct = 0;
for(int i = 0; i < test.numInstances(); i++){
if(nndw.classifyInstance(test.instance(i))==test.instance(i).classValue()){
correct++;
}
}
System.out.println(dataName+":\t"+new DecimalFormat("#.###").format((double)correct/test.numInstances()*100)+"%");
}else if(option==2){
recreateResultsTable();
}
}catch(Exception e){
e.printStackTrace();
}
}
protected static int getCorrect(kNN knn, Instances train, Instances test) throws Exception{
knn.buildClassifier(train);
int correct = 0;
for(int i = 0; i < test.numInstances(); i++){
if(test.instance(i).classValue()==knn.classifyInstance(test.instance(i))){
correct++;
}
}
return correct;
}
}
| 27,737 | 34.245235 | 142 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/DTD_C.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.distance_based;
import experiments.data.DatasetLoading;
import java.text.DecimalFormat;
import machine_learning.classifiers.kNN;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.TechnicalInformation;
import tsml.classifiers.legacy.elastic_ensemble.distance_functions.DTW_DistanceBasic;
import tsml.transformers.*;
/**
*
* @author Jason Lines (j.lines@uea.ac.uk)
* @author modified by Aaron Bostrom
*
* Implementation of:
* Gorecki, Tomasz, and Maciej Luczak.
* Non-isometric transforms in time series classification using DTW
* Knowledge-Based Systems 61 (2014): 98-108.
*
*/
/*
=========================================================================================================================
BRIEF DESCRIPTION:
=========================================================================================================================
The classifier is highly related to DD_DTW; however, instead of using a weighted combination of the raw
data representation and derivatives, this classifier replaces derivatives with either Sine, Cosine or Hilbert-transformed
data. It should also be noted that unlike the aforementioned class, this classifier does not use ED, and uses only a
full-window DTW implementation. Two params are again used to weight the classifier, a and b, which represent the weight
of standard DTW and transformed DTW respectively (note: only one transform is ever used at a time, so it is only DTW and
cosDTW/sinDTW/hilDTW). The transformed-DTW is simply just the DTW distance measure being used to compute distances with
transformed data.
As with the previous derivative iteration of this classifier, the params a and b are in the range of 0-1, and the sum of
a+b is always 1 (i.e. binary split in the weighting between the two classifiers). Therefore a = 1, b = 0 is equivilent to
just using DTW, and a=0, b=1 is equivilent DTW on the appropriately-transformed data.
Again, the author's propose using a single parameter alpha to weight these two components by using it to derive values of
a and b. This was ignored in this classifier however, as results indicated that this did not reproduce results for the
derivative version of the classifier (see notes in DD_DTW.java). Therefore in our experiments we search
from a = 0 to 1 and b = 1 to 0 in increments of 0.01 (101 param options) again.
=========================================================================================================================
HOW TO USE:
=========================================================================================================================
The class extends the kNN class, so classifier functionality is included. Three additional parameters should be set:
1. the type of transform to use in the classifier (Cosine/Sine/Hilbert) (default is Cosine unless specified in constructor)
2. values for a and b (defaults to a=1 and b=0, equiv to just DTW)
The params a and b can be set explicitly through a mutator. However, if not specified, the buildClassifier method
performs the LOOCV procedure outlined in the original paper to find the values of a and b using the training data.
=========================================================================================================================
RECREATING RESULTS:
=========================================================================================================================
See method recreateResultsTable()
String DATA_DIR should be changed to point to the dir of TSC problems (examples included in code)
=========================================================================================================================
RELATED CLASSES:
=========================================================================================================================
Previous iteration:
DD_DTW.java
Classes used:
Cosine.java, Sine.java, Hilbert.java
*/
/**
 * DTD_C: 1-NN classifier combining DTW on the raw series with DTW on a
 * non-isometric transform (Cosine, Sine or Hilbert) of the series, following
 * Gorecki and Luczak (2014). The superclass DD_DTW supplies the weighted
 * combination dist = a*d(raw) + b*d(transform) and, when a and b are not set
 * explicitly, a LOOCV search for them during buildClassifier.
 */
public class DTD_C extends DD_DTW{

    /**
     * Citation details for the paper this classifier implements.
     * @return TechnicalInformation for Gorecki and Luczak, KBS 61, 2014
     */
    @Override
    public TechnicalInformation getTechnicalInformation() {
        TechnicalInformation result;
        result = new TechnicalInformation(TechnicalInformation.Type.ARTICLE);
        result.setValue(TechnicalInformation.Field.AUTHOR, "T. Gorecki and M. Luczak");
        result.setValue(TechnicalInformation.Field.TITLE, "Non-isometric transforms in time series classification using DTW");
        result.setValue(TechnicalInformation.Field.JOURNAL, "Knowledge-Based Systems");
        result.setValue(TechnicalInformation.Field.VOLUME, "61");
        result.setValue(TechnicalInformation.Field.PAGES, "98-108");
        result.setValue(TechnicalInformation.Field.YEAR, "2014");
        return result;
    }

    // Location of the TSC problem files used by recreateResultsTable() and main().
    // NOTE(review): machine-specific path - edit before running locally.
    public static final String DATA_DIR = "C:/Temp/Dropbox/TSC Problems/";
//    public static final String DATA_DIR = "/Users/Jay/Dropbox/TSC Problems/";

    // Candidate alpha values, 1.00 to 1.57 in steps of 0.01. Not referenced in
    // this class directly - presumably consumed by the DD_DTW parameter search;
    // verify against the superclass before removing.
    public static final double[] ALPHAS = {
        //<editor-fold defaultstate="collapsed" desc="alpha values">
        1,
        1.01,
        1.02,
        1.03,
        1.04,
        1.05,
        1.06,
        1.07,
        1.08,
        1.09,
        1.1,
        1.11,
        1.12,
        1.13,
        1.14,
        1.15,
        1.16,
        1.17,
        1.18,
        1.19,
        1.2,
        1.21,
        1.22,
        1.23,
        1.24,
        1.25,
        1.26,
        1.27,
        1.28,
        1.29,
        1.3,
        1.31,
        1.32,
        1.33,
        1.34,
        1.35,
        1.36,
        1.37,
        1.38,
        1.39,
        1.4,
        1.41,
        1.42,
        1.43,
        1.44,
        1.45,
        1.46,
        1.47,
        1.48,
        1.49,
        1.5,
        1.51,
        1.52,
        1.53,
        1.54,
        1.55,
        1.56,
        1.57
        //</editor-fold>
    };

    // The 47 UCR/UEA datasets evaluated in the original paper; trailing
    // comments give train size, test size, series length, #classes.
    public static final String[] PAPER_DATASETS = {
        //<editor-fold defaultstate="collapsed" desc="Datasets from the paper">
        "fiftywords", // 450,455,270,50
        "Adiac", // 390,391,176,37
        "Beef", // 30,30,470,5
        "Car", // 60,60,577,4
        "CBF", // 30,900,128,3
        "ChlorineConcentration", // 467,3840,166,3
        "CinC_ECG_torso", // 40,1380,1639,4
        "Coffee", // 28,28,286,2
        "Cricket_X", // 390,390,300,12
        "Cricket_Y", // 390,390,300,12
        "Cricket_Z", // 390,390,300,12
        "DiatomSizeReduction", // 16,306,345,4
        "ECGFiveDays", // 23,861,136,2
        "FaceAll", // 560,1690,131,14
        "FaceFour", // 24,88,350,4
        "FacesUCR", // 200,2050,131,14
        "fish", // 175,175,463,7
        "GunPoint", // 50,150,150,2
        "Haptics", // 155,308,1092,5
        "InlineSkate", // 100,550,1882,7
        "ItalyPowerDemand", // 67,1029,24,2
        "Lightning2", // 60,61,637,2
        "Lightning7", // 70,73,319,7
        "MALLAT", // 55,2345,1024,8
        "MedicalImages", // 381,760,99,10
        "MoteStrain", // 20,1252,84,2
        "NonInvasiveFatalECG_Thorax1", // 1800,1965,750,42
        "NonInvasiveFatalECG_Thorax2", // 1800,1965,750,42
        "OliveOil", // 30,30,570,4
        "OSULeaf", // 200,242,427,6
        "Plane", // 105,105,144,7
        "SonyAIBORobotSurface", // 20,601,70,2
        "SonyAIBORobotSurfaceII", // 27,953,65,2
        "StarLightCurves", // 1000,8236,1024,3
        "SwedishLeaf", // 500,625,128,15
        "Symbols", // 25,995,398,6
        "SyntheticControl", // 300,300,60,6
        "Trace", // 100,100,275,4
        "TwoPatterns", // 1000,4000,128,4
        "TwoLeadECG", // 23,1139,82,2
        "UWaveGestureLibrary_X", // 896,3582,315,8
        "UWaveGestureLibrary_Y", // 896,3582,315,8
        "UWaveGestureLibrary_Z", // 896,3582,315,8
        "wafer", // 1000,6164,152,2
        "WordSynonyms", // 267,638,270,25
        "yoga" // 300,3000,426,2
        //</editor-fold>
    };

    /** The transform applied to the series before the second DTW distance. */
    public static enum TransformType{SIN,COS,HIL};

    private TransformType transformType;

    /** Default constructor: uses the Cosine transform. */
    public DTD_C(){
        super();
        this.transformType = TransformType.COS;
        this.distanceFunction = new TransformWeightedDTW(this.transformType);
    }

    /**
     * @param transformType the transform to combine with raw-series DTW
     */
    public DTD_C(TransformType transformType){
        super();
        this.transformType = transformType;
        this.distanceFunction = new TransformWeightedDTW(this.transformType);
    }

    /** @return superclass parameter string, extended with the transform type */
    @Override
    public String getParameters() {
        return super.getParameters()+",transformType,"+this.transformType;
    }

    /**
     * Distance function providing the two unweighted component distances:
     * DTW on the raw pair and DTW on the transformed pair. The weighting by
     * a and b is applied by the GoreckiDerivativesDTW/DD_DTW machinery.
     */
    public static class TransformWeightedDTW extends DD_DTW.GoreckiDerivativesDTW{

        private TransformType transformType;

        public TransformWeightedDTW(TransformType transformType){
            super();
            this.transformType = transformType;
        }

        /**
         * Computes the two component distances for a pair of instances.
         * Note: an unused local ("classPenalty") present in earlier versions
         * has been removed - it was computed but never read.
         * @return {sqrt(DTW(raw)), sqrt(DTW(transformed))}
         */
        public double[] getNonScaledDistances(Instance first, Instance second){
            DTW_DistanceBasic dtw = new DTW_DistanceBasic();
            // Bundle the pair into a temporary 2-instance dataset so the
            // transformer can be applied to both series in one call.
            Instances temp = new Instances(first.dataset(),0);
            temp.add(first);
            temp.add(second);
            Transformer bf=null;
            switch(this.transformType){
                case COS:
                    bf=new Cosine();
                    break;
                case SIN:
                    bf=new Sine();
                    break;
                case HIL: default:
                    bf=new Hilbert();
                    break;
            }
            temp = bf.transform(temp);
            double dist = dtw.distance(first, second);
            double transDist = dtw.distance(temp.get(0), temp.get(1), Double.MAX_VALUE);
            return new double[]{Math.sqrt(dist),Math.sqrt(transDist)};
        }
    }

    /**
     * Recreates the error-rate table from the original paper: for each dataset,
     * 1-NN DTW on each transform alone, then the weighted raw+transform
     * combination for each transform. Prints percentage error rates as CSV.
     * @throws Exception if data loading or classifier building fails
     */
    public static void recreateResultsTable() throws Exception{
        System.out.println("Recreating Results from Gorecki 2:");
        Instances train, test;
        DTW_DistanceBasic dtw;
        kNN knn;
        double acc, err;
        int correct;
        DecimalFormat df = new DecimalFormat("#.##");
        Instances transTrain, transTest;
        Transformer[] transforms = {new Cosine(), new Sine(), new Hilbert()};
        TransformType[] transformTypes = {TransformType.COS,TransformType.SIN,TransformType.HIL};
        System.out.println("Dataset,fullCosDTW,fullSinDTW,fullHilDTW,weightedCosDTW,weightedSinDTW,weightedHilDTW");
        for(String dataset:PAPER_DATASETS){
            System.out.print(dataset+",");
            train = DatasetLoading.loadDataNullable(DATA_DIR+dataset+"/"+dataset+"_TRAIN");
            test = DatasetLoading.loadDataNullable(DATA_DIR+dataset+"/"+dataset+"_TEST");
            // DTW on only the transformed data first
            for(Transformer transform:transforms){
                transTrain = transform.transform(train);
                transTest = transform.transform(test);
                dtw = new DTW_DistanceBasic();
                knn = new kNN();
                knn.setDistanceFunction(dtw);
                correct = getCorrect(knn, transTrain, transTest);
                acc = (double)correct/test.numInstances();
                err = (1-acc)*100;
                System.out.print(df.format(err)+",");
            }
            // now use a combination of the raw and transform
            for(TransformType transform:transformTypes){
                DTD_C tdtw = new DTD_C(transform);
                correct = getCorrect(tdtw, train, test);
                acc = (double)correct/test.numInstances();
                err = (1-acc)*100;
                System.out.print(df.format(err)+",");
            }
            System.out.println("");
        }
    }

    /**
     * Demo entry point.
     * option 1: simple train/test example on ItalyPowerDemand.
     * option 2: recreate the results table from the published work.
     */
    public static void main(String[] args){
        int option = 1;
        try{
            if(option==1){
                String dataName = "ItalyPowerDemand";
                Instances train = DatasetLoading.loadDataNullable(DATA_DIR+dataName+"/"+dataName+"_TRAIN");
                Instances test = DatasetLoading.loadDataNullable(DATA_DIR+dataName+"/"+dataName+"_TEST");
                // create the classifier, using cosine in the distance calculations as an example
                DTD_C nntw = new DTD_C(TransformType.COS);
                // params a and b have not been explicitly set, so buildClassifier will cv to find them
                nntw.buildClassifier(train);
                int correct = 0;
                for(int i = 0; i < test.numInstances(); i++){
                    if(nntw.classifyInstance(test.instance(i))==test.instance(i).classValue()){
                        correct++;
                    }
                }
                System.out.println(dataName+":\t"+new DecimalFormat("#.###").format((double)correct/test.numInstances()*100)+"%");
            }else if(option==2){
                recreateResultsTable();
            }
        }catch(Exception e){
            e.printStackTrace();
        }
    }
}
| 13,996 | 37.243169 | 130 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/DTWCV.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.distance_based;
import java.util.ArrayList;
import tsml.classifiers.legacy.elastic_ensemble.distance_functions.DTW;
import tsml.classifiers.legacy.elastic_ensemble.distance_functions.DTW_DistanceBasic;
import java.util.HashMap;
import evaluation.storage.ClassifierResults;
import experiments.data.DatasetLoading;
import java.util.concurrent.TimeUnit;
import machine_learning.classifiers.SaveEachParameter;
import weka.core.*;
import tsml.classifiers.EnhancedAbstractClassifier;
import tsml.classifiers.ParameterSplittable;
/*
* The reason for specialising is this class has the option of searching for
the optimal window length
* through a grid search of values.
*
* By default this class does not do a search of window size.
* To search for the window size call
* optimiseWindow(true);
* By default, this does a leave one out cross validation on every possible
window size, then sets the proportion to the one with the largest accuracy,
ties taking the smallest window (slow)
. This will be slow, and not how the Keogh group do it. They do a stepwise increase
of window by 1% until there is no improvement for three steps.
This has two possible speedups
1. Optimize window. This starts at full window, w=100%, and records the maximum warp
made over the data set, say k. Rather than move to w=w-1 it moves to w=k if k<w-1,
thus saving many evaluations
2. Early abandon on a window. If, during the accuracy calculation for a single window size,
the accuracy cannot be better than the best so far, we can quit.
3. Early abandon on the nearest neighbour calculation. One obvious speed up is
to store the distance matrix for a given window size. This requires O(n^2) extra
memory and means you cannot early abandon individual distances.
DONE: avoid repeated evaluations for short series. Needs a debug
2. Set up check pointing
CHECK THIS: For implementation reasons, a window size of 1
is equivalent to Euclidean distance (rather than a window size of 0
*
* @author: ajb
*/
/**
 * 1-NN classifier using windowed DTW, with an optional search for the best
 * warping-window size (as a percentage of series length) via leave-one-out
 * cross-validation on the training data. The search starts at
 * maxPercentageWarp and works downwards, with three speedups visible below:
 * skipping percentages that map to an already-evaluated absolute window,
 * early abandon inside the CV when a window cannot beat the best accuracy so
 * far, and capping subsequent windows at the largest warp actually used.
 */
public class DTWCV extends EnhancedAbstractClassifier implements SaveEachParameter,ParameterSplittable{
    private boolean optimiseWindow=false;   // if true, buildClassifier grid-searches the window size
    private double windowSize=1;            // NOTE(review): not read by the visible code - possibly vestigial
    private int maxPercentageWarp=100;      // search starts from this window percentage
    private Instances train;                // reference to the training data (also used at predict time)
    private int trainSize;
    private int bestWarp;                   // chosen warp in attributes: (maxR * seriesLength)/100
    private int maxWindowSize;              // largest warp observed during one CV pass
    DTW_DistanceBasic dtw;
    HashMap<Integer,Double> distances;      // cache of non-abandoned pairwise distances, keyed i*trainSize+j
    double maxR=1;                          // best window percentage found by the search
    ArrayList<Double> accuracy=new ArrayList<>();   // CV accuracy for each window size evaluated
    protected String resultsPath;
    protected boolean saveEachParaAcc=false;
    /** Sets the results path and switches on per-parameter saving. */
    @Override
    public void setPathToSaveParameters(String r){
            resultsPath=r;
            setSaveEachParaAcc(true);
    }
    @Override
    public void setSaveEachParaAcc(boolean b){
        saveEachParaAcc=b;
    }
    /** Unsupported: a top-level CV estimate would reuse the biased tuning CV. */
    public void setFindTrainAccuracyEstimate(boolean setCV){
        if(setCV==true)
            throw new UnsupportedOperationException("Doing a top leve CV is not yet possible for DTWCV. It cross validates to optimize, so could store those, but will be biased"); //To change body of generated methods, choose Tools | Templates.
    }
    /** Default constructor: standard DTW distance. */
    public DTWCV(){
        super(CAN_ESTIMATE_OWN_PERFORMANCE);
        dtw=new DTW();
        accuracy=new ArrayList<>();
    }
    /** Constructor taking the DTW variant to use. */
    public DTWCV(DTW_DistanceBasic d){
        super(CAN_ESTIMATE_OWN_PERFORMANCE);
        dtw=d;
        accuracy=new ArrayList<>();
    }
    /** @return build stats, best warp percentage and all per-window CV accuracies */
    @Override
    public String getParameters() {
        String result="BuildTime,"+trainResults.getBuildTime()+",CVAcc,"+trainResults.getAcc()+",Memory,"+trainResults.getMemory();
        result+=",BestWarpPercent,"+bestWarp+"AllAccs,";
        for(double d:accuracy)
            result+=","+d;
        return result;
    }
    public double getMaxR(){ return maxR;}
    public void setMaxPercentageWarp(int a){maxPercentageWarp=a;}
    public void optimiseWindow(boolean b){ optimiseWindow=b;}
    public void setR(double r){dtw.setR(r);}
    public double getR(){ return dtw.getR();}
    public int getBestWarp(){ return bestWarp;}
    public int getWindowSize(){ return dtw.getWindowSize(train.numAttributes()-1);}
    /**
     * Optionally searches window percentages maxPercentageWarp..0 by LOOCV,
     * fixes dtw to the best one found, records build time/memory, and (if
     * requested) stores a LOOCV train estimate made with the chosen window.
     * @param d training data; a reference is kept for 1-NN prediction
     */
    @Override
    public void buildClassifier(Instances d) throws Exception{
        trainResults =new ClassifierResults();
        long t=System.nanoTime();
        train=d;
        trainSize=d.numInstances();
        if(optimiseWindow){
            maxR=0;
            double maxAcc=0;
            int dataLength=train.numAttributes()-1;
/* If the data length < 100 then there will be some repetition;
   could skip some values. e.g.
            if(dataLength<maxNosWindows)
                maxPercentageWarp=dataLength;
*/
            double previousPercentage=0;
            for(int i=maxPercentageWarp;i>=0;i-=1){
                // Set r for the current value as the percentage of series length.
                // Skip this percentage if it maps to the same absolute window
                // size as the previous one (happens for short series).
                int previousWindowSize=dtw.findWindowSize(previousPercentage,d.numAttributes()-1);
                int newWindowSize=dtw.findWindowSize(i/100.0,d.numAttributes()-1);
                if(previousWindowSize==newWindowSize)// no point doing this one
                    continue;
                previousWindowSize=newWindowSize;
                dtw.setR(i/100.0);
                /* Early abandon happens inside crossValidateAccuracy: if this
                   window cannot beat maxAcc with the instances left to
                   evaluate, it returns 0.0 immediately. */
                double acc=crossValidateAccuracy(maxAcc);
                accuracy.add(acc);
                if(acc>maxAcc){
                    maxR=i;
                    maxAcc=acc;
                }
//                System.out.println(" r="+i+" warpsize ="+x+" train acc= "+acc+" best acc ="+maxR);
                /* Windows larger than the maximum warp actually used in the
                   previous CV pass cannot change anything - jump i down. */
                if(maxWindowSize<(i-1)*dataLength/100){
                    System.out.println("WINDOW SIZE ="+dtw.getWindowSize()+" Can reset downwards at "+i+"% to ="+((int)(100*(maxWindowSize/(double)dataLength))));
                    i=(int)(100*(maxWindowSize/(double)dataLength));
                    i++;  // +1 because the loop header will decrement i
//                    i=Math.round(100*(maxWindowSize/(double)dataLength))/100;
                }
            }
            bestWarp=(int)(maxR*dataLength/100);
            System.out.println("OPTIMAL WINDOW ="+maxR+" % which gives a warp of"+bestWarp+" data");
//            dtw=new DTW();
            dtw.setR(maxR/100.0);
            trainResults.setAcc(maxAcc);
        }
        try {
            trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
            trainResults.setBuildTime(System.nanoTime()-t);
        } catch (Exception e) {
            System.err.println("Inheritance preventing me from throwing this error...");
            System.err.println(e);
        }
        Runtime rt = Runtime.getRuntime();
        long usedBytes = (rt.totalMemory() - rt.freeMemory());
        trainResults.setMemory(usedBytes);
        if(getEstimateOwnPerformance()){ //Save basic train results
            // LOOCV estimate: temporarily remove each instance, classify it
            // against the rest, then restore it in place.
            long estTime = System.nanoTime();
            for(int i=0;i<train.numInstances();i++){
                Instance test=train.remove(i);
                long predTime = System.nanoTime();
                int pred=(int)classifyInstance(test);
                predTime = System.nanoTime() - predTime;
                double[] dist = new double[train.numClasses()];
                dist[pred] = 1.0;
                trainResults.addPrediction(test.classValue(), dist, pred, predTime, "");
                train.add(i,test);
            }
            estTime = System.nanoTime() - estTime;
            trainResults.setErrorEstimateTime(estTime);
            trainResults.setErrorEstimateMethod("cv_loo");
            trainResults.setEstimatorName("DTWCV");
            trainResults.setDatasetName(train.relationName());
            trainResults.setSplit("train");
            //no foldid/seed
            trainResults.setNumClasses(train.numClasses());
            trainResults.setParas(getParameters());
            trainResults.finaliseResults();
        }
    }
    /**
     * 1-NN prediction with early abandon: the current best distance is passed
     * to dtw.distance as a cutoff. Ties on distance keep the earlier instance.
     */
    @Override
    public double classifyInstance(Instance d){
        /*Basic distance, with early abandon. This is only for 1-nearest neighbour*/
        double minSoFar=Double.MAX_VALUE;
        double dist; int index=0;
        for(int i=0;i<train.numInstances();i++){
            dist=dtw.distance(train.instance(i),d,minSoFar);
            if(dist<minSoFar){
                minSoFar=dist;
                index=i;
            }
        }
        return train.instance(index).classValue();
    }
    /** @return a degenerate distribution: probability 1 on the 1-NN class */
    @Override
    public double[] distributionForInstance(Instance instance){
        double[] dist=new double[instance.numClasses()];
        dist[(int)classifyInstance(instance)]=1;
        return dist;
    }
/**Could do this by calculating the distance matrix, but then
 * you cannot use the early abandon. Early abandon about doubles the speed,
 * as will storing the distances. Given the extra n^2 memory, probably better
 * to just use the early abandon. We could store those that were not abandoned?
 answer is to store those without the abandon in a hash table indexed by i and j,
 *where index i,j == j,i
 * @return LOOCV accuracy under the current window, or 0.0 if early-abandoned
 */
    private double crossValidateAccuracy(double maxAcc){
        double a=0,d, minDist;
        int nearest;
        Instance inst;
        int bestNosCorrect=(int)(maxAcc*trainSize);   // abandon threshold
        maxWindowSize=0;
        int w;
        distances=new HashMap<>(trainSize);
        for(int i=0;i<trainSize;i++){
            //Find nearest to element i
            nearest=0;
            minDist=Double.MAX_VALUE;
            inst=train.instance(i);
            for(int j=0;j<trainSize;j++){
                if(i!=j){
//                    d=dtw.distance(inst,train.instance(j),minDist);
                    //Store past distances if not early abandoned
                    //Not seen i,j before
                    if(j>i){
                        d=dtw.distance(inst,train.instance(j),minDist);
                        //Store if not early abandon
                        if(d!=Double.MAX_VALUE){
//                            System.out.println(" Storing distance "+i+" "+j+" d="+d+" with key "+(i*trainSize+j));
                            distances.put(i*trainSize+j,d);
//                            storeCount++;
                        }
                    //Else if stored, recover the exact value
                    }else if(distances.containsKey(j*trainSize+i)){
                        d=distances.get(j*trainSize+i);
//                        System.out.println(" Recovering distance "+i+" "+j+" d="+d);
//                        recoverCount++;
                    }
                    //Else recalculate with new early abandon
                    else{
                        d=dtw.distance(inst,train.instance(j),minDist);
                    }
                    if(d<minDist){
                        nearest=j;
                        minDist=d;
                        // Track the widest warp used, so the caller can skip
                        // window sizes that could not change any distance.
                        w=dtw.findMaxWindow();
                        if(w>maxWindowSize)
                            maxWindowSize=w;
                    }
                }
            }
            //Measure accuracy for nearest to element i
            if(inst.classValue()==train.instance(nearest).classValue())
                a++;
            //Early abandon if it cannot be better than the best so far.
            if(a+trainSize-i<bestNosCorrect){
//                System.out.println(" Early abandon on CV when a="+a+" and i ="+i+" best nos correct = "+bestNosCorrect+" maxAcc ="+maxAcc+" train set size ="+trainSize);
                return 0.0;
            }
        }
//        System.out.println("trainSize ="+trainSize+" stored ="+storeCount+" recovered "+recoverCount);
        return a/(double)trainSize;
    }
    /** Ad-hoc local test harness. NOTE(review): hard-coded machine-specific path. */
    public static void main(String[] args) throws Exception{
        DTWCV c = new DTWCV();
        String path="C:\\Research\\Data\\Time Series Data\\Time Series Classification\\";
        Instances test=DatasetLoading.loadDataNullable(path+"Coffee\\Coffee_TEST.arff");
        Instances train=DatasetLoading.loadDataNullable(path+"Coffee\\Coffee_TRAIN.arff");
        train.setClassIndex(train.numAttributes()-1);
        c.buildClassifier(train);
    }
    // Capability checking is not implemented for this classifier.
    @Override
    public Capabilities getCapabilities() {
        throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
    }
    @Override
    public void setParamSearch(boolean b) {
        throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
    }
    @Override
    public void setParametersFromIndex(int x) {
        throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
    }
}
| 13,985 | 39.53913 | 244 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/DTW_kNN.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.distance_based;
import machine_learning.classifiers.kNN;
import weka.core.*;
import tsml.classifiers.legacy.elastic_ensemble.distance_functions.DTW;
/* This class is a specialisation of kNN that can only be used with the efficient DTW distance
*
* The reason for specialising is this class has the option of searching for the optimal window length
* through a grid search of values.
*
* By default this class does not do a search (optimiseWindow=false).
* To search for the window size call
* optimiseWindow(true);
* By default, this does a leave one out cross validation on every possible window size, then sets the
* proportion to the one with the largest accuracy. This will be slow. Speed it up by
*
* 1. Set the max window size to consider by calling
* setMaxWindowSize(double r) where r is on range 0..1, with 1 being a full warp.
*
* 2. Set the increment size
* setIncrementSize(int s) where s is on range 1...trainSetSize
*
* This is a basic brute force implementation,
* @author Tony Bagnall, circa 2012?
*/
/**
 * k-NN classifier specialised for the windowed DTW distance, with an optional
 * brute-force grid search over warping-window sizes using leave-one-out
 * cross-validation on the training data. The search tries warps 0, 
 * incrementSize, 2*incrementSize, ... up to maxWindowSize * seriesLength and
 * keeps the first window achieving the best CV accuracy.
 */
public class DTW_kNN extends kNN {

    private boolean optimiseWindow = false;   // search window sizes in buildClassifier?
    private double windowSize = 0.1;          // initial warp proportion, range 0..1
    private double maxWindowSize = 1;         // largest warp proportion the search considers
    private int incrementSize = 10;           // step, in attributes, between candidate warps
    private Instances train;
    private int trainSize;
    private int bestWarp;                     // chosen warp, in attributes
    DTW dtw = new DTW();
//    DTW_DistanceEfficient dtw=new DTW_DistanceEfficient();

    /** Default: 1-NN with the window fixed at windowSize; no parameter search. */
    public DTW_kNN() {
        super();
        dtw.setR(windowSize);
        setDistanceFunction(dtw);
        super.setKNN(1);
    }

    /** k-NN variant; the window search is always enabled. */
    public DTW_kNN(int k) {
        super(k);
        dtw.setR(windowSize);
        optimiseWindow = true;
        setDistanceFunction(dtw);
    }

    /** @return key settings of this classifier as a comma-separated string */
    public String getParameters() {
        return "BestWarp," + bestWarp + ",IncrementSize," + incrementSize + ",OptimiseWindow," + optimiseWindow;
    }

    public void optimiseWindow(boolean b) {
        optimiseWindow = b;
    }

    public void setMaxR(double r) {
        maxWindowSize = r;
    }

    public void setIncrementSize(int x) {
        incrementSize = x;
    }

    /**
     * Optionally grid-searches the warping window by LOOCV, fixes dtw to the
     * best proportion found, then delegates to the standard kNN build.
     * @param d training data
     */
    public void buildClassifier(Instances d) throws Exception {
        dist.setInstances(d);
        train = d;
        trainSize = d.numInstances();
        if (optimiseWindow) {
            /* Series length: taken from the first relational instance for
               relational (multivariate) data, otherwise all attributes bar
               the class. The r passed to DTW is warp/seriesLength in 0..1. */
            int seriesLength;
            if (train.attribute(0).isRelationValued()) {
                seriesLength = train.instance(0).relationalValue(0).instance(0).numAttributes();
            } else {
                seriesLength = train.numAttributes() - 1;
            }
            int maxWarp = (int) (seriesLength * maxWindowSize);
            System.out.println(" MAX =" + maxWarp + " increment size =" + incrementSize);
            double bestR = 0;
            double bestAcc = 0;
            for (double warp = 0; warp < maxWarp; warp += incrementSize) {
                dtw.setR(warp / (double) seriesLength);
                double acc = crossValidateAccuracy();
                System.out.println("\ti=" + warp + " r=" + (warp / (double) seriesLength) + " Acc = " + acc);
                if (acc > bestAcc) {   // strict >: ties keep the smaller window
                    bestR = warp / seriesLength;
                    bestAcc = acc;
                    System.out.println(" Best so far =" + bestR + " Warps =" + warp + " has Accuracy =" + bestAcc);
                }
            }
            bestWarp = (int) (bestR * seriesLength);
            dtw.setR(bestR);
        }
        // Then just use the normal kNN build with the (tuned) DTW distance.
        super.buildClassifier(d);
    }

    /**
     * Leave-one-out CV accuracy of 1-NN under the current DTW window. Ties on
     * distance keep the earlier training instance (strict less-than), and the
     * running minimum is passed to dtw.distance as an early-abandon cutoff.
     */
    private double crossValidateAccuracy() {
        double numCorrect = 0;
        for (int i = 0; i < trainSize; i++) {
            Instance query = train.instance(i);
            int nearest = 0;
            double minDist = Double.MAX_VALUE;
            for (int j = 0; j < trainSize; j++) {
                if (i == j) {
                    continue;   // never match an instance against itself
                }
                double dij = dtw.distance(query, train.instance(j), minDist);
                if (dij < minDist) {
                    nearest = j;
                    minDist = dij;
                }
            }
            if (query.classValue() == train.instance(nearest).classValue()) {
                numCorrect++;
            }
        }
        return numCorrect / (double) trainSize;
    }

    /** @return capabilities: numeric or relational attributes, nominal class. */
    @Override
    public Capabilities getCapabilities() {
        Capabilities caps = super.getCapabilities();
        caps.disableAll();
        // attributes must be numeric
        caps.enable(Capabilities.Capability.NUMERIC_ATTRIBUTES);
        // can only handle a discrete class
        caps.enable(Capabilities.Capability.NOMINAL_CLASS);
        // at least one training instance
        caps.setMinimumNumberInstances(1);
        caps.enable(Capabilities.Capability.RELATIONAL_ATTRIBUTES);
        return caps;
    }
}
| 5,706 | 36.058442 | 103 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/DistanceBasedTests.java | package tsml.classifiers.distance_based;
import distance.elastic.Euclidean;
import distance.elastic.TWE;
import experiments.data.DatasetLoading;
import tsml.classifiers.distance_based.distances.DistanceMeasure;
import tsml.classifiers.distance_based.distances.dtw.DTWDistance;
import tsml.classifiers.distance_based.distances.ed.EDistance;
import tsml.classifiers.distance_based.distances.erp.ERPDistance;
import tsml.classifiers.distance_based.distances.lcss.LCSSDistance;
import tsml.classifiers.distance_based.distances.msm.MSMDistance;
import tsml.classifiers.distance_based.distances.twed.TWEDistance;
import tsml.classifiers.distance_based.distances.wdtw.WDTWDistance;
import tsml.classifiers.distance_based.knn.KNN;
import tsml.classifiers.legacy.elastic_ensemble.LCSS1NN;
import weka.classifiers.Classifier;
import weka.core.DistanceFunction;
import weka.core.EuclideanDistance;
import weka.core.Instance;
import weka.core.Instances;
import static tsml.classifiers.distance_based.distances.wdtw.spaces.WDDTWDistanceSpace.newWDDTWDistance;
/**
* Tests to mimic those done is sktime unit tests. This is not a unit test for tsml, but could become so
*
*/
/**
 * Ad-hoc harness mimicking the sktime distance-measure unit tests: builds a
 * seeded 1-NN classifier on ArrowHead for each of eight elastic distance
 * measures and prints the number of correct test predictions per measure.
 * Not a tsml unit test (yet).
 */
public class DistanceBasedTests {

    public static void main(String[] args) throws Exception {
        kNNTest();
    }

    /**
     * Counts the instances in data that c classifies correctly.
     * @param c a built classifier
     * @param data instances to score
     * @return number of correct predictions
     */
    private static int countCorrect(Classifier c, Instances data) throws Exception {
        int correct = 0;
        for (Instance ins : data) {
            if (c.classifyInstance(ins) == ins.classValue()) {
                correct++;
            }
        }
        return correct;
    }

    /**
     * Runs seeded 1-NN on ArrowHead with eight distance measures (ED, DTW,
     * MSM, WDTW, ERP, LCSS, TWE, WDDTW) at default parameters, except LCSS
     * which gets window 3/seriesLength and epsilon 0.05, and prints the
     * correct count for each.
     */
    public static void kNNTest() throws Exception {
        String dataDir = DatasetLoading.BAKED_IN_TSC_DATA_PATH;
        String problem = "ArrowHead";
        Instances train = DatasetLoading.loadData(dataDir + problem + "/" + problem + "_TRAIN");
        Instances test = DatasetLoading.loadData(dataDir + problem + "/" + problem + "_TEST");
        int numDistances = 8;
        int[] correct = new int[numDistances];
        DistanceMeasure[] measures = new DistanceMeasure[numDistances];
        measures[0] = new EDistance();
        measures[1] = new DTWDistance();
        measures[2] = new MSMDistance();
        measures[3] = new WDTWDistance();
        measures[4] = new ERPDistance();
        measures[5] = new LCSSDistance();
        measures[6] = new TWEDistance();
        measures[7] = newWDDTWDistance();
        // LCSS needs explicit parameters to match the reference results.
        ((LCSSDistance) measures[5]).setWindow(3d / (train.numAttributes() - 1));
        ((LCSSDistance) measures[5]).setEpsilon(0.05);
        int i = 0;
        for (DistanceMeasure d : measures) {
            KNN knn = new KNN();
            knn.setSeed(0);   // fixed seed so tie-breaking is reproducible
            knn.setDistanceMeasure(d);
            knn.buildClassifier(train);
            correct[i] = countCorrect(knn, test);
            System.out.println("Distance measure " + d + " gets " + correct[i] + " correct out of " + test.numInstances());
            i++;
        }
    }
}
| 5,424 | 37.204225 | 124 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/ElasticEnsemble.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.distance_based;
import java.io.File;
import java.util.ArrayList;
import java.util.Random;
import java.util.Scanner;
import tsml.classifiers.legacy.elastic_ensemble.DTW1NN;
import tsml.classifiers.legacy.elastic_ensemble.ED1NN;
import tsml.classifiers.legacy.elastic_ensemble.ERP1NN;
import tsml.classifiers.legacy.elastic_ensemble.Efficient1NN;
import tsml.classifiers.legacy.elastic_ensemble.LCSS1NN;
import tsml.classifiers.legacy.elastic_ensemble.MSM1NN;
import tsml.classifiers.legacy.elastic_ensemble.TWE1NN;
import tsml.classifiers.legacy.elastic_ensemble.WDTW1NN;
import tsml.transformers.Derivative;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import utilities.WritableTestResults;
import experiments.data.DatasetLoading;
import java.util.concurrent.TimeUnit;
import tsml.classifiers.EnhancedAbstractClassifier;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
/**
* A new Elastic Ensemble for sharing with others
@article{lines15elastic,
title={Time Series Classification with Ensembles of Elastic Distance Measures},
author={J. Lines and A. Bagnall},
journal={Data Mining and Knowledge Discovery},
volume={29},
issue={3},
pages={565--592},
year={2015}
}
* @author sjx07ngu
*/
public class ElasticEnsemble extends EnhancedAbstractClassifier implements WritableTestResults,TechnicalInformationHandler{
@Override
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
result = new TechnicalInformation(TechnicalInformation.Type.ARTICLE);
result.setValue(TechnicalInformation.Field.AUTHOR, "J. Lines and A. Bagnall");
result.setValue(TechnicalInformation.Field.TITLE, "Time Series Classification with Ensembles of Elastic Distance Measures");
result.setValue(TechnicalInformation.Field.JOURNAL, "Data Mining and Knowledge Discovery");
result.setValue(TechnicalInformation.Field.VOLUME, "29");
result.setValue(TechnicalInformation.Field.NUMBER, "3");
result.setValue(TechnicalInformation.Field.PAGES, "565-592");
result.setValue(TechnicalInformation.Field.YEAR, "2015");
return result;
}
public enum ConstituentClassifiers{
Euclidean_1NN,
DTW_R1_1NN,
DTW_Rn_1NN,
WDTW_1NN,
DDTW_R1_1NN,
DDTW_Rn_1NN,
WDDTW_1NN,
LCSS_1NN,
MSM_1NN,
TWE_1NN,
ERP_1NN
};
public static boolean isDerivative(ConstituentClassifiers classifier){
return (classifier==ConstituentClassifiers.DDTW_R1_1NN || classifier==ConstituentClassifiers.DDTW_Rn_1NN || classifier==ConstituentClassifiers.WDDTW_1NN);
}
public static boolean isFixedParam(ConstituentClassifiers classifier){
return (classifier==ConstituentClassifiers.DDTW_R1_1NN || classifier==ConstituentClassifiers.DTW_R1_1NN || classifier==ConstituentClassifiers.Euclidean_1NN);
}
    protected ConstituentClassifiers[] classifiersToUse;   // constituents included in this ensemble
    protected String datasetName;
    protected int resampleId;
    protected String resultsDir;
    protected double[] cvAccs;     // per-constituent train CV accuracy; used as vote weights in getTrainPreds
    protected double[][] cvPreds;  // per-constituent train CV predictions, indexed [constituent][instance]
    protected boolean buildFromFile = false;
    protected boolean writeToFile = false;
    protected Instances train;
    protected Instances derTrain;  // presumably the derivative transform of train - populated outside this view
    protected Efficient1NN[] classifiers = null;
    protected boolean usesDer = false;   // presumably true when a derivative-based constituent is included
    protected static Derivative df = new Derivative();
    // utility to enable AJBs COTE
    protected double[] previousPredictions = null;
    double ensembleCvAcc =-1;            // cached ensemble train accuracy; -1 => not yet computed
    double[] ensembleCvPreds = null;     // cached ensemble train predictions; null => not yet computed
    // Capability checking is not implemented for this classifier.
    @Override
    public Capabilities getCapabilities() {
        throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
    }
public String[] getIndividualClassifierNames() {
String[] names= new String[this.classifiersToUse.length];
for(int i = 0; i < classifiersToUse.length; i++){
names[i] = classifiersToUse[i].toString();
}
return names;
}
    /** @return the train CV accuracy of each constituent classifier. */
    public double[] getIndividualCVAccs() {
        return this.cvAccs;
    }

    /**
     * Ensemble accuracy on the training data. Computed (and cached, along
     * with the train predictions) by getTrainPreds on first call.
     * @throws Exception if computing the train predictions fails
     */
    public double getTrainAcc() throws Exception {
        if(this.ensembleCvAcc != -1 && this.ensembleCvPreds!=null){
            return this.ensembleCvAcc;
        }
        this.getTrainPreds();   // side effect: sets ensembleCvAcc and ensembleCvPreds
        return this.ensembleCvAcc;
    }
    /**
     * Computes (and caches) the ensemble prediction for every training
     * instance: each constituent votes for its stored CV prediction
     * (cvPreds), weighted by its CV accuracy (cvAccs). Ties on total vote
     * weight are broken pseudo-randomly, seeded by the instance index so the
     * result is reproducible. Also populates trainResults as a "cv_loo"
     * estimate and sets ensembleCvAcc.
     * @return the cached ensemble train predictions, one per train instance
     * @throws Exception propagated from results finalisation
     */
    public double[] getTrainPreds() throws Exception {
        if(this.ensembleCvPreds!=null){
            return this.ensembleCvPreds;
        }
        long estTime = System.nanoTime();
        this.ensembleCvPreds = new double[train.numInstances()];
        double actual, pred;
        double bsfWeight;
        int correct = 0;
        ArrayList<Double> bsfClassVals;
        double[] weightByClass;
        for(int i = 0; i < train.numInstances(); i++){
            long predTime = System.nanoTime(); //TODO hack/incorrect until george overhaul in
            actual = train.instance(i).classValue();
            bsfClassVals = null;
            bsfWeight = -1;
            weightByClass = new double[train.numClasses()];
            for(int c = 0; c < classifiers.length; c++){
                // Add this constituent's weight to the class it predicted for
                // instance i, tracking the best-weighted class(es) so far.
                weightByClass[(int)this.cvPreds[c][i]]+=this.cvAccs[c];
                if(weightByClass[(int)this.cvPreds[c][i]] > bsfWeight){
                    bsfWeight = weightByClass[(int)this.cvPreds[c][i]];
                    bsfClassVals = new ArrayList<>();
                    bsfClassVals.add(this.cvPreds[c][i]);
                }else if(weightByClass[(int)this.cvPreds[c][i]] == bsfWeight){
                    bsfClassVals.add(this.cvPreds[c][i]);
                }
            }
            if(bsfClassVals.size()>1){
                // Tie: choose uniformly at random, deterministically seeded by i.
                pred = bsfClassVals.get(new Random(i).nextInt(bsfClassVals.size()));
            }else{
                pred = bsfClassVals.get(0);
            }
            if(pred==actual){
                correct++;
            }
            this.ensembleCvPreds[i]=pred;
            predTime = System.nanoTime() - predTime;
            // Record a degenerate distribution: all mass on the voted class.
            double[] dist = new double[train.numClasses()];
            dist[(int)pred]++;
            trainResults.addPrediction(actual, dist, pred, predTime, "");
        }
        estTime = System.nanoTime() - estTime;
        trainResults.setErrorEstimateTime(estTime);
        trainResults.setErrorEstimateMethod("cv_loo");
        trainResults.setEstimatorName("EE");
        trainResults.setDatasetName(train.relationName());
        trainResults.setSplit("train");
        //no foldid/seed
        trainResults.setNumClasses(train.numClasses());
        trainResults.setParas(getParameters());
        trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
        trainResults.finaliseResults();
        this.ensembleCvAcc = (double)correct/train.numInstances();
        return this.ensembleCvPreds;
    }
//    @Override
    // NOTE(review): duplicates getIndividualCVAccs (different capitalisation); presumably
    // retained for a legacy interface contract — confirm before removing either.
    public double[] getIndividualCvAccs() {
        return this.cvAccs;
    }
//    @Override
    /** @return the full matrix of constituent LOOCV predictions, [constituent][instance]. */
    public double[][] getIndividualCvPredictions() {
        return this.cvPreds;
    }
/**
* Default constructor; includes all constituent classifiers
*/
public ElasticEnsemble(){
super(CAN_ESTIMATE_OWN_PERFORMANCE);
this.classifiersToUse = ConstituentClassifiers.values();
}
/**
* Constructor allowing specific constituent classifier types to be passed
* @param classifiersToUse ConstituentClassifiers[] list of classifiers to use as enums
*/
public ElasticEnsemble(ConstituentClassifiers[] classifiersToUse){
super(CAN_ESTIMATE_OWN_PERFORMANCE);
this.classifiersToUse = classifiersToUse;
}
/**
* Constructor that builds an EE from existing training output. By default includes all constituent classifier types.
* NOTE: this DOES NOT resample data; data must be resampled independently of the classifier. This just ensures the correct naming convention of output files
*
* @param resultsDir path to the top-level of the stored training output
* @param datasetName name of the dataset to be loaded
* @param resampleId resampleId of the dataset to be loaded
*/
public ElasticEnsemble(String resultsDir, String datasetName, int resampleId){
super(CAN_ESTIMATE_OWN_PERFORMANCE);
this.resultsDir = resultsDir;
this.datasetName = datasetName;
this.resampleId = resampleId;
this.classifiersToUse = ConstituentClassifiers.values();
this.buildFromFile = true;
}
/**
* Constructor that builds an EE from existing training output. Includes the classifier types passed in as an array of enums
*
* @param resultsDir path to the top-level of the stored training output
* @param datasetName name of the dataset to be loaded
* @param resampleId resampleId of the dataset to be loaded
* @param classifiersToUse the classifiers to load
*/
public ElasticEnsemble(String resultsDir, String datasetName, int resampleId, ConstituentClassifiers[] classifiersToUse){
super(CAN_ESTIMATE_OWN_PERFORMANCE);
this.resultsDir = resultsDir;
this.datasetName = datasetName;
this.resampleId = resampleId;
this.classifiersToUse = classifiersToUse;
this.buildFromFile = true;
}
/**
* Turns on file writing to store training output. NOTE: this doesn't resample the data; data needs to be resampled independently of the classifier. This just ensures the correct naming convention for output files.
*
* @param resultsDir path to the top-level of the training output store (makes dir if it doesn't exist)
* @param datasetName identifier in the written files for this dataset
* @param resampleId resample id of the dataset
*/
public void setInternalFileWritingOn(String resultsDir, String datasetName, int resampleId){
this.resultsDir = resultsDir;
this.datasetName = datasetName;
this.resampleId = resampleId;
this.writeToFile = true;
}
/**
* Builds classifier. If building from file, cv weights and predictions will be loaded from file. If running from scratch, training cv will be performed for constituents to find best params, cv accs, and cv preds
* @param train The training data
* @throws Exception if building from file and results not found, or if there is an issue with the training data
*/
@Override
public void buildClassifier(Instances train) throws Exception{
trainResults.setBuildTime(System.nanoTime());
this.train = train;
this.derTrain = null;
usesDer = false;
this.classifiers = new Efficient1NN[this.classifiersToUse.length];
this.cvAccs = new double[classifiers.length];
this.cvPreds = new double[classifiers.length][this.train.numInstances()];
for(int c = 0; c < classifiers.length; c++){
classifiers[c] = getClassifier(this.classifiersToUse[c]);
if(isDerivative(this.classifiersToUse[c])){
usesDer = true;
}
}
if(usesDer){
this.derTrain = df.transform(train);
}
if(buildFromFile){
File existingTrainOut;
Scanner scan;
int paramId;
double cvAcc;
for(int c = 0; c < classifiers.length; c++){
existingTrainOut = new File(this.resultsDir+classifiersToUse[c]+"/Predictions/"+datasetName+"/trainFold"+this.resampleId+".csv");
if(!existingTrainOut.exists()){
throw new Exception("Error: training file doesn't exist for "+existingTrainOut.getAbsolutePath());
}
scan = new Scanner(existingTrainOut);
scan.useDelimiter("\n");
scan.next();//header
paramId = Integer.parseInt(scan.next().trim().split(",")[0]);
cvAcc = Double.parseDouble(scan.next().trim().split(",")[0]);
for(int i = 0; i < train.numInstances(); i++){
this.cvPreds[c][i] = Double.parseDouble(scan.next().split(",")[1]);
}
scan.close();
if(isDerivative(classifiersToUse[c])){
if(!isFixedParam(classifiersToUse[c])){
classifiers[c].setParamsFromParamId(derTrain, paramId);
}
classifiers[c].buildClassifier(derTrain);
}else{
if(!isFixedParam(classifiersToUse[c])){
classifiers[c].setParamsFromParamId(train, paramId);
}
classifiers[c].buildClassifier(train);
}
cvAccs[c] = cvAcc;
}
}else{
double[] cvAccAndPreds;
for(int c = 0; c < classifiers.length; c++){
if(writeToFile){
classifiers[c].setFileWritingOn(this.resultsDir, this.datasetName, this.resampleId);
}
if(isDerivative(classifiersToUse[c])){
cvAccAndPreds = classifiers[c].loocv(derTrain);
}else{
cvAccAndPreds = classifiers[c].loocv(train);
}
cvAccs[c] = cvAccAndPreds[0];
for(int i = 0; i < train.numInstances(); i++){
this.cvPreds[c][i] = cvAccAndPreds[i+1];
}
}
if(this.getEstimateOwnPerformance()){
this.getTrainPreds();
}
}
trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
trainResults.setBuildTime(System.nanoTime()-trainResults.getBuildTime());
trainResults.setParas(getParameters());
}
/**
* Returns an Efficient1NN object corresponding to the input enum. Output classifier includes the correct internal information for handling LOOCV/param tuning.
* @param classifier
* @return
* @throws Exception
*/
public static Efficient1NN getClassifier(ConstituentClassifiers classifier) throws Exception{
Efficient1NN knn = null;
switch(classifier){
case Euclidean_1NN:
return new ED1NN();
case DTW_R1_1NN:
return new DTW1NN(1);
case DDTW_R1_1NN:
knn = new DTW1NN(1);
knn.setClassifierIdentifier(classifier.toString());
return knn;
case DTW_Rn_1NN:
return new DTW1NN();
case DDTW_Rn_1NN:
knn = new DTW1NN();
knn.setClassifierIdentifier(classifier.toString());;
return knn;
case WDTW_1NN:
return new WDTW1NN();
case WDDTW_1NN:
knn = new WDTW1NN();
knn.setClassifierIdentifier(classifier.toString());
return knn;
case LCSS_1NN:
return new LCSS1NN();
case ERP_1NN:
return new ERP1NN();
case MSM_1NN:
return new MSM1NN();
case TWE_1NN:
return new TWE1NN();
default:
throw new Exception("Unsupported classifier type");
}
}
    /**
     * Classify a test instance. Each constituent predicts a class and votes with weight
     * equal to its training CV accuracy; the class with the largest weighted vote is
     * returned. Ties are broken deterministically (Random with fixed seed 46). Also
     * records the per-constituent predictions in previousPredictions as a side effect.
     * @param instance test instance
     * @return predicted class value of instance
     * @throws Exception if the classifier has not been built
     */
    public double classifyInstance(Instance instance) throws Exception{
        if(classifiers==null){
            throw new Exception("Error: classifier not built");
        }
        // derivative-based constituents classify the derivative transform of the instance,
        // obtained by transforming a singleton dataset built around it
        Instance derIns = null;
        if(this.usesDer){
            Instances temp = new Instances(derTrain,1);
            temp.add(instance);
            temp = df.transform(temp);
            derIns = temp.instance(0);
        }
        double bsfVote = -1;
        double[] classTotals = new double[train.numClasses()];
        ArrayList<Double> bsfClassVal = null;
        double pred;
        this.previousPredictions = new double[this.classifiers.length];
        for(int c = 0; c < classifiers.length; c++){
            if(isDerivative(classifiersToUse[c])){
                pred = classifiers[c].classifyInstance(derIns);
            }else{
                pred = classifiers[c].classifyInstance(instance);
            }
            previousPredictions[c] = pred;
            try{
                classTotals[(int)pred] += cvAccs[c];
            }catch(Exception e){
                // diagnostics before rethrowing: pred indexed outside the class-value range
                System.out.println("cv accs "+cvAccs.length);
                System.out.println(pred);
                throw e;
            }
            // track the best-so-far weighted vote, collecting all tied class values
            if(classTotals[(int)pred] > bsfVote){
                bsfClassVal = new ArrayList<>();
                bsfClassVal.add(pred);
                bsfVote = classTotals[(int)pred];
            }else if(classTotals[(int)pred] == bsfVote){
                bsfClassVal.add(pred);
            }
        }
        if(bsfClassVal.size()>1){
            return bsfClassVal.get(new Random(46).nextInt(bsfClassVal.size()));
        }
        return bsfClassVal.get(0);
    }
public double[] classifyInstanceByConstituents(Instance instance) throws Exception{
Instance ins = instance;
double[] predsByClassifier = new double[this.classifiers.length];
for(int i=0;i<classifiers.length;i++){
predsByClassifier[i] = classifiers[i].classifyInstance(ins);
}
return predsByClassifier;
}
public double[] getPreviousPredictions() throws Exception{
if(this.previousPredictions == null){
throw new Exception("Error: no previous instance found");
}
return this.previousPredictions;
}
    /**
     * Returns a class distribution built from the constituents' weighted votes: each
     * constituent adds its CV accuracy to the class it predicts, and the totals are
     * normalised by the sum of all CV accuracies.
     * @param instance test instance
     * @return normalised vote proportions, one per class
     * @throws Exception if the classifier has not been built
     */
    @Override
    public double[] distributionForInstance(Instance instance) throws Exception{
        if(classifiers==null){
            throw new Exception("Error: classifier not built");
        }
        // derivative-based constituents classify the derivative transform of the instance
        Instance derIns = null;
        if(this.usesDer){
            Instances temp = new Instances(derTrain,1);
            temp.add(instance);
            temp = df.transform(temp);
            derIns = temp.instance(0);
        }
        double[] classTotals = new double[train.numClasses()];
        double cvSum = 0;
        double pred;
        for(int c = 0; c < classifiers.length; c++){
            if(isDerivative(classifiersToUse[c])){
                pred = classifiers[c].classifyInstance(derIns);
            }else{
                pred = classifiers[c].classifyInstance(instance);
            }
            try{
                classTotals[(int)pred] += cvAccs[c];
            }catch(Exception e){
                // diagnostics before rethrowing: pred indexed outside the class-value range
                System.out.println("cv accs "+cvAccs.length);
                System.out.println(pred);
                throw e;
            }
            cvSum+=cvAccs[c];
        }
        // normalise so the distribution sums to 1
        for(int c = 0; c < classTotals.length; c++){
            classTotals[c]/=cvSum;
        }
        return classTotals;
    }
public double[] getCVAccs() throws Exception{
if(this.cvAccs==null){
throw new Exception("Error: classifier not built yet");
}
return this.cvAccs;
}
private String getClassifierInfo(){
StringBuilder st = new StringBuilder();
st.append("EE using:\n");
st.append("=====================\n");
for(int c = 0; c < classifiers.length; c++){
st.append(classifiersToUse[c]).append(" ").append(classifiers[c].getClassifierIdentifier()).append(" ").append(cvAccs[c]).append("\n");
}
return st.toString();
}
@Override
public String getParameters(){
StringBuilder params = new StringBuilder();
params.append(super.getParameters()).append(",");
for(int c = 0; c < classifiers.length; c++){
params.append(classifiers[c].getClassifierIdentifier()).append(",").append(classifiers[c].getParamInformationString()).append(",");
}
return params.toString();
}
@Override
public String toString(){
return super.toString()+"\n"+this.getClassifierInfo();
}
    /** Placeholder for a usage example; not yet implemented. */
    public static void exampleUsage(String datasetName, int resampeId, String outputResultsDirName) throws Exception{
        System.out.println("to do");
    }
    /** Ad-hoc smoke test against hard-coded local ItalyPowerDemand paths; prints test and train accuracy. */
    public static void main(String[] args) throws Exception{
        ElasticEnsemble ee = new ElasticEnsemble();
        Instances train = DatasetLoading.loadDataNullable("C:/users/sjx07ngu/dropbox/tsc problems/ItalyPowerDemand/ItalyPowerDemand_TRAIN");
        Instances test = DatasetLoading.loadDataNullable("C:/users/sjx07ngu/dropbox/tsc problems/ItalyPowerDemand/ItalyPowerDemand_TEST");
        ee.buildClassifier(train);
        int correct = 0;
        for(int i = 0; i < test.numInstances(); i++){
            if(test.instance(i).classValue()==ee.classifyInstance(test.instance(i))){
                correct++;
            }
        }
        System.out.println("correct: "+correct+"/"+test.numInstances());
        System.out.println((double)correct/test.numInstances());
        System.out.println(ee.getTrainAcc());
    }
}
| 22,608 | 36.494196 | 218 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/FastDTW.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
/*
Wrapper for a DTW implementation that speeds up the window size search
through caching
*/
package tsml.classifiers.distance_based;
import java.util.ArrayList;
import java.util.concurrent.TimeUnit;
import tsml.classifiers.EnhancedAbstractClassifier;
import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.windowSearcher.FastWWSByPercent;
import tsml.classifiers.legacy.elastic_ensemble.fast_window_search.windowSearcher.WindowSearcher;
import weka.core.*;
/**
* Wrapper for Chan Wei's implementation of DTW
* @author ajb
*/
public class FastDTW extends EnhancedAbstractClassifier{
    /** Delegate that performs the fast warping-window search and the actual 1-NN classification. */
    WindowSearcher ws;
    protected ArrayList<Double> buildTimes;

    public FastDTW(){
        super(CANNOT_ESTIMATE_OWN_PERFORMANCE);
        ws=new FastWWSByPercent();
    }

    /**
     * Trains the wrapped window searcher, recording build time and a snapshot of heap
     * usage in trainResults.
     * @param data training instances
     * @throws Exception propagated from the wrapped searcher
     */
    @Override
    public void buildClassifier(Instances data) throws Exception {
        trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
        long startTime=System.nanoTime();
        ws.buildClassifier(data);
        trainResults.setBuildTime(System.nanoTime()-startTime);
        // currently-used heap at this moment; an approximation, not a peak measurement
        Runtime rt = Runtime.getRuntime();
        long usedBytes = (rt.totalMemory() - rt.freeMemory());
        trainResults.setMemory(usedBytes);
    }

    /**
     * Delegates prediction to the wrapped window searcher.
     * (Fix: @Override was missing — this overrides AbstractClassifier.classifyInstance.)
     */
    @Override
    public double classifyInstance(Instance data) throws Exception {
        return ws.classifyInstance(data);
    }

    /**
     * Serialises accuracy, memory and the searcher's chosen window/score for results files.
     * NOTE(review): trainResults accuracy is never set in buildClassifier here — confirm
     * getAcc() holds a meaningful value when this is called.
     */
    @Override
    public String getParameters() {
        String result="CVAcc,"+trainResults.getAcc()+",Memory,"+trainResults.getMemory();
        result+=",WindowSize,"+ws.getBestWin()+",Score,"+ws.getBestScore();
        return result;
    }
}
| 2,359 | 31.777778 | 99 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/FastElasticEnsemble.java | /*
* Copyright (C) 2019 Chang Wei Tan
*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.distance_based;
import experiments.data.DatasetLoading;
import tsml.classifiers.legacy.elastic_ensemble.fast_elastic_ensemble.utils.SequenceStatsCache;
import tsml.classifiers.legacy.elastic_ensemble.Efficient1NN;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.TechnicalInformation;
import java.io.File;
import java.util.ArrayList;
import java.util.Random;
import java.util.Scanner;
/**
* This is a method introduced in the paper
* FastEE: Fast Ensembles of Elastic Distances for Time Series Classification.
* It builds a nearest neighbour table that can be used to learn the optimal parameter for each distance
* measure.
* Once this table is built, learning the optimal parameter can be done at one pass.
* Please refer to the paper for more details about this table.
*
* @author Chang Wei (chang.tan@monash.edu)
*/
public class FastElasticEnsemble extends ElasticEnsemble {

    /** Citation details for the FastEE paper this implementation follows. */
    @Override
    public TechnicalInformation getTechnicalInformation() {
        TechnicalInformation result;
        result = new TechnicalInformation(TechnicalInformation.Type.ARTICLE);
        result.setValue(TechnicalInformation.Field.AUTHOR, "C. Tan, F. Petitjean and G. Webb");
        result.setValue(TechnicalInformation.Field.TITLE, "FastEE: Fast Ensembles of Elastic Distances for Time Series Classification");
        result.setValue(TechnicalInformation.Field.JOURNAL, "Data Mining and Knowledge Discovery");
        result.setValue(TechnicalInformation.Field.VOLUME, "XXX");
        result.setValue(TechnicalInformation.Field.NUMBER, "XXX");
        result.setValue(TechnicalInformation.Field.PAGES, "XXX");
        result.setValue(TechnicalInformation.Field.YEAR, "2019");
        return result;
    }

    /** Default constructor: uses every available constituent classifier. */
    public FastElasticEnsemble() {
        this.classifiersToUse = ConstituentClassifiers.values();
    }

    /** Constructor restricting the ensemble to a specific subset of constituent types. */
    public FastElasticEnsemble(ConstituentClassifiers[] classifiersToUse) {
        this.classifiersToUse = classifiersToUse;
    }

    /**
     * Constructor that loads constituent training output from disk instead of running the
     * parameter search; uses all constituent types. Data resampling must be done
     * externally; the ids here only fix the file-naming convention used when loading.
     */
    public FastElasticEnsemble(String resultsDir, String datasetName, int resampleId) {
        this.resultsDir = resultsDir;
        this.datasetName = datasetName;
        this.resampleId = resampleId;
        this.classifiersToUse = ConstituentClassifiers.values();
        this.buildFromFile = true;
    }

    /** As the three-argument constructor, but restricted to the given constituent types. */
    public FastElasticEnsemble(String resultsDir, String datasetName, int resampleId, ConstituentClassifiers[] classifiersToUse) {
        this.resultsDir = resultsDir;
        this.datasetName = datasetName;
        this.resampleId = resampleId;
        this.classifiersToUse = classifiersToUse;
        this.buildFromFile = true;
    }

/*    @Override
    public double getTrainAcc() {
        if (this.ensembleCvAcc != -1 && this.ensembleCvPreds != null) {
            return this.ensembleCvAcc;
        }
        this.getTrainPreds();
        return this.ensembleCvAcc;
    }
*/

    /**
     * Builds the ensemble. Identical to ElasticEnsemble.buildClassifier except that, when
     * training from scratch, tunable constituents find their best parameter with
     * fastParameterSearch (the FastEE nearest-neighbour table) instead of a full LOOCV
     * grid search; fixed-parameter constituents still use plain loocv.
     * @param train training instances
     * @throws Exception if loading from file fails or a constituent cannot be built
     */
    @Override
    public void buildClassifier(Instances train) throws Exception {
        long t1= System.nanoTime();
        this.train = train;
        this.derTrain = null;
        usesDer = false;
        this.classifiers = new Efficient1NN[this.classifiersToUse.length];
        this.cvAccs = new double[classifiers.length];
        this.cvPreds = new double[classifiers.length][this.train.numInstances()];
        for (int c = 0; c < classifiers.length; c++) {
            classifiers[c] = getClassifier(this.classifiersToUse[c]);
            if (isDerivative(this.classifiersToUse[c])) {
                usesDer = true;
            }
        }
        // the derivative transform is computed once and shared by all derivative constituents
        if (usesDer) {
            this.derTrain = df.transform(train);
        }
        if (buildFromFile) {
            File existingTrainOut;
            Scanner scan;
            int paramId;
            double cvAcc;
            for (int c = 0; c < classifiers.length; c++) {
                existingTrainOut = new File(this.resultsDir + classifiersToUse[c] + "/Predictions/" + datasetName + "/trainFold" + this.resampleId + ".csv");
                if (!existingTrainOut.exists()) {
                    throw new Exception("Error: training file doesn't exist for " + existingTrainOut.getAbsolutePath());
                }
                scan = new Scanner(existingTrainOut);
                scan.useDelimiter("\n");
                scan.next();//header
                // line 2: best parameter id; line 3: CV accuracy (first csv column of each)
                paramId = Integer.parseInt(scan.next().trim().split(",")[0]);
                cvAcc = Double.parseDouble(scan.next().trim().split(",")[0]);
                for (int i = 0; i < train.numInstances(); i++) {
                    this.cvPreds[c][i] = Double.parseDouble(scan.next().split(",")[1]);
                }
                scan.close();
                if (isDerivative(classifiersToUse[c])) {
                    if (!isFixedParam(classifiersToUse[c])) {
                        classifiers[c].setParamsFromParamId(derTrain, paramId);
                    }
                    classifiers[c].buildClassifier(derTrain);
                } else {
                    if (!isFixedParam(classifiersToUse[c])) {
                        classifiers[c].setParamsFromParamId(train, paramId);
                    }
                    classifiers[c].buildClassifier(train);
                }
                cvAccs[c] = cvAcc;
            }
        } else {
            double[] cvAccAndPreds;
            for (int c = 0; c < classifiers.length; c++) {
                if (writeToFile) {
                    classifiers[c].setFileWritingOn(this.resultsDir, this.datasetName, this.resampleId);
                }
                // fixed-parameter measures have nothing to tune, so plain loocv suffices;
                // otherwise the fast nearest-neighbour-table search finds the best parameter
                if (isFixedParam(classifiersToUse[c])) {
                    if (isDerivative(classifiersToUse[c])) {
                        cvAccAndPreds = classifiers[c].loocv(derTrain);
                    } else {
                        cvAccAndPreds = classifiers[c].loocv(train);
                    }
                } else if (isDerivative(classifiersToUse[c])) {
                    cvAccAndPreds = classifiers[c].fastParameterSearch(derTrain);
                } else {
                    cvAccAndPreds = classifiers[c].fastParameterSearch(train);
                }
                // slot 0 holds the CV accuracy, followed by one prediction per instance
                cvAccs[c] = cvAccAndPreds[0];
                for (int i = 0; i < train.numInstances(); i++) {
                    this.cvPreds[c][i] = cvAccAndPreds[i + 1];
                }
            }
            /*
            if (this.writeEnsembleTrainingFile) {
                StringBuilder output = new StringBuilder();
                double[] ensembleCvPreds = this.getTrainPreds();
                output.append(train.relationName()).append(",FastEE,train\n");
                output.append(this.getParameters()).append("\n");
                output.append(trainResults.getAcc()).append("\n");
                for (int i = 0; i < train.numInstances(); i++) {
                    output.append(train.instance(i).classValue()).append(",").append(ensembleCvPreds[i]).append("\n");
                }
                FileWriter fullTrain = new FileWriter(this.ensembleTrainFilePathAndName);
                fullTrain.append(output);
                fullTrain.close();
            }
            */
        }
        trainResults.setBuildTime(System.nanoTime() - t1);
    }

    /**
     * Classify a test instance using the lower-bound cache. Each constituent votes with
     * weight equal to its CV accuracy; the largest weighted vote wins, with ties broken
     * deterministically (Random with fixed seed 46).
     * @param instance test instance
     * @param queryIndex index of the instance within the cached test set
     * @param cache precomputed statistics used by the constituents' lower bounds
     * @return predicted class value
     * @throws Exception if the classifier has not been built
     */
    public double classifyInstance(final Instance instance, final int queryIndex, final SequenceStatsCache cache) throws Exception{
        if(classifiers==null){
            throw new Exception("Error: classifier not built");
        }
        // derivative-based constituents classify the derivative transform of the instance
        Instance derIns = null;
        if(this.usesDer){
            Instances temp = new Instances(derTrain,1);
            temp.add(instance);
            temp = df.transform(temp);
            derIns = temp.instance(0);
        }
        double bsfVote = -1;
        double[] classTotals = new double[train.numClasses()];
        ArrayList<Double> bsfClassVal = null;
        double pred;
        this.previousPredictions = new double[this.classifiers.length];
        for(int c = 0; c < classifiers.length; c++){
            if(isDerivative(classifiersToUse[c])){
                pred = classifiers[c].classifyInstance(derTrain, derIns, queryIndex, cache);
            }else{
                pred = classifiers[c].classifyInstance(train, instance, queryIndex, cache);
            }
            previousPredictions[c] = pred;
            try{
                classTotals[(int)pred] += cvAccs[c];
            }catch(Exception e){
                // diagnostics before rethrowing: pred indexed outside the class-value range
                System.out.println("cv accs "+cvAccs.length);
                System.out.println(pred);
                throw e;
            }
            // track the best-so-far weighted vote, collecting all tied class values
            if(classTotals[(int)pred] > bsfVote){
                bsfClassVal = new ArrayList<>();
                bsfClassVal.add(pred);
                bsfVote = classTotals[(int)pred];
            }else if(classTotals[(int)pred] == bsfVote){
                bsfClassVal.add(pred);
            }
        }
        if(bsfClassVal.size()>1){
            return bsfClassVal.get(new Random(46).nextInt(bsfClassVal.size()));
        }
        return bsfClassVal.get(0);
    }

    /** Human-readable summary of the constituents and their CV-accuracy weights. */
    private String getClassifierInfo() {
        StringBuilder st = new StringBuilder();
        st.append("FastEE using:\n");
        st.append("=====================\n");
        for (int c = 0; c < classifiers.length; c++) {
            st.append(classifiersToUse[c]).append(" ").append(classifiers[c].getClassifierIdentifier()).append(" ").append(cvAccs[c]).append("\n");
        }
        return st.toString();
    }

    @Override
    public String toString() {
        return super.toString() + "\n" + this.getClassifierInfo();
    }

    /** Ad-hoc smoke test against hard-coded local ArrowHead paths; prints train and test accuracy. */
    public static void main(String[] args) throws Exception {
        FastElasticEnsemble ee = new FastElasticEnsemble();
        Instances train = DatasetLoading.loadDataNullable("C:/Users/cwtan/workspace/Dataset/TSC_Problems/ArrowHead/ArrowHead_TRAIN");
        Instances test = DatasetLoading.loadDataNullable("C:/Users/cwtan/workspace/Dataset/TSC_Problems/ArrowHead/ArrowHead_TEST");
        ee.buildClassifier(train);
        SequenceStatsCache cache = new SequenceStatsCache(test, test.numAttributes() - 1);
        System.out.println("Train Acc: " + ee.trainResults.getAcc());
        int correct = 0;
        for (int i = 0; i < test.numInstances(); i++) {
            double actual = test.instance(i).classValue();
            double pred = ee.classifyInstance(test.instance(i), i, cache);
            if (actual == pred) {
                correct++;
            }
        }
        System.out.println("Test Acc: " + (double) correct / test.numInstances());
        System.out.println("Test Acc -- correct: " + correct + "/" + test.numInstances());
    }
}
| 11,433 | 39.119298 | 157 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/NN_CID.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.distance_based;
import experiments.data.DatasetLists;
import utilities.ClassifierTools;
import machine_learning.classifiers.kNN;
import evaluation.storage.ClassifierResults;
import experiments.data.DatasetLoading;
import tsml.classifiers.SaveParameterInfo;
import weka.core.EuclideanDistance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.TechnicalInformation;
import tsml.classifiers.legacy.elastic_ensemble.distance_functions.DTW;
import weka.core.neighboursearch.PerformanceStats;
import java.util.concurrent.TimeUnit;
/**
*Implementation of the algorithm described in
*
@inproceedings{batista11cid,
author="G. Batista and X. Wang and E. Keogh ",
title="A Complexity-Invariant Distance Measure for Time Series",
booktitle ="Proceedings of the 11th {SIAM} International Conference on Data Mining (SDM)",
year="2011"
}
and
@inproceedings{batista14cid,
author="G. Batista and E. Keogh and O. Tataw and X. Wang ",
title="{CID}: an efficient complexity-invariant distance for time series",
journal={Data Mining and Knowledge Discovery},
volume={28},
pages="634--669",
year={2014}
}
The distance measure CID(Q,C)=ED(Q,C) × CF(Q,C),
where ED is the Eucidean distance and
CF(Q,C) = max (CE(Q),CE(C))
min (CE(Q),CE(C))
ie the ratio of complexities. In the paper,
*
* @author ajb
*/
public class NN_CID extends kNN implements SaveParameterInfo{
    // results object used only to record the build time for getParameters
    protected ClassifierResults res =new ClassifierResults();
    // distance delegate; swapped for the DTW-based variant via useDTW()
    CIDDistance cid=new CIDDistance();
    /** Default constructor: 1-NN with the Euclidean-based CID distance. */
    public NN_CID(){
        super();
        // re-assignment is redundant (the field initialiser already set it) but harmless
        cid=new CIDDistance();
    }
    /** Switches the base distance from Euclidean to DTW; call before buildClassifier. */
    public void useDTW(){
        cid=new CIDDTWDistance();
    }
    /** Citation details for the 2014 DAMI CID paper. */
    @Override
    public TechnicalInformation getTechnicalInformation() {
        TechnicalInformation result;
        result = new TechnicalInformation(TechnicalInformation.Type.ARTICLE);
        result.setValue(TechnicalInformation.Field.AUTHOR, "G. Batista, E. Keogh, O. Tataw and X. Wang");
        result.setValue(TechnicalInformation.Field.YEAR, "2014");
        result.setValue(TechnicalInformation.Field.TITLE, "CID: an efficient complexity-invariant distance for time series");
        result.setValue(TechnicalInformation.Field.JOURNAL, "Data Mining and Knowledge Discovery");
        result.setValue(TechnicalInformation.Field.VOLUME, "28");
        result.setValue(TechnicalInformation.Field.NUMBER, "3");
        result.setValue(TechnicalInformation.Field.PAGES, "634--669");
        return result;
    }
    //<editor-fold defaultstate="collapsed" desc="problems used in DAMI paper">
    // UCR datasets evaluated in the CID DAMI paper; index-aligned with reportedResults below
    public static String[] problems={
        "FiftyWords",
        "Adiac",
        "Beef",
        "CBF",
        "ChlorineConcentration",
        "CinCECGtorso",
        "Coffee",
        "CricketX",
        "CricketY",
        "CricketZ",
        "DiatomSizeReduction",
        "ECG200",
        "ECGFiveDays",
        "FaceAll",
        "FaceFour",
        "FacesUCR",
        "Fish",
        "GunPoint",
        "Haptics",
        "InlineSkate",
        "ItalyPowerDemand",
        "Lightning2",
        "Lightning7",
        "Mallat",
        "MedicalImages",
        "Motes",
        "OSULeaf",
        "OliveOil",
        "SonyAIBORobotSurface1",
        "SonyAIBORobotSurface2",
        "StarLightCurves",
        "SwedishLeaf",
        "Symbols",
        "SyntheticControl",
        "Trace",
        "TwoLeadECG",
        "TwoPatterns",
        "Wafer",
        "WordsSynonyms",
        "Yoga",
        "uWaveGestureLibraryX",
        "uWaveGestureLibraryY",
        "uWaveGestureLibraryZ"
    };
    //</editor-fold>
    //<editor-fold defaultstate="collapsed" desc="ACCURACY for CID DTW reported in DAMI paper">
    // published test accuracies for CID-DTW, in the same order as the problems array above
    static double[] reportedResults={
        0.7736,
        0.6215,
        0.5333,
        0.9989,
        0.6487,
        0.9457,
        0.8214,
        0.7513,
        0.8026,
        0.7949,
        0.9346,
        0.8900,
        0.7816,
        0.8556,
        0.8750,
        0.8985,
        0.8457,
        0.9267,
        0.4286,
        0.4145,
        0.9563,
        0.8689,
        0.7397,
        0.9254,
        0.7421,
        0.7955,
        0.6281,
        0.8333,
        0.8153,
        0.8772,
        0.9343,
        0.8832,
        0.9407,
        0.9733,
        0.9900,
        0.8622,
        0.9958,
        0.9945,
        0.7571,
        0.8443,
        0.7889,
        0.7217,
        0.7066
    };
    //</editor-fold>
@Override
public String getParameters() {
return "BuildTime,"+res.getBuildTime();
}
    /**
     * Installs the CID distance into the underlying kNN and trains it, recording the
     * build time in the local results object.
     * @param train training instances
     * @throws Exception propagated from the superclass build
     */
    @Override
    public void buildClassifier(Instances train) throws Exception {
        long startTime=System.nanoTime();
        this.setDistanceFunction(cid);
//        cid.setInstances(train);
        super.buildClassifier(train);
        try {
            res.setTimeUnit(TimeUnit.NANOSECONDS);
            res.setBuildTime(System.nanoTime()-startTime);
        } catch (Exception e) {
            // deliberately swallowed: the superclass signature prevents rethrowing here
            System.err.println("Inheritance preventing me from throwing this error...");
            System.err.println(e);
        }
    }
public static class CIDDistance extends EuclideanDistance {
@Override
public double distance(Instance one, Instance two){
return this.distance(one, two, Double.MAX_VALUE);
}
@Override
public double distance(Instance one, Instance two, double cutoff, PerformanceStats stats){
return this.distance(one,two,cutoff);
}
@Override
public double distance(Instance first, Instance second, double cutoff){
double d=0;
//Find the acf terms
double d1=0,d2=0;
double[] data1=first.toDoubleArray();
double[] data2=second.toDoubleArray();
for(int i=0;i<first.numAttributes()-1;i++)
d+=(data1[i]-data2[i])*(data1[i]-data2[i]);
d=Math.sqrt(d);
for(int i=0;i<first.numAttributes()-2;i++)
d1+=(data1[i]-data1[i+1])*(data1[i]-data1[i+1]);
for(int i=0;i<first.numAttributes()-2;i++)
d2+=(data2[i]-data2[i+1])*(data2[i]-data2[i+1]);
d1=Math.sqrt(d1+0.001); //This is from theircode
d2=Math.sqrt(d2+0.001); //This is from theircode
if(d1<d2){
double temp=d1;
d1=d2;
d2=temp;
}
d=Math.sqrt(d);
d=d*(d1/d2);
return d;
}
}
public static class CIDDTWDistance extends CIDDistance {
DTW dtw = new DTW();
@Override
public double distance(Instance one, Instance two){
return this.distance(one, two, Double.MAX_VALUE);
}
@Override
public double distance(Instance one, Instance two, double cutoff, PerformanceStats stats){
return this.distance(one,two,cutoff);
}
@Override
public double distance(Instance first, Instance second, double cutoff){
double d=0;
//Find the acf terms
double d1=0,d2=0;
double[] data1=first.toDoubleArray();
double[] data2=second.toDoubleArray();
d=dtw.distance(first, second);
for(int i=0;i<first.numAttributes()-2;i++)
d1+=(data1[i]-data1[i+1])*(data1[i]-data1[i+1]);
for(int i=0;i<first.numAttributes()-2;i++)
d2+=(data2[i]-data2[i+1])*(data2[i]-data2[i+1]);
d1=Math.sqrt(d1)+0.001; //This is from theircode
d2=Math.sqrt(d2)+0.001; //This is from theircode
if(d1<d2){
double temp=d1;
d1=d2;
d2=temp;
}
d=d*(d1/d2);
return d;
}
}
    /**
     * Reproduces the CID-DTW vs plain 1-NN comparison over the 46-problem set: trains a
     * plain 1-NN and a CID-DTW 1-NN per problem, prints the test accuracies and counts
     * CID wins. Build failures are printed and the run continues.
     * @param problemPath directory containing the ARFF problem folders
     */
    public static void recreateDTWDistance(String problemPath){
        int c=0;
        for(String s:DatasetLists.tscProblems46){
            kNN k= new kNN(1);
            NN_CID k2= new NN_CID();
            k2.useDTW();
            Instances train=DatasetLoading.loadDataNullable(problemPath+s+"\\"+s+"_TRAIN");
            Instances test=DatasetLoading.loadDataNullable(problemPath+s+"\\"+s+"_TEST");
            try {
                k.buildClassifier(train);
                k2.buildClassifier(train);
            } catch (Exception e) {
                // best effort: report and carry on with the next problem
                e.printStackTrace();
            }
            double a1=ClassifierTools.accuracy(test, k);
            double a2=ClassifierTools.accuracy(test, k2);
            System.out.println(s+","+a1+","+a2);
            if(a2>a1)
                c++;
        }
        System.out.println("CID Better on "+c+" out of "+DatasetLists.tscProblems46.length);
    }
    /**
     * As recreateDTWDistance but comparing plain 1-NN against Euclidean-based CID 1-NN.
     * Prints per-problem test accuracies and the CID win count; build failures are
     * printed and the run continues.
     * @param problemPath directory containing the ARFF problem folders
     */
    public static void recreateEuclideanDistance(String problemPath){
        int c=0;
        for(String s:DatasetLists.tscProblems46){
            kNN k= new kNN(1);
            NN_CID k2= new NN_CID();
            Instances train=DatasetLoading.loadDataNullable(problemPath+s+"\\"+s+"_TRAIN");
            Instances test=DatasetLoading.loadDataNullable(problemPath+s+"\\"+s+"_TEST");
            try {
                k.buildClassifier(train);
                k2.buildClassifier(train);
            } catch (Exception e) {
                // best effort: report and carry on with the next problem
                e.printStackTrace();
            }
            double a1=ClassifierTools.accuracy(test, k);
            double a2=ClassifierTools.accuracy(test, k2);
            System.out.println(s+","+a1+","+a2);
            if(a2>a1)
                c++;
        }
        System.out.println("CID Better on "+c+" out of "+DatasetLists.tscProblems46.length);
    }
public static void main(String[]args){
recreateEuclideanDistance("Z:\\ArchiveData\\univariate_arff\\");
// recreateDTWDistance();
}
int[][] DTWOptimalWindows={
{4,0,1,0,1,2,0,1,1,0,1,1,0,2,1,0,1,1,0,1,1,1,1,1,1,1,1,0,0,1,4,0,0,1,1,0,1,1,0,1,1,1,1,1,2,1,1,1,3,1,0,1,0,0,1,0,1,1,1,1,1,1,1,1,1,1,1,0,2,0,4,1,1,4,0,1,1,1,0,4,1,6,0,0,0,5,0,1,4,0,1,1,1,1,1,3,1,0,1,1},
{0,0,0,0,0,0,0,0,0,0,0,1,3,2,14,0,14,0,1,1,0,0,1,0,1,0,0,1,0,0,3,0,0,1,2,0,1,1,0,1,1,0,1,0,1,2,0,0,2,2,0,0,2,0,0,0,1,0,0,3,2,0,0,0,0,4,0,0,0,0,0,0,0,1,3,2,0,1,1,0,0,1,0,0,1,3,0,0,0,0,8,0,0,0,0,0,1,1,0,1},
{0,1,7,1,0,0,3,1,0,1,1,0,1,1,1,0,1,2,0,0,1,1,0,0,0,1,0,1,1,1,0,0,0,0,1,1,0,1,0,0,0,2,0,0,1,0,0,0,3,1,3,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,3,1,0,0,0,0,1,1,1,1,2,1,0,1,1,1,1,0,1,2,0,0,1,0,1,0,0,0,1,1,1,0,1,1},
{8,6,6,9,6,9,9,2,23,6,9,20,4,8,4,4,8,8,2,8,6,9,11,9,6,2,9,5,8,8,9,4,8,6,8,2,5,14,0,8,10,3,7,8,2,2,11,6,9,5,7,6,5,0,6,9,9,3,9,9,9,7,7,8,13,2,10,8,9,8,9,9,6,9,5,9,9,6,9,2,7,9,3,2,9,7,9,11,6,9,3,7,4,8,0,8,0,8,6,10},
{7,15,6,8,7,5,7,11,8,1,9,5,7,8,4,5,8,9,4,6,6,5,9,5,7,3,3,5,7,9,7,0,9,7,5,2,5,10,0,7,6,8,8,6,7,5,6,6,6,9,9,11,7,4,9,7,5,8,9,7,5,8,15,13,9,7,9,8,11,6,8,10,6,5,5,8,10,6,8,11,9,4,5,4,5,8,9,6,2,8,5,6,5,8,10,2,8,8,4,8},
{1,0,0,4,0,0,2,3,1,0,2,0,4,1,6,2,1,2,1,0,0,4,1,1,2,4,0,1,0,4,4,1,0,2,2,4,1,2,3,2,2,13,0,1,1,3,7,0,3,2,0,1,0,0,1,5,1,5,1,1,0,1,0,1,0,1,0,1,2,4,1,6,0,1,0,10,2,3,0,1,0,0,6,1,0,0,1,0,0,2,1,0,0,4,0,4,4,1,1,1},
{11,8,0,7,6,15,4,4,20,12,7,5,9,8,7,21,17,14,7,8,5,9,5,3,7,5,7,9,8,10,9,9,13,7,5,3,13,6,6,8,11,14,9,8,8,4,8,6,1,5,15,12,9,4,8,1,9,9,18,15,8,8,15,4,18,11,5,7,6,10,8,15,7,5,6,7,9,12,11,3,18,8,7,5,4,2,9,4,8,3,2,5,13,7,12,5,2,7,8,8},
{0,0,0,0,0,0,0,0,1,0,0,0,1,0,2,0,0,0,0,0,2,0,2,0,0,0,0,11,0,1,0,0,18,0,2,0,0,0,0,0,0,4,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,2,0,0,0,1,2,0,1,0,0,0,0,0},
{1,1,1,2,1,1,2,1,1,0,1,0,1,1,2,1,1,1,1,2,2,2,0,2,0,2,1,2,1,1,3,2,1,1,0,1,2,2,1,1,2,0,2,4,3,1,1,1,1,2,1,1,1,2,2,0,3,0,2,1,0,1,0,1,0,0,2,0,2,2,2,1,4,1,2,1,1,2,1,0,1,1,2,2,1,1,2,1,3,1,1,2,1,1,2,2,1,1,1,1},
{0,0,3,0,0,0,0,0,0,0,0,0,0,0,2,0,0,3,0,0,0,3,0,0,4,3,0,0,0,0,3,0,0,0,0,0,0,3,2,4,0,0,0,0,3,0,1,0,0,0,0,0,0,2,0,0,0,0,3,3,4,0,0,2,4,0,0,0,0,0,3,0,2,0,0,3,3,0,0,0,0,0,0,3,0,2,0,0,0,0,0,0,1,4,4,0,4,0,0,0},
{13,32,17,38,32,32,55,42,37,13,54,52,62,38,41,62,22,17,22,71,20,8,52,45,49,46,32,20,38,13,21,8,20,23,7,39,17,12,21,20,18,48,32,21,10,17,21,32,31,47,17,48,19,43,22,45,26,12,20,40,25,20,32,34,12,64,40,34,48,91,26,13,22,54,86,11,21,86,86,36,31,46,32,20,21,25,17,40,40,18,21,10,11,54,9,86,21,20,12,47},
{8,10,6,10,11,9,14,8,9,13,13,21,14,14,12,11,10,6,8,11,7,8,10,14,15,5,9,7,5,7,8,13,9,5,10,13,9,5,13,11,11,11,7,12,7,11,14,9,9,7,7,8,7,6,8,13,13,13,16,8,14,11,7,11,10,8,4,11,10,6,8,10,8,10,11,10,10,8,6,6,8,11,7,13,14,7,10,8,7,11,7,11,10,11,6,14,9,8,11,7},
{16,11,10,6,9,7,11,11,16,10,6,7,5,11,6,10,20,11,13,10,10,14,11,10,8,14,8,12,12,7,11,6,12,10,11,12,13,14,9,9,12,11,9,8,20,13,14,10,4,5,15,8,12,8,11,13,8,6,8,11,8,12,22,9,9,10,12,5,22,17,11,10,8,22,7,15,7,7,11,9,11,12,7,10,11,10,10,10,14,10,5,6,9,11,11,16,11,13,6,10},
{5,7,5,13,11,7,17,9,8,6,8,16,12,11,5,5,5,12,7,12,10,8,12,5,6,10,7,8,4,8,7,11,6,16,16,7,7,10,13,15,5,6,14,9,7,8,15,5,14,15,7,8,14,7,12,12,14,8,6,8,13,12,10,11,12,12,10,14,11,6,10,12,5,8,5,5,7,7,12,8,15,7,4,8,9,9,7,8,9,9,8,9,5,9,5,10,10,16,6,5},
{0,1,1,0,0,0,0,0,0,0,0,1,0,0,0,1,1,0,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0},
{2,4,0,2,3,3,2,2,2,4,3,6,3,2,6,2,8,3,3,6,3,2,3,3,3,3,6,9,8,4,3,2,2,2,4,0,3,3,3,2,3,6,2,2,8,2,9,3,6,2,3,8,6,0,3,3,9,3,0,3,3,3,2,3,2,2,2,3,6,8,2,3,3,2,3,2,2,2,2,8,3,0,3,3,6,2,3,2,4,6,2,0,2,7,2,2,6,4,4,2},
{0,0,4,6,2,6,6,6,2,7,7,3,3,3,0,3,6,6,4,7,2,0,0,0,0,3,6,3,3,3,6,3,3,0,8,6,3,6,6,6,4,7,4,2,6,9,0,0,7,6,4,0,3,0,0,4,6,3,0,0,0,3,0,3,6,4,3,0,9,3,4,0,6,9,3,3,7,4,6,0,0,0,3,7,0,2,6,3,6,9,9,6,6,3,2,6,0,0,3,0},
{0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,4,4,0,4,0,4,4,0,0,0,6,0,0,0,0,0,0,3,6,3,3,0,0,0,0,4,4,0,0,0,0,0,3,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,4,0,0,0,0,3,0,0,0,3,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0},
{7,12,4,4,17,20,4,3,24,4,1,4,7,17,4,4,4,6,6,5,4,0,22,18,4,24,6,4,8,4,8,13,3,16,4,4,15,4,4,6,4,14,4,4,20,4,4,17,7,10,20,4,19,7,7,4,4,1,0,6,4,24,4,4,4,3,3,4,7,20,4,28,7,4,4,15,4,7,15,4,17,4,4,7,0,20,17,4,14,4,15,4,4,6,6,20,4,4,15,3},
{0,0,0,0,0,0,0,7,3,0,0,0,0,0,0,0,0,2,2,0,0,0,2,2,0,3,2,3,2,2,0,3,0,0,2,0,0,22,2,0,0,2,0,0,3,2,2,0,2,0,2,0,0,0,0,2,0,0,0,2,0,2,0,0,2,0,2,0,2,3,2,2,0,4,2,0,0,0,2,0,2,2,5,0,3,2,5,0,0,5,2,0,0,0,2,3,15,0,0,0},
{1,12,1,0,2,1,5,16,0,3,1,1,17,2,12,10,1,3,3,2,1,2,0,5,1,26,0,13,61,11,2,0,4,24,2,13,28,1,0,2,4,3,1,9,0,2,0,2,18,8,3,1,10,6,13,33,9,1,0,0,6,0,0,1,2,17,0,11,0,1,1,2,6,6,14,0,14,0,6,3,1,1,1,0,1,3,0,4,4,0,0,27,28,53,3,0,0,0,0,1},
{0,2,0,1,0,1,2,3,2,0,1,1,1,0,2,0,0,1,1,0,1,3,2,1,2,0,1,1,0,1,0,1,1,0,1,1,1,1,1,3,1,1,1,0,0,1,1,1,1,0,2,1,1,1,0,2,1,0,1,1,0,1,3,1,1,2,1,2,1,4,1,2,1,0,1,0,0,3,3,0,0,2,2,1,6,0,6,2,0,5,1,1,0,1,1,3,1,2,4,5},
{14,11,12,10,10,14,11,13,11,10},
{4,4,3,6,3,3,4,3,4,4,3,4,2,5,3,2,5,3,2,6,9,5,2,3,3,5,4,5,3,5,3,3,3,3,4,4,4,5,4,4,3,3,4,2,3,3,4,4,4,3,4,2,3,3,2,4,4,5,5,3,3,3,2,3,2,4,3,5,4,3,3,4,4,2,4,5,2,4,3,5,4,6,3,4,3,4,3,5,6,5,5,4,4,4,2,3,4,3,4,4},
{2,3,5,5,0,3,5,7,11,3,0,6,4,3,2,0,4,3,1,3,3,4,3,2,5,1,3,3,1,4,1,2,0,2,2,2,3,2,3,6,1,2,1,1,2,2,1,0,1,4,2,0,4,1,2,3,4,6,7,5,3,5,2,6,6,3,3,4,2,7,6,3,1,5,2,2,2,3,1,5,5,1,4,3,3,2,4,6,0,3,3,2,3,5,2,3,4,12,3,3},
{3,4,11,5,5,4,4,4,4,4,6,6,3,4,5,4,3,4,4,6,3,4,3,5,3,4,4,13,5,4,2,4,8,4,4,6,6,6,11,4,5,4,6,4,4,3,6,5,5,5,3,6,4,12,4,14,3,5,6,5,4,4,4,5,3,3,8,3,8,6,8,3,4,5,7,4,3,8,4,4,4,4,3,4,6,10,4,5,5,4,6,6,3,5,3,7,5,4,4,5},
{9,6,6,5,8,6,5,8,6,6,7,6,6,6,6,5,9,5,9,8,7,5,8,6,5,6,6,6,6,6,6,7,6,6,6,7,5,6,6,6,7,7,6,5,6,5,5,7,6,6,6,6,5,6,5,9,6,5,7,8,9,5,5,6,10,8,6,6,5,3,6,6,6,5,6,7,6,6,5,6,6,6,8,6,6,6,6,5,6,5,6,6,6,8,9,6,5,6,6,7},
{4,0,2,1,1,1,1,2,1,1,2,2,2,1,1,1,1,1,0,1,2,1,1,1,1,1,0,1,1,2,1,1,2,1,1,2,1,0,2,6,1,0,1,1,0,0,4,0,4,1,1,1,6,2,1,1,1,2,2,0,1,1,2,1,1,1,1,3,1,4,0,1,2,1,2,1,2,1,2,1,1,1,1,2,1,5,1,1,3,1,1,1,1,0,1,1,1,1,1,0},
{1,1,1,1,1,1,1,1,1,1},
{1,1,1,1,1,1,1,2,1,1},
{0,4,5,3,5,0,3,2,4,3,3,2,1,3,6,6,3,4,6,2,5,6,5,5,3,3,3,3,4,4,1,3,6,4,3,5,0,5,5,2,6,5,5,3,3,5,5,1,7,2,5,4,0,5,3,7,2,1,5,2,3,4,3,5,1,2,5,4,3,6,5,3,5,3,3,0,5,5,4,4,4,3,5,3,4,1,7,1,4,3,5,2,2,3,4,3,4,5,0,3},
{0,1,2,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,4,2,0,0,0,2,0,0,0,1,2,0,1,0,0,1,0,0,1,0,0,2,0,2,0,0,1,0,0,1,1,1,0,0,2,0,0,1,0,0,7,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,6,0,0,0,0,2,0,0,0,0,2,1,4,0,0,0,0,0},
{0,1,1,1,1,1,6,6,2,6},
{2,2,3,3,4,3,3,17,3,0,5,3,4,2,30,5,1,3,5,4,4,3,4,2,3,3,0,3,1,2,8,4,5,1,6,5,4,5,7,0,2,3,9,3,2,3,2,6,8,6,6,5,5,3,3,0,4,5,5,6,2,2,4,0,5,1,4,2,2,4,0,4,4,2,9,3,3,8,3,3,5,1,2,2,6,4,7,4,3,0,9,1,6,5,6,5,4,5,6,6},
{5,1,0,6,4,3,3,1,6,3,1,3,3,4,2,4,2,3,4,3,2,0,4,1,2,2,3,5,3,3,0,7,6,2,2,5,2,5,0,1,3,1,3,5,2,5,2,5,2,5,2,4,1,3,3,3,6,5,2,1,5,0,0,5,2,5,4,2,3,2,2,0,1,1,8,3,4,3,4,1,2,2,5,2,4,3,5,3,1,1,8,3,8,5,2,5,3,3,2,3},
{14,1,5,3,3,3,17,4,5,7,12,6,7,11,10,35,8,13,2,7,5,17,1,3,11,19,4,6,9,3,2,6,13,3,3,24,3,4,3,7,2,7,4,4,14,9,21,8,14,0,5,2,3,11,4,9,2,2,3,5,12,5,4,3,14,6,3,4,4,4,4,2,9,20,10,4,9,5,15,4,12,13,3,6,4,5,8,4,7,8,5,10,10,21,2,3,13,11,4,5},
{1,3,1,0,5,1,3,2,1,1,2,0,2,4,0,0,2,8,1,0,4,5,2,0,2,0,1,1,5,4,1,0,2,3,0,1,1,3,2,2,1,2,3,1,3,4,0,0,1,0,5,1,2,2,1,3,3,0,2,4,1,3,1,4,0,1,2,0,3,3,6,1,1,2,1,0,4,3,2,2,2,2,0,2,5,4,0,0,0,0,3,1,0,1,1,3,2,5,2,0},
{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,0,0,5,0,0,0,0,0,0,0,0,0,0,0,5,5,0,5,0,0,0,5,0,5,0,0,0,0,0,0,0,5,5,0,0,0,0,5,0,0,5,0,0,0,0,9,0,0,0,0,0,0,5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,9,0,0,0,0,0,5,17,0,0,0,0,0},
{94,65,30,94,46,31,42,52,38,41,65,46,73,94,51,86,29,40,49,19,23,79,34,49,51,27,53,49,39,21,20,48,49,36,94,24,43,49,19,86,86,33,38,69,95,39,46,35,79,22,39,29,94,29,51,24,44,29,50,29,69,42,80,94,21,95,21,49,96,39,29,67,45,40,41,94,35,39,42,77,67,29,69,38,51,77,67,46,39,27,53,47,69,95,44,34,24,40,95,20},
{6,15,8,4,17,2,24,17,3,9,6,12,11,6,9,12,40,20,18,4,12,16,13,11,8,11,5,24,4,12,6,10,3,20,6,21,5,6,9,12,16,4,14,9,5,7,6,2,6,12,6,20,17,18,13,6,10,13,6,20,13,4,4,5,11,4,24,6,4,51,5,6,5,14,6,12,6,11,12,12,12,2,46,21,15,33,2,12,3,11,5,22,13,5,14,13,6,6,5,6},
{5,5,11,5,6,18,16,13,17,18,9,5,4,6,8,3,6,7,6,5,2,6,5,7,5,6,6,11,3,8,5,4,6,3,4,9,12,8,12,8,3,17,4,7,7,14,6,4,8,7,3,6,6,4,12,15,16,16,13,11,11,4,18,12,4,3,5,11,38,16,11,24,15,6,7,8,5,8,4,5,13,12,5,18,12,9,7,4,5,6,6,5,35,7,11,6,4,18,18,4},
{0,2,1,0,0,2,2,3,1,0,1,0,1,1,0,5,3,7,0,5,2,5,0,0,6,3,4,3,4,0,0,4,6,1,0,1,0,3,3,3,5,5,0,1,1,2,4,2,3,0,2,0,5,2,3,0,1,2,0,0,0,0,3,0,2,7,3,2,1,4,2,1,0,0,1,2,0,0,4,2,1,1,0,0,0,2,3,3,2,0,4,0,1,1,0,0,6,3,0,3},
{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,2,0,0},
{21,14,9,8,11,10,13,22,10,10,11,4,8,2,8,8,15,3,12,4,4,11,9,8,5,3,9,14,18,8,12,18,29,6,12,3,10,9,6,10,10,7,4,18,8,3,7,10,12,43,12,17,16,5,10,10,9,21,10,6,10,8,13,9,2,11,11,8,11,5,10,12,5,9,4,3,12,10,6,18,27,17,22,28,13,10,6,10,5,8,18,10,8,12,17,2,13,7,17,10},
{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0},
{0,0,0,0,2,0,2,0,2,0,2,2,2,0,0,2,0,3,0,0,0,2,0,0,0,2,0,8,8,2,2,2,0,2,2,2,2,0,7,2,0,8,0,0,2,0,8,0,4,0,2,2,0,2,0,2,2,2,0,2,2,2,0,0,2,0,0,0,0,0,2,0,2,2,2,0,3,2,0,2,8,3,0,0,2,0,2,0,0,0,2,0,0,0,0,0,0,0,0,8},
{4,7,4,2,7,7,4,2,4,0,3,2,2,7,4,7,4,0,7,0,2,0,0,3,7,4,7,4,2,2,4,4,3,0,7,0,4,7,0,0,7,2,2,0,0,4,2,7,3,7,7,7,2,3,4,4,7,7,4,2,2,2,2,0,2,0,4,7,2,7,0,0,3,4,7,0,2,4,7,0,4,0,7,0,7,7,0,0,2,2,7,4,0,2,0,2,0,3,2,3},
{2,8,0,2,2,11,4,8,6,11,5,0,0,9,16,0,52,0,0,3,0,4,0,8,5,2,0,17,0,2,2,8,24,3,0,0,9,14,17,0,9,3,27,0,0,0,11,5,0,10,0,10,0,9,2,6,9,3,0,14,0,5,12,4,0,9,2,0,3,8,0,3,16,0,0,4,3,2,15,0,0,0,4,0,30,2,0,3,6,6,0,9,16,17,18,10,10,4,4,0},
{0,0,1,0,0,0,0,0,0,0},
{0,0,0,0,0,0,0,0,0,0},
{0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0},
{6,7,6,7,6,8,3,11,5,7,3,6,11,11,5,5,7,4,7,4,8,5,9,5,5,3,2,4,6,5,6,4,5,7,4,19,6,6,6,4,10,6,4,5,6,6,7,4,11,2,10,5,5,2,9,8,25,11,7,6,5,10,7,6,10,7,7,5,5,6,5,6,4,9,11,5,4,26,4,5,6,3,4,15,12,5,5,4,7,4,5,6,6,13,10,6,6,5,17,8},
{0,4,0,2,0,0,2,0,0,0,0,0,2,0,0,2,2,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,2,2,0,2,0,0,2,0,0,0,0,4,0,0,0,0,0,2,0,2,0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,2,0,0,0,2,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0},
{14,13,15,14,21,21,9,26,19,36,23,9,5,9,5,13,14,22,17,9,28,7,4,21,10,55,29,9,8,17,11,19,9,20,14,33,12,9,4,27,9,9,6,12,14,7,5,7,20,7,14,11,12,5,17,23,15,9,20,4,10,5,13,5,7,29,19,57,8,19,6,8,14,25,6,9,3,14,25,27,10,5,2,27,53,9,3,14,13,22,8,8,9,4,1,11,26,4,6,9},
{6,6,5,5,5,6,7,5,3,5,5,5,6,7,5,5,6,2,6,5,5,2,6,5,4,5,5,6,6,5,6,3,5,2,7,6,4,5,5,6,4,5,3,4,3,6,6,3,2,7,3,3,6,5,2,7,6,6,7,7,5,6,5,4,5,5,5,2,3,7,3,7,6,6,7,5,1,6,5,3,6,4,3,5,6,5,6,6,5,5,4,6,3,3,4,5,3,6,3,3},
{2,0,2,3,2,2,2,0,2,0,3,0,2,2,3,4,2,0,0,4,2,0,0,2,2,0,0,0,2,0,2,0,2,0,2,2,0,2,2,0,0,2,3,3,0,4,3,2,2,2,2,0,4,0,2,3,2,2,2,0,2,0,2,0,0,2,0,6,3,2,3,0,2,2,3,2,2,0,0,0,3,3,0,2,2,4,2,2,2,0,2,2,2,0,4,0,0,0,2,2},
{0,0,0,2,0,6,6,6,0,2,2,0,0,2,3,0,0,2,0,0,0,0,3,0,2,0,0,0,0,0,0,0,6,4,0,0,0,0,0,6,0,0,2,2,0,0,0,6,6,0,0,0,0,0,2,0,0,4,6,2,6,0,0,0,6,0,0,0,0,0,6,0,4,0,0,3,0,3,4,0,0,0,0,0,0,2,0,0,2,0,2,0,0,0,4,0,2,4,4,0},
{3,2,4,3,4,3,3,3,4,3,4,4,4,2,4,3,0,4,2,3,3,2,4,3,6,0,4,4,6,3,4,0,4,0,3,4,3,4,2,2,2,2,2,2,2,2,4,2,3,3,0,3,2,2,4,2,3,4,3,0,0,3,4,3,3,0,3,4,0,4,2,2,4,2,3,2,3,2,3,0,4,4,3,2,2,3,4,0,4,3,3,3,2,3,2,3,3,3,2,3},
{8,43,5,4,7,5,8,23,10,36,37,7,4,2,5,4,5,5,36,22,23,5,8,6,5,6,7,4,7,7,10,5,2,9,8,22,10,5,9,5,7,5,22,7,9,28,7,5,23,9,20,43,5,23,7,7,3,5,5,43,4,9,4,5,5,6,5,5,8,4,4,33,8,5,5,3,5,4,5,6,40,4,5,5,9,5,15,24,4,7,23,5,7,21,5,7,4,7,8,22},
{17,2,39,33,4,13,29,2,31,69,38,13,4,5,2,9,29,21,30,1,42,6,1,5,1,18,18,42,13,38,11,16,11,16,4,30,29,13,39,12,77,11,12,16,44,9,1,27,10,7,16,32,21,38,12,21,1,4,23,33,2,8,9,32,57,9,16,10,3,16,2,37,46,28,10,19,9,2,1,4,13,11,15,13,11,2,22,17,22,23,33,11,21,10,15,6,2,1,33,1},
{2,1,3,4,12,7,0,13,3,4,4,0,6,1,6,3,1,1,3,0,8,3,2,1,1,0,3,4,1,2,4,8,8,4,4,5,3,2,8,3,5,2,13,4,0,1,3,3,4,9,2,2,7,1,3,3,3,10,13,6,7,3,8,3,4,10,1,5,2,3,8,5,6,11,3,3,4,5,9,2,0,4,7,3,3,11,7,6,1,1,12,3,6,15,4,3,3,1,3,12},
{4,5,8,4,3,5,5,6,8,5,5,4,7,4,5,6,5,7,6,9,6,8,5,4,5,6,4,7,6,7,7,5,4,4,5,9,5,5,5,9,7,4,6,5,4,4,7,5,2,4,8,4,5,8,6,5,5,5,5,5,5,5,5,9,7,4,4,8,8,3,4,5,5,8,5,5,5,4,4,4,5,8,6,6,5,9,5,4,4,7,6,8,6,6,4,4,4,4,7,7},
{15,6,32,6,13,5,22,15,24,13,30,20,13,16,13,13,16,4,12,20,13,12,21,5,43,10,17,13,22,34,12,3,38,6,23,8,20,17,29,13,13,23,17,34,17,13,4,23,19,5,12,12,13,14,20,19,16,16,12,13,9,32,16,13,12,25,16,6,20,12,6,13,20,5,23,16,21,5,8,12,6,16,13,17,7,13,19,22,13,20,20,5,16,16,23,6,13,13,8,13},
{0,6,0,0,0,0,2,0,2,3,3,2,3,0,21,0,2,0,2,2,0,6,2,9,2,3,11,0,2,0,0,0,3,0,0,9,0,5,0,2,3,0,5,2,0,3,3,2,2,0,2,5,2,0,6,5,0,6,0,2,3,5,3,0,0,5,2,2,0,2,2,12,6,6,19,2,0,2,2,0,2,0,9,6,8,5,0,0,2,3,0,2,3,0,0,2,2,3,2,0},
{0,0,4,11,0,4,2,0,2,4,0,4,0,0,0,0,14,7,0,0,0,4,2,0,0,2,2,2,0,0,16,22,10,0,0,0,11,2,0,2,5,24,17,0,16,0,0,2,2,8,22,19,2,5,4,4,2,2,2,2,2,0,0,21,8,0,8,2,4,0,0,0,19,17,4,2,13,7,0,4,4,2,14,0,0,14,17,2,2,0,5,4,4,7,11,7,2,0,2,0},
{16,18,21,7,17,21,19,14,16,15},
{0,0,11,0,0,0,12,1,1,0,0,1,0,4,0,11,0,0,10,3,5,1,1,0,0,0,0,11,9,12,1,1,0,12,1,1,2,0,0,1,0,0,0,0,0,0,0,0,12,3,1,0,2,12,1,1,8,0,0,0,0,0,9,1,1,0,0,12,1,1,0,9,1,0,0,12,0,0,0,0,0,0,0,1,12,8,1,0,0,0,0,1,1,0,8,1,6,0,12,1},
{4,3,3,4,3,2,3,3,2,4,2,2,2,2,2,3,4,2,3,2,2,3,3,3,3,2,3,3,2,2,4,2,2,3,3,2,4,2,2,3,4,2,2,3,2,2,2,3,2,3,3,3,2,3,2,4,3,3,4,2,2,3,2,3,3,3,3,3,2,3,2,2,3,3,2,4,2,2,4,3,2,3,2,3,4,2,2,3,3,4,3,3,3,3,3,2,3,3,2,2},
{8,9,1,14,0,7,8,4,0,0,13,3,4,2,7,0,6,0,10,4,3,16,6,11,6,4,2,5,6,5,9,4,2,5,2,6,1,5,9,4,7,3,7,9,4,17,0,6,19,5,4,0,18,8,4,4,4,1,18,4,2,9,9,14,4,3,0,7,5,0,15,0,13,11,3,17,0,5,12,3,2,7,9,0,10,7,5,12,11,1,7,6,11,4,7,9,10,0,9,7},
{7,2,12,6,11,6,9,6,11,12,9,11,4,2,4,17,9,11,9,16,6,7,6,16,7,7,6,4,16,6,9,7,14,7,11,4,11,14,14,14,12,9,14,4,14,6,4,12,16,7,16,11,9,4,7,9,6,11,2,12,11,14,14,16,4,4,6,4,4,6,7,4,4,9,9,4,4,4,2,11,6,19,6,9,4,7,9,2,6,6,4,9,4,11,2,11,14,6,7,4},
{8,12,5,11,4,17,12,6,11,20,18,11,10,25,10,13,4,17,17,13,18,4,9,15,6,25,24,13,7,13,5,19,17,11,18,50,11,12,6,11,6,17,11,17,4,13,14,6,19,30,15,15,8,23,11,0,10,17,23,0,4,12,11,27,5,15,5,16,12,7,12,14,6,16,8,7,12,15,7,8,11,20,15,5,14,5,12,10,28,0,16,18,12,29,7,9,23,18,11,12},
{5,5,2,2,1,5,8,0,5,24,7,5,12,3,1,2,7,0,20,7,5,6,8,2,9,4,12,22,20,0,12,7,7,5,20,3,8,26,11,5,7,0,7,6,5,0,7,7,4,7,7,7,4,9,6,9,5,12,7,2,0,7,8,6,8,8,7,20,13,3,2,5,0,2,24,4,0,5,2,26,3,0,3,1,30,4,2,5,4,6,17,2,17,6,32,13,0,1,3,1},
{4,3,3,4,6,3,3,4,5,5,4,7,5,5,5,6,4,3,3,8,5,5,4,3,3,4,3,3,5,3,4,4,5,3,3,11,5,3,3,4,11,8,3,4,4,4,6,5,5,5,5,4,4,3,5,5,11,7,6,6,6,3,5,4,11,6,4,5,3,4,4,4,3,6,3,6,6,3,19,6,5,3,4,3,5,4,4,4,5,4,6,19,4,5,4,4,31,6,4,3},
{5,7,9,2,5,4,13,4,2,2,2,3,4,3,8,3,5,7,0,9,4,4,4,2,8,3,3,9,3,5,2,3,4,5,7,7,7,7,10,5,19,3,10,7,8,4,10,4,7,10,5,5,5,5,5,5,4,7,5,7,3,8,5,3,4,0,11,5,10,8,7,5,5,18,8,3,3,2,4,7,0,19,7,4,8,3,7,4,7,5,9,4,4,4,5,7,4,2,4,5},
{5,8,7,5,4,5,5,7,10,7,8,7,6,6,6,5,5,8,8,5,5,8,6,9,7,6,11,6,7,9,4,8,5,6,9,8,8,10,8,8,7,6,7,7,8,7,8,6,5,9,7,8,11,4,9,8,6,6,6,8,5,8,6,6,6,4,6,6,9,8,6,6,9,7,6,5,6,6,8,9,5,9,8,10,5,9,7,8,6,6,6,5,9,6,8,6,5,7,6,10},
{4,4,8,12,7,5,5,8,7,5,9,9,8,8,5,6,6,9,6,7,8,8,8,8,11,9,7,7,4,4,6,4,7,4,7,6,7,6,7,9,7,9,2,11,6,9,3,6,7,3,6,9,6,7,5,11,9,4,8,4,7,4,8,7,10,10,7,8,7,9,3,5,7,12,6,6,6,8,8,5,7,9,8,5,14,8,5,8,8,6,7,7,4,4,6,7,7,3,5,6},
{5,6,5,6,8,5,5,4,8,6,9,5,4,4,8,4,8,4,6,8,3,7,4,3,6,8,3,3,4,7,3,4,5,7,5,7,4,3,4,6,4,6,5,6,10,11,4,4,6,6,5,5,3,5,6,6,5,5,6,5,4,1,4,7,7,8,13,7,9,5,3,8,7,5,7,8,6,7,5,6,6,7,5,4,4,1,3,9,6,4,7,5,10,5,3,5,3,6,4,7},
{7,3,7,4,5,8,6,6,10,7,9,12,6,5,4,5,8,7,7,6,6,7,8,6,6,4,5,4,4,6,3,5,4,3,5,2,7,5,2,2,5,8,8,3,6,3,3,4,2,6,10,9,6,7,3,17,6,4,7,6,17,6,8,5,14,5,8,4,2,6,9,4,10,6,13,6,6,4,6,4,5,6,15,5,5,6,5,7,7,2,9,3,5,4,9,5,3,4,6,4},
{4,6,3,3,3,7,3,4,4,5},
{2,1,1,3,0,4,1,1,0,2,2,0,2,2,0,0,2,0,0,2,0,4,0,1,5,1,4,0,1,1,1,1,1,1,2,0,1,1,1,5,2,1,0,0,2,4,1,0,2,0,4,0,0,1,2,2,0,3,2,0,1,0,1,0,3,2,1,3,2,1,1,2,2,0,1,2,4,0,1,1,1,1,2,2,1,0,1,0,2,0,5,1,0,1,0,1,6,4,3,0},
{0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,0,1,0,0},
{9,7,4,11,6,6,6,10,3,5,5,6,6,6,7,3,4,4,5,5,6,4,6,6,8,9,10,6,5,8,7,7,6,5,5,9,6,5,7,6,6,11,8,8,6,8,6,8,6,8,9,6,12,11,4,6,5,10,7,10,6,10,10,6,15,13,6,9,7,6,6,6,9,6,6,3,3,5,8,9,14,11,10,4,8,7,6,9,7,6,10,6,10,11,6,10,6,6,7,6},
{7,5,3,36,4,4,36,4,7,3,47,10,35,3,30,36,4,3,16,36,4,6,36,47,39,41,51,39,36,25,47,36,3,3,39,8,3,39,47,14,3,25,4,51,3,47,3,5,53,36,4,5,6,30,36,5,36,5,23,36,22,30,4,36,9,39,8,7,47,19,4,3,8,5,3,22,53,24,53,53,9,39,5,4,3,5,29,7,3,3,47,4,7,36,39,16,5,5,4,5},
{7,9,3,9,3,5,24,30,4,24,47,6,3,6,53,23,36,3,16,22,4,7,51,3,47,4,4,39,4,53,47,47,53,23,34,30,9,16,4,4,3,53,4,16,4,23,4,4,25,3,4,8,14,22,24,36,24,4,8,4,23,34,21,21,13,39,4,3,39,23,7,37,12,39,3,39,41,4,4,53,3,36,24,3,35,7,23,4,4,3,3,30,35,46,47,6,5,24,4,4},
{7,3,2,12,2,5,2,5,0,1,12,3,2,3,2,5,3,4,6,8,13,4,1,3,3,3,3,6,1,2,2,3,6,6,3,5,1,4,5,4,7,0,5,3,6,2,8,2,1,3,3,3,8,1,6,0,7,5,6,7,4,3,5,3,5,3,2,3,3,4,0,2,3,3,3,1,2,0,30,3,6,3,2,2,4,3,2,2,2,4,2,4,2,4,3,2,21,5,2,8},
};
}
| 28,556 | 65.566434 | 310 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/ShapeDTW_1NN.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.distance_based;
import tsml.classifiers.EnhancedAbstractClassifier;
import tsml.classifiers.multivariate.MultivariateAbstractClassifier;
import tsml.transformers.DWT;
import tsml.transformers.HOG1D;
import tsml.transformers.Slope;
import tsml.transformers.Subsequences;
import tsml.transformers.*;
import utilities.multivariate_tools.MultivariateInstanceTools;
import weka.classifiers.AbstractClassifier;
import weka.core.*;
import weka.core.converters.ConverterUtils;
import java.util.ArrayList;
import java.util.Random;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
/**
* The ShapeDTW classifier works by initially extracting a set of subsequences
* describing local neighbourhoods around each data point in a time series.
* These subsequences are then passed into a shape descriptor function that
* transforms these local neighbourhoods into a new representation. This
* new representation is then sent into DTW with 1-NN (DTW_D).
*
* @author Vincent Nicholson
*
*/
public class ShapeDTW_1NN extends EnhancedAbstractClassifier {
    // hyper-parameters
    // length of the local-neighbourhood subsequence extracted around each point
    private int subsequenceLength;
    // if shapeDescriptor is null, then it's the 'raw' shape descriptor.
    // Supported Transformers are the following:
    // null - raw
    // PAA
    // DWT
    // Derivative
    // Slope
    // HOG1D
    private Transformer shapeDescriptor;
    //The second shape descriptor is for performing another transformation and concatenating the results together.
    private Transformer secondShapeDescriptor = null;
    //Authors also propose a weighting factor which is a value to multiply the output of the second transformation
    //by. This produces an output in the form compound = (ShapeDescriptor,weightingFactor*secondShapeDescriptor).
    private double weightingFactor = 1.0;
    // Transformer for extracting the neighbourhoods
    private Subsequences subsequenceTransformer;
    // NN_DTW_Subsequences for performing classification on the training data
    private NN_DTW_Subsequences nnDtwSubsequences;
    //The Dimension independent transformers
    // d1/d2 are instantiated during buildClassifier (preprocessData(Instances))
    // and reused at predict time (preprocessData(Instance))
    private DimensionIndependentTransformer d1;
    private DimensionIndependentTransformer d2;
    // Another method proposed is to combine the results of two shapeDescriptors together, if this is set to
    // true, then the results of shapeDescriptor and secondShapeDescriptor are concatenated together.
    private boolean useSecondShapeDescriptor = false;
    // whitelist of descriptor types accepted by checkParameters()
    private final Transformer [] VALID_TRANSFORMERS = new Transformer[] {new PAA(), new DWT(),
                                                                        new Derivative(), new Slope(),
                                                                        new HOG1D()};
    // For storing the dataset when creating the compound shape descriptors.
    private Instances compoundDataset;
/**
* Private constructor with settings:
* subsequenceLength = 30
* shapeDescriptorFunction = null (so 'raw' is used)
*/
public ShapeDTW_1NN() {
super(CANNOT_ESTIMATE_OWN_PERFORMANCE);
this.subsequenceLength = 30;
this.shapeDescriptor = null;
this.subsequenceTransformer = new Subsequences(subsequenceLength);
this.nnDtwSubsequences = new NN_DTW_Subsequences();
}
    /**
     * Fully-parameterised constructor.
     *
     * @param subsequenceLength length of the neighbourhood extracted around each
     *                          data point (must be >= 1, checked at build time)
     * @param shapeDescriptor primary descriptor transform; null means 'raw'
     * @param useSecondShapeDescriptor if true, a compound descriptor is built by
     *                                 concatenating both descriptor outputs
     * @param secondShapeDescriptor secondary descriptor transform; null means 'raw'
     */
    public ShapeDTW_1NN(int subsequenceLength,Transformer shapeDescriptor,boolean useSecondShapeDescriptor,
                        Transformer secondShapeDescriptor) {
        super(CANNOT_ESTIMATE_OWN_PERFORMANCE);
        this.subsequenceLength = subsequenceLength;
        this.shapeDescriptor = shapeDescriptor;
        this.subsequenceTransformer = new Subsequences(subsequenceLength);
        this.nnDtwSubsequences = new NN_DTW_Subsequences();
        this.secondShapeDescriptor = secondShapeDescriptor;
        this.useSecondShapeDescriptor = useSecondShapeDescriptor;
    }
    /** @return the configured subsequence (neighbourhood) length */
    public int getSubsequenceLength() {
        return subsequenceLength;
    }
    /** @return the primary shape descriptor transform (null means 'raw') */
    public Transformer getShapeDescriptors() {
        return shapeDescriptor;
    }
    /** @return true if a compound (two-descriptor) representation is used */
    public boolean isUsingSecondShapeDescriptor() {return useSecondShapeDescriptor; }
    /** @return the secondary shape descriptor transform (null means 'raw') */
    public Transformer getSecondShapeDescriptor() {return secondShapeDescriptor;}
    /** @return the multiplier applied to the second descriptor's output */
    public double getWeightingFactor() {return weightingFactor; }
    // NOTE(review): does not rebuild subsequenceTransformer, so calling this
    // after construction has no effect on the extraction length — confirm intent.
    public void setSubsequenceLength(int subsequenceLength) {
        this.subsequenceLength = subsequenceLength;
    }
    /** Sets the primary shape descriptor transform (null means 'raw'). */
    public void setShapeDescriptors(Transformer shapeDescriptors) {
        this.shapeDescriptor = shapeDescriptors;
    }
    /** Enables/disables the compound (two-descriptor) representation. */
    public void setIsUsingSecondShapeDescriptor(boolean flag) {this.useSecondShapeDescriptor = flag; }
    /** Sets the secondary shape descriptor transform (null means 'raw'). */
    public void setSecondShapeDescriptor(Transformer t) {this.secondShapeDescriptor = t; }
    /** Sets the multiplier applied to the second descriptor's output. */
    public void setWeightingFactor(double newWeightingFactor) {this.weightingFactor = newWeightingFactor;}
    /**
     * Private method for performing the subsequence extraction on a set of instances as
     * well as the shape descriptor function for training (if not null).
     * Side effect: instantiates and stores d1 (and d2 when the compound
     * descriptor is enabled) so the same fitted transformers are reused by the
     * single-instance overload at predict time.
     *
     * @param data the raw training instances
     * @return the (possibly compound) shape-descriptor representation
     */
    private Instances preprocessData(Instances data) {
        Instances transformedData = this.subsequenceTransformer.transform(data);
        Instances shapeDesc1;
        Instances shapeDesc2 = null;
        //If shape descriptor is null aka 'raw', use the subsequences.
        if (this.shapeDescriptor == null) {
            shapeDesc1 = new Instances(transformedData);
        } else {
            this.d1 = new DimensionIndependentTransformer(this.shapeDescriptor);
            shapeDesc1 = this.d1.transform(transformedData);
        }
        //Test if a second shape descriptor is required
        if(useSecondShapeDescriptor) {
            if(this.secondShapeDescriptor == null) {
                shapeDesc2 = new Instances(transformedData);
            } else {
                this.d2 = new DimensionIndependentTransformer(this.secondShapeDescriptor);
                shapeDesc2 = this.d2.transform(transformedData);
            }
        }
        // shapeDesc2 stays null when no compound descriptor is wanted, in which
        // case combineInstances simply returns shapeDesc1
        Instances combinedInsts = combineInstances(shapeDesc1,shapeDesc2);
        return combinedInsts;
    }
    /**
     * Private method for performing the subsequence extraction on an instance as
     * well as the shape descriptor function for testing (if not null).
     * Assumes buildClassifier has already run: d1 (and d2 when the compound
     * descriptor is enabled) must have been initialised by the Instances
     * overload, otherwise this throws a NullPointerException.
     *
     * @param data the raw test instance
     * @return the (possibly compound) shape-descriptor representation
     */
    private Instance preprocessData(Instance data) {
        Instance transformedData = this.subsequenceTransformer.transform(data);
        Instance shapeDesc1;
        Instance shapeDesc2 = null;
        //If shape descriptor is null aka 'raw', use the subsequences.
        if (this.shapeDescriptor == null) {
            shapeDesc1 = transformedData;
        } else {
            shapeDesc1 = this.d1.transform(transformedData);
        }
        //Test if a second shape descriptor is required
        if(useSecondShapeDescriptor) {
            if(this.secondShapeDescriptor == null) {
                shapeDesc2 = transformedData;
            } else {
                shapeDesc2 = this.d2.transform(transformedData);
            }
        }
        Instance combinedInsts = combineInstances(shapeDesc1,shapeDesc2);
        return combinedInsts;
    }
    /**
     * Private function for concatenating two shape descriptors together.
     * Builds a relational dataset in which each instance holds one relation
     * (dimension-wise concatenation of both descriptors, with the second scaled
     * by weightingFactor) plus the class value at index 1.
     * Side effect: stores the resulting header in compoundDataset so the
     * single-instance overload can attach test instances to it.
     *
     * @param shapeDesc1 first descriptor's instances
     * @param shapeDesc2 second descriptor's instances; null means "no compound",
     *                   in which case shapeDesc1 is returned unchanged
     * @return the compound dataset (or shapeDesc1 when shapeDesc2 is null)
     */
    private Instances combineInstances(Instances shapeDesc1,Instances shapeDesc2) {
        if(shapeDesc2 == null) {
            return shapeDesc1;
        }
        //Create the header for the new data to be stored in.
        Instances compoundHeader = createCompoundHeader(shapeDesc1,shapeDesc2);
        for(int i=0;i<shapeDesc1.numInstances();i++) {
            Instances relationHeader = new Instances(compoundHeader.attribute(0).relation());
            DenseInstance newInst = new DenseInstance(2);
            newInst.setDataset(compoundHeader);
            //Combine all the dimensions together to create the relation
            Instances relation = createRelationalData(shapeDesc1.get(i), shapeDesc2.get(i), relationHeader);
            //Add relation to the first value of newInst: addRelation returns the
            //index under which the relation is stored in the attribute
            int index = newInst.attribute(0).addRelation(relation);
            newInst.setValue(0, index);
            //Add the class value.
            newInst.setValue(1, shapeDesc1.get(i).classValue());
            compoundHeader.add(newInst);
        }
        compoundHeader.setClassIndex(1);
        this.compoundDataset = compoundHeader;
        return compoundHeader;
    }
    /**
     * Private function for creating the header for the compound shape descriptor data.
     * The header has two attributes: a relational attribute whose relation is
     * wide enough for both descriptors concatenated, and the class attribute.
     *
     * @param shapeDesc1 - Instances of the first shape descriptor.
     * @param shapeDesc2 - Instances of the second shape descriptor.
     * @return an empty Instances header for the compound representation
     */
    private Instances createCompoundHeader(Instances shapeDesc1,Instances shapeDesc2) {
        // Create the Instances object
        ArrayList<Attribute> atts = new ArrayList<>();
        //Create the relational attribute
        ArrayList<Attribute> relationalAtts = new ArrayList<>();
        // width = sum of both descriptors' per-dimension attribute counts
        int numAttributes = shapeDesc1.attribute(0).relation().numAttributes() +
                            shapeDesc2.attribute(0).relation().numAttributes();
        // Add the original elements
        for (int i = 0; i < numAttributes; i++)
            relationalAtts.add(new Attribute("Compound_element_" + i));
        // Create the relational table
        Instances relationTable = new Instances("Compound_Elements", relationalAtts, shapeDesc1.numInstances());
        // Create the attribute from the relational table
        atts.add(new Attribute("relationalAtt", relationTable));
        // Add the class attribute
        atts.add(shapeDesc1.classAttribute());
        Instances compoundShapeDesc = new Instances("Compound_Elements",atts,shapeDesc1.numInstances());
        return compoundShapeDesc;
    }
    /**
     * Private function for creating the relation along each dimension within
     * inst1 and inst2. For each dimension the two descriptor vectors are
     * concatenated (the second scaled by weightingFactor) and appended to the
     * supplied header.
     *
     * @param inst1 first descriptor instance (relational, one row per dimension)
     * @param inst2 second descriptor instance (must have the same number of dimensions)
     * @param header the relation header to append rows to
     * @return header, with one concatenated row added per dimension
     */
    private Instances createRelationalData(Instance inst1, Instance inst2, Instances header) {
        Instances rel1 = inst1.relationalValue(0);
        Instances rel2 = inst2.relationalValue(0);
        // Iterate over each dimension
        for(int i=0;i<rel1.numInstances();i++) {
            double [] dim1 = rel1.get(i).toDoubleArray();
            double [] dim2 = rel2.get(i).toDoubleArray();
            //multiply dim2 by a weighting factor
            //(toDoubleArray returns a copy, so the source instance is untouched)
            for(int j=0;j<dim2.length;j++) {
                dim2[j] = dim2[j]*this.weightingFactor;
            }
            double [] both = new double[dim1.length+dim2.length];
            System.arraycopy(dim1,0,both,0,dim1.length);
            System.arraycopy(dim2,0,both,dim1.length,dim2.length);
            // ArrayUtils.addAll(dim1,dim2);
            //Create the new Instance
            DenseInstance newInst = new DenseInstance(both.length);
            for(int j=0;j<both.length;j++) {
                newInst.setValue(j,both[j]);
            }
            header.add(newInst);
        }
        return header;
    }
    /**
     * Private function for concatenating two shape descriptors together.
     * Single-instance (test-time) counterpart of the Instances overload.
     * Assumes compoundDataset was populated during training, since the new
     * instance is attached to it — TODO confirm this is never called before
     * buildClassifier.
     *
     * @param shapeDesc1 first descriptor instance
     * @param shapeDesc2 second descriptor instance; null means "no compound",
     *                   in which case shapeDesc1 is returned unchanged
     * @return the compound instance (or shapeDesc1 when shapeDesc2 is null)
     */
    private Instance combineInstances(Instance shapeDesc1, Instance shapeDesc2) {
        if(shapeDesc2 == null) {
            return shapeDesc1;
        }
        Instance combinedInst = new DenseInstance(2);
        //Create the relational table
        ArrayList<Attribute> relationalAtts = new ArrayList<>();
        int numAttributes = shapeDesc1.attribute(0).relation().numAttributes() +
                shapeDesc2.attribute(0).relation().numAttributes();
        // Add the original elements
        for (int i = 0; i < numAttributes; i++)
            relationalAtts.add(new Attribute("Compound_element_" + i));
        // Create the relational table
        Instances relationTable = new Instances("Compound_Elements", relationalAtts,
                shapeDesc1.attribute(0).relation().numInstances());
        Instances relation = createRelationalData(shapeDesc1,shapeDesc2,relationTable);
        combinedInst.setDataset(this.compoundDataset);
        //addRelation returns the index under which the relation is stored
        int index = combinedInst.attribute(0).addRelation(relation);
        combinedInst.setValue(0, index);
        //Add the class value.
        combinedInst.setValue(1, shapeDesc1.classValue());
        return combinedInst;
    }
@Override
public void buildClassifier(Instances trainInst) throws Exception {
// Check the given parameters
this.checkParameters();
// Check the data
this.getCapabilities().testWithFail(trainInst);
// Record the build time.
long buildTime = System.nanoTime();
// Train the classifier
Instances transformedData = this.preprocessData(trainInst);
this.nnDtwSubsequences.buildClassifier(transformedData);
// Store the timing results.
buildTime = System.nanoTime() - buildTime ;
this.trainResults.setBuildTime(buildTime);
this.trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
}
@Override
public double [] distributionForInstance(Instance testInst) throws Exception {
Instance transformedData = this.preprocessData(testInst);
return this.nnDtwSubsequences.distributionForInstance(transformedData);
}
@Override
public double classifyInstance(Instance testInst) throws Exception {
Instance transformedData = this.preprocessData(testInst);
return this.nnDtwSubsequences.classifyInstance(transformedData);
}
/**
* Private method for checking the parameters inputted into ShapeDTW.
*
*/
private void checkParameters() {
if(this.subsequenceLength < 1) {
throw new IllegalArgumentException("subsequenceLength cannot be less than 1.");
}
//Check the shapeDescriptor function is the correct type.
boolean found = false;
for(Transformer x: this.VALID_TRANSFORMERS) {
if(this.shapeDescriptor == null) {
found = true;
break;
}
if(this.shapeDescriptor.getClass().equals(x.getClass())) {
found = true;
break;
}
}
if(!found) {
throw new IllegalArgumentException("Invalid transformer type for shapeDescriptor.");
}
//Check the secondShapeDescriptor function is the correct type.
found = false;
for(Transformer x: this.VALID_TRANSFORMERS) {
if(this.secondShapeDescriptor == null) {
found = true;
break;
}
if(this.secondShapeDescriptor.getClass().equals(x.getClass())) {
found = true;
break;
}
}
if(!found) {
throw new IllegalArgumentException("Invalid transformer type for shapeDescriptor.");
}
}
/**
* Main method for testing.
*
* @param args
* @throws Exception
*/
public static void main(String[] args) throws Exception {
Instances trainData = createTrainData();
Instances testData = createTestData();
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
String datasetName = "InsectEPGSmallTrain";
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ConverterUtils.DataSource source = new ConverterUtils.DataSource("C:\\Users\\Vince\\Documents\\Dissertation Repositories\\datasets\\Univariate2018_arff\\" + datasetName + "\\" + datasetName + "_TRAIN.arff");
Instances train = source.getDataSet();
if (train.classIndex() == -1) {
train.setClassIndex(train.numAttributes() - 1);
}
source = new ConverterUtils.DataSource("C:\\Users\\Vince\\Documents\\Dissertation Repositories\\datasets\\Univariate2018_arff\\" + datasetName + "\\" + datasetName + "_TEST.arff");
Instances test = source.getDataSet();
if (test.classIndex() == -1) {
test.setClassIndex(test.numAttributes() - 1);
}
Instances [] data = new Instances[] {train,test};
// test bad subsequence length
// this has to be greater than 0.
/*int [] badSubsequences = new int [] {0,-1,-999999999};
for(int i=0;i<badSubsequences.length;i++) {
try{
ShapeDTW_1NN s = new ShapeDTW_1NN(badSubsequences[i],null,false,null);
s.buildClassifier(trainData);
System.out.println("Test failed.");
} catch(IllegalArgumentException e) {
System.out.println("Test passed.");
}
}
// test good subsequence length
int [] goodSubsequences = new int [] {1,5,999};
for(int i=0;i<goodSubsequences.length;i++) {
try{
ShapeDTW_1NN s = new ShapeDTW_1NN(goodSubsequences[i],null,false,null);
s.buildClassifier(trainData);
System.out.println("Test passed.");
} catch(IllegalArgumentException e) {
System.out.println("Test failed.");
}
}
// test bad transformer
// Can only be null, PAA, DWT, Derivative, Slope or HOG1D
Transformer [] badTransformer = new Transformer [] {new PCA(),new Cosine(),new FFT()};
for(int i=0;i<badTransformer.length;i++) {
try{
ShapeDTW_1NN s = new ShapeDTW_1NN(30,badTransformer[i],false,null);
s.buildClassifier(trainData);
System.out.println("Test failed.");
} catch(IllegalArgumentException e) {
System.out.println("Test passed.");
}
}
// test good transformer
Transformer [] badTransformers = new Transformer[] {new PAA(), null, new HOG1D()};
for(int i=0;i<badTransformers.length;i++) {
try{
ShapeDTW_1NN s = new ShapeDTW_1NN(30,badTransformers[i],false,null);
s.buildClassifier(trainData);
System.out.println("Test passed.");
} catch(IllegalArgumentException e) {
System.out.println("Test failed.");
}
}
// test classification functionality
//Instances [] data = new Instances [] {trainData,testData};
Transformer [] allTrans = new Transformer [] {null,new PAA(), new DWT(), new Derivative(), new Slope(),
new HOG1D()};
for(Transformer t:allTrans) {
ShapeDTW_1NN s = new ShapeDTW_1NN(30,t,false,null);
System.out.println(calculateAccuracy(s,data));
System.out.println("Test passed.");
}*/
// Test compound shape descriptor functionality.
//ShapeDTW_1NN s = new ShapeDTW_1NN(1,null,false,new Slope());
//System.out.println(calculateAccuracy(s,data));
ShapeDTW_1NN s = new ShapeDTW_1NN(30,null,false,new Slope());
System.out.println(calculateAccuracy(s,data));
System.out.println("Test passed.");
}
/**
* Function to calculate accuracy purely for testing the functionality of ShapeDTW_1NN.
*
* @param s
* @param data
* @return
* @throws Exception
*/
private static double calculateAccuracy(AbstractClassifier s, Instances [] data) throws Exception {
Instances train = data[0];
Instances test = data[1];
s.buildClassifier(train);
int correct = 0;
for(int i=0;i<test.numInstances();i++) {
double predict = s.classifyInstance(test.get(i));
if(predict == test.get(i).classValue()) {
correct++;
}
}
return (double) correct/(double) test.numInstances();
}
/**
* Function to create train data for testing purposes.
*
* @return
*/
private static Instances createTrainData() {
//Create the attributes
ArrayList<Attribute> atts = new ArrayList<>();
for(int i=0;i<5;i++) {
atts.add(new Attribute("test_" + i));
}
//Create the class values
ArrayList<String> classes = new ArrayList<>();
classes.add("1");
classes.add("0");
atts.add(new Attribute("class",classes));
Instances newInsts = new Instances("Test_dataset",atts,5);
newInsts.setClassIndex(newInsts.numAttributes()-1);
//create the test data
double [] test = new double [] {1,2,3,4,5};
createInst(test,"1",newInsts);
test = new double [] {1,1,2,3,4};
createInst(test,"1",newInsts);
test = new double [] {2,2,2,3,4};
createInst(test,"0",newInsts);
test = new double [] {2,3,4,5,6};
createInst(test,"0",newInsts);
test = new double [] {0,1,1,1,2};
createInst(test,"1",newInsts);
return newInsts;
}
/**
* Function to create test data for testing purposes.
*
* @return
*/
private static Instances createTestData() {
//Create the attributes
ArrayList<Attribute> atts = new ArrayList<>();
for(int i=0;i<5;i++) {
atts.add(new Attribute("test_" + i));
}
//Create the class values
ArrayList<String> classes = new ArrayList<>();
classes.add("1");
classes.add("0");
atts.add(new Attribute("class",classes));
Instances newInsts = new Instances("Test_dataset",atts,5);
newInsts.setClassIndex(newInsts.numAttributes()-1);
//create the test data
double [] test = new double [] {2,4,6,8,10};
createInst(test,"1",newInsts);
test = new double [] {1,1,2,3,4};
createInst(test,"1",newInsts);
test = new double [] {0,1,1,1,2};
createInst(test,"1",newInsts);
return newInsts;
}
/**
* private function for creating an instance from a double array. Used
* for testing purposes.
*
* @param arr
* @return
*/
private static void createInst(double [] arr,String classValue, Instances dataset) {
Instance inst = new DenseInstance(arr.length+1);
for(int i=0;i<arr.length;i++) {
inst.setValue(i,arr[i]);
}
inst.setDataset(dataset);
inst.setClassValue(classValue);
dataset.add(inst);
}
/**
* Inner class for calculating the DTW distance between subsequences used for the
* classification stage (no warping window for this implementation).
*
* TODO: Make this not static once testing has been completed.
*/
public static class NN_DTW_Subsequences extends MultivariateAbstractClassifier {
private Instances train;
public NN_DTW_Subsequences() {};
/**
* @param data set of instances serving as training data
* @throws Exception
*/
@Override
public void buildClassifier(Instances data) throws Exception {
this.train = data;
}
/**
* Find the instance with the lowest distance.
*
* @param inst
* @return
*/
@Override
public double classifyInstance(Instance inst) {
//A TreeMap representing the distance from the test instance
//and the training instance, and an arraylist of instances
//representing the training instances with that distance.
TreeMap<Double,ArrayList<Instance>> treeMap = new TreeMap<>();
Double dist;
ArrayList<Instance> insts;
for(Instance i: train) {
dist = calculateDistance(i,inst);
// If distance has been found before
if(treeMap.containsKey(dist)) {
insts = treeMap.get(dist);
insts.add(i);
treeMap.put(dist,insts);
} else {
insts = new ArrayList<>();
insts.add(i);
treeMap.put(dist, insts);
}
}
//return the instance with the lowest distance
Double lowestDist = treeMap.firstKey();
ArrayList<Instance> clostestInsts = treeMap.get(lowestDist);
//If only one, return it.
if(clostestInsts.size() == 1) {
return clostestInsts.get(0).classValue();
}
//Else, choose a random one
Random rnd = new Random(0);
int index = rnd.nextInt(clostestInsts.size());
return clostestInsts.get(index).classValue();
}
/**
* Private function for calculating the distance between two instances
* that are represented as a set of subsequences. It's static so that
* it can be used elsewhere (also used in the ShapeDTWFeatures transformer).
*
* @param inst1
* @param inst2
* @return
*/
public static double calculateDistance(Instance inst1, Instance inst2) {
/*
* This code has been adopted from the Slow_DTW_1NN class except for a few differences.
*/
Instance [] inst1Arr = MultivariateInstanceTools.splitMultivariateInstance(inst1);
Instance [] inst2Arr = MultivariateInstanceTools.splitMultivariateInstance(inst2);
double [] [] a = MultivariateInstanceTools.convertMultiInstanceToArrays(inst1Arr);
double [] [] b = MultivariateInstanceTools.convertMultiInstanceToArrays(inst2Arr);
double minDist;
int n=a.length;
int m=b.length;
double [] [] matrixD = new double[n][n];
double windowSize = n;
/*
//Set all to max. This is necessary for the window but I dont need to do
it all
*/
for(int i=0;i<n;i++)
for(int j=0;j<m;j++)
matrixD[i][j]=Double.MAX_VALUE;
// Original DTW was a[0]-b[0]^2 (difference between points),
// now its the euclidean distance between subsequences.
matrixD[0][0]=calculateEuclideanDistance(a[0],b[0]);
//Base cases for warping 0 to all with max interval r
//Warp a[0] onto all b[1]...b[r+1]
for(int j=1;j<windowSize && j<n;j++)
//also the case here
matrixD[0][j]=matrixD[0][j-1]+calculateEuclideanDistance(a[0],b[j]);
// Warp b[0] onto all a[1]...a[r+1]
for(int i=1;i<windowSize && i<n;i++)
matrixD[i][0]=matrixD[i-1][0]+calculateEuclideanDistance(a[i],b[0]);
//Warp the rest,
for (int i=1;i<n;i++){
for (int j = 1;j<m;j++){
//Find the min of matrixD[i][j-1],matrixD[i-1][j] and matrixD[i-1][j-1]
if (i < j + windowSize && j < i + windowSize) {
minDist=matrixD[i][j-1];
if(matrixD[i-1][j]<minDist)
minDist=matrixD[i-1][j];
if(matrixD[i-1][j-1]<minDist)
minDist=matrixD[i-1][j-1];
matrixD[i][j]=minDist+calculateEuclideanDistance(a[i],b[j]);
}
}
}
//Find the minimum distance at the end points, within the warping window.
return matrixD[n-1][m-1];
}
/**
* Private function for calculating the euclidean distance between two subsequences.
*
* @param subsequence1
* @param subsequence2
* @return
*/
private static double calculateEuclideanDistance(double [] subsequence1, double [] subsequence2) {
double total = 0.0;
for(int i=0;i<subsequence1.length;i++) {
total += Math.pow(subsequence1[i] - subsequence2[i],2);
}
return total;
}
public static void main(String[] args) {
try{
NN_DTW_Subsequences d = new NN_DTW_Subsequences();
ConverterUtils.DataSource source = new ConverterUtils.DataSource("C:\\Users\\Vince\\Documents\\Dissertation Repositories\\test.arff");
Instances data = source.getDataSet();
if (data.classIndex() == -1) {
data.setClassIndex(data.numAttributes() - 1);
}
System.out.println(data.toString());
System.out.println(d.calculateDistance(data.get(2),data.get(3)));
} catch(Exception e) {
}
}
}
}
| 29,761 | 39.65847 | 215 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/ShapeDTW_SVM.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.distance_based;
import experiments.data.DatasetLoading;
import tsml.classifiers.EnhancedAbstractClassifier;
import tsml.transformers.ShapeDTWFeatures;
import tsml.transformers.Subsequences;
import utilities.generic_storage.Pair;
import utilities.samplers.RandomStratifiedSampler;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.functions.SMO;
import weka.classifiers.functions.supportVector.PolyKernel;
import weka.classifiers.functions.supportVector.RBFKernel;
import weka.core.Instance;
import weka.core.Instances;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
/*
* This class extends on the idea of ShapeDTW_1NN by transforming
* a test instance into ShapeDTW distance features (note that it uses
* the default shape descriptor which is 'raw'). It calculates the test
* instance's distance from a reference set and these become the features
* for the instance. This transformed instance is then fed into a
* SVM classifier. The same task is done on the training set.
*/
public class ShapeDTW_SVM extends EnhancedAbstractClassifier {
private Subsequences subsequenceTransformer;
//The transformer used to produce the shape dtw features.
private ShapeDTWFeatures sdtwFeats;
//The stratified sampler
private RandomStratifiedSampler sampler;
private int subsequenceLength;
private SMO svmClassifier;
public enum KernelType {PolyNomial,RBF};
private KernelType kernelType = KernelType.PolyNomial;
public ShapeDTW_SVM() {
super(CANNOT_ESTIMATE_OWN_PERFORMANCE);
this.subsequenceTransformer = new Subsequences();
//Use a polynomial kernel (default exponent is 1, a linear kernel)
this.svmClassifier = new SMO();
this.sampler = new RandomStratifiedSampler();
}
public ShapeDTW_SVM(int subsequenceLength, KernelType k) {
super(CANNOT_ESTIMATE_OWN_PERFORMANCE);
this.subsequenceTransformer = new Subsequences(subsequenceLength);
this.svmClassifier = new SMO();
this.sampler = new RandomStratifiedSampler();
this.kernelType = k;
}
/* Getters and Setters */
public KernelType getKernelType() {return kernelType; }
public void setKernelType(KernelType k) {this.kernelType = k;}
    /**
     * Builds the classifier: transforms the training data into ShapeDTW
     * distance features, tunes the SVM's kernel parameter and C by 10-fold
     * cross-validation, and records the total build time (which includes both
     * the transform and the tuning).
     *
     * @param trainInsts the training data
     * @throws Exception if the data fails the capability check or tuning fails
     */
    @Override
    public void buildClassifier(Instances trainInsts) throws Exception {
        // Check the data
        this.getCapabilities().testWithFail(trainInsts);
        // Record the build time.
        long buildTime = System.nanoTime();
        // Convert the data into an appropriate form (ShapeDTW features)
        Instances trainingData = preprocessData(trainInsts);
        // Tune the exponent (just values 1,2 and 3 and C values 10^-5 to 10^5).
        int numFolds = 10;
        tuneSVMClassifier(trainingData,numFolds);
        // Store the timing results.
        buildTime = System.nanoTime() - buildTime ;
        this.trainResults.setBuildTime(buildTime);
        this.trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
    }
    /**
     * Tunes the SVM by grid search over (kernel parameter, C) pairs, scoring
     * each by numFolds-fold cross-validation on the transformed training data,
     * then builds the SVM on the full training set with the winning
     * configuration. The kernel parameter is the polynomial exponent (1,2,3)
     * or the RBF gamma; C ranges over 10^-6..10^4. Accuracy ties are broken
     * uniformly at random.
     *
     * @param trainData the ShapeDTW-feature training data
     * @param numFolds  number of cross-validation folds
     * @throws Exception if building the SVM fails
     */
    private void tuneSVMClassifier(Instances trainData, int numFolds) throws Exception {
        Instances [] folds = createFolds(numFolds,trainData);
        Double [] paramArray;
        Double [] cs = new Double[]{0.000001,0.00001,0.0001,0.001,0.01,0.1,1.0,10.0,100.0,1000.0,10000.0};
        if(this.kernelType == KernelType.PolyNomial) {
            //The selected degrees to try
            paramArray = new Double[]{1.0,2.0,3.0};
        } else {
            //a RBF kernel
            //selected Gamma values
            paramArray = new Double[]{0.005,0.01,0.05,0.1,0.5,1.0,5.0,10.0};
        }
        // numIters equals the full cross product, so every combination is tried.
        int numIters = paramArray.length*cs.length;
        ArrayList<Pair<Double,Double>> paramsToTry = createParamsToTry(paramArray,cs,numIters);
        //try the selected parameter values
        double [] accuracies = collectResults(folds,paramsToTry);
        //Choose the parameter with the best accuracy (ties are settled randomly)
        ArrayList<Integer> bestIndex = new ArrayList<>();
        //Gather the bestAccuracies
        double bestAccuracy = -Double.MAX_VALUE;
        for(int i=0;i<accuracies.length;i++) {
            if(accuracies[i] > bestAccuracy) {
                bestAccuracy = accuracies[i];
                bestIndex = new ArrayList<>();
                bestIndex.add(i);
            } else if(accuracies[i] == bestAccuracy) {
                bestIndex.add(i);
            }
        }
        //Choose the configuration
        Pair<Double,Double> chosenParamValue;
        if(bestIndex.size() == 1) {
            chosenParamValue = paramsToTry.get(bestIndex.get(0));
        } else {
            // NOTE(review): unseeded Random makes tie-breaking (and hence the
            // built model) non-reproducible between runs.
            Random rnd = new Random();
            int index = rnd.nextInt(bestIndex.size());
            chosenParamValue = paramsToTry.get(bestIndex.get(index));
        }
        //Set up the SVM as the chosen configuration
        if(kernelType == KernelType.PolyNomial) {
            PolyKernel p = new PolyKernel();
            p.setExponent(chosenParamValue.var1);
            this.svmClassifier.setKernel(p);
            this.svmClassifier.setC(chosenParamValue.var2);
            System.out.println("Chosen Exponent - " + chosenParamValue.var1);
            System.out.println("Chosen C Value - " + chosenParamValue.var2);
        } else {
            RBFKernel r = new RBFKernel();
            r.setGamma(chosenParamValue.var1);
            this.svmClassifier.setKernel(r);
            this.svmClassifier.setC(chosenParamValue.var2);
            System.out.println("Chosen Gamma Value - " + chosenParamValue.var1);
            System.out.println("Chosen C Value - " + chosenParamValue.var2);
        }
        this.svmClassifier.buildClassifier(trainData);
    }
    /**
     * Evaluates each candidate (kernel parameter, C) pair by cross-validation
     * over the given folds and returns one accuracy per candidate.
     *
     * @param folds       the pre-built folds; assumed all the same size - the
     *                    denominator below is folds.length * folds[0].numInstances()
     * @param paramsToTry candidate parameter pairs
     * @return cross-validation accuracy per candidate, in the same order
     * @throws Exception if building the SVM fails
     */
    private double [] collectResults(Instances [] folds, ArrayList<Pair<Double,Double>> paramsToTry) throws Exception{
        double [] paramsAccuracies = new double[paramsToTry.size()];
        int total = folds.length*folds[0].numInstances();
        int numFolds = folds.length;
        //Go through every parameter value
        for(int i=0;i<paramsToTry.size();i++) {
            System.out.println("Param number - " + i);
            //Perform the n-fold cross-validation
            //record the number it predicted correctly.
            int numCorrect = 0;
            //For each fold
            for(int j=0;j<folds.length;j++) {
                List<Integer> trainFolds = IntStream.rangeClosed(0,numFolds-1).boxed().collect(Collectors.toList());
                //testing fold
                Instances test = folds[j];
                // remove(j) here removes BY INDEX (List.remove(int)); because
                // the list holds 0..numFolds-1 in order, the element at index j
                // is also the value j, so the test fold is excluded as intended.
                trainFolds.remove(j);
                //training folds
                Instances train = createTrainFolds(folds,trainFolds);
                //set up the SVM
                if(kernelType == KernelType.PolyNomial) {
                    PolyKernel p = new PolyKernel();
                    p.setExponent(paramsToTry.get(i).var1);
                    this.svmClassifier.setKernel(p);
                    this.svmClassifier.setC(paramsToTry.get(i).var2);
                    this.svmClassifier.buildClassifier(train);
                } else {
                    RBFKernel r = new RBFKernel();
                    r.setGamma(paramsToTry.get(i).var1);
                    this.svmClassifier.setKernel(r);
                    this.svmClassifier.setC(paramsToTry.get(i).var2);
                    this.svmClassifier.buildClassifier(train);
                }
                //test the SVM under the current configuration
                for(int k=0;k<test.numInstances();k++) {
                    double predictedClassValue = this.svmClassifier.classifyInstance(test.get(k));
                    if(predictedClassValue == test.get(k).classValue()) {
                        numCorrect++;
                    }
                }
            }
            paramsAccuracies[i] = (double) numCorrect / (double) total;
        }
        return paramsAccuracies;
    }
/**
* Private function for combining multiple instances into one.
*
* @return
*/
private Instances createTrainFolds(Instances [] listOfFolds, List<Integer> requiredFolds) {
Instances combinedInsts = new Instances(listOfFolds[0],
listOfFolds[0].numInstances()*requiredFolds.size());
for(int i=0;i<requiredFolds.size();i++) {
Instances fold = listOfFolds[requiredFolds.get(i)];
for(int j=0;j<fold.numInstances();j++) {
combinedInsts.add(fold.get(j));
}
}
return combinedInsts;
}
/**
* Private function for choosing the combination of parameters to try. First
* value is the chosen degree, second is the chosen C value.
*
* @param degrees
* @param cVals
* @param numIts
* @return
*/
private ArrayList<Pair<Double,Double>> createParamsToTry(Double [] degrees, Double [] cVals, int numIts) {
Random rnd = new Random();
ArrayList<Pair<Double,Double>> chosenParams = new ArrayList<>();
while(chosenParams.size() != numIts) {
Double chosenDegree = degrees[rnd.nextInt(degrees.length)];
Double chosenC = cVals[rnd.nextInt(cVals.length)];
Pair<Double,Double> chosenParam = new Pair<>(chosenDegree,chosenC);
if(chosenParams.contains(chosenParam)) {
//If this parameter pair has already been chosen, choose a different one.
continue;
} else {
chosenParams.add(chosenParam);
}
}
return chosenParams;
}
    /**
     * Partitions the training data into numFolds contiguous, equally sized
     * folds (no shuffling or stratification is done here).
     *
     * NOTE(review): fold size is floor(n / numFolds), so up to numFolds-1
     * trailing instances are silently excluded from the cross-validation.
     * collectResults() also assumes every fold has exactly this size, so any
     * fix here must update its 'total' computation in step.
     *
     * @param numFolds  number of folds to create
     * @param trainData the data to partition
     * @return the folds, in order
     */
    private Instances [] createFolds(int numFolds,Instances trainData) {
        int foldSize = (int) Math.floor((double)trainData.numInstances()/ (double) numFolds);
        Instances [] folds = new Instances [numFolds];
        for(int i=0;i<numFolds;i++) {
            folds[i] = new Instances(trainData,foldSize);
            // Fold i takes the contiguous slice [i*foldSize, (i+1)*foldSize).
            for(int j=0+foldSize*i;j<foldSize + foldSize*i;j++) {
                folds[i].add(trainData.get(j));
            }
        }
        return folds;
    }
    /**
     * Transforms a training set into ShapeDTW distance features: extracts
     * subsequences, then builds the ShapeDTWFeatures transformer with those
     * subsequences as the reference set (stored in sdtwFeats so test instances
     * can later be transformed against the same reference) and applies it.
     *
     * @param trainInsts the raw training data
     * @return the training data as ShapeDTW distance features
     */
    private Instances preprocessData(Instances trainInsts) {
        // Transform the trainInsts into subsequences
        Instances subsequences = this.subsequenceTransformer.transform(trainInsts);
        // Create the shapeDTW features on the training set
        this.sdtwFeats = new ShapeDTWFeatures(subsequences);
        return this.sdtwFeats.transform(subsequences);
    }
    /**
     * Transforms a single (test) instance into ShapeDTW distance features
     * against the reference set captured during training.
     *
     * @param trainInsts the instance to transform
     * @return the transformed instance
     */
    private Instance preprocessData(Instance trainInsts) {
        // Transform the trainInsts into subsequences
        Instance subsequences = this.subsequenceTransformer.transform(trainInsts);
        // Create the shapeDTW features
        return this.sdtwFeats.transform(subsequences);
    }
    /**
     * Classifies a test instance by transforming it to ShapeDTW features and
     * delegating to the tuned SVM.
     */
    @Override
    public double classifyInstance(Instance testInst) throws Exception {
        Instance transformedInst = this.preprocessData(testInst);
        return this.svmClassifier.classifyInstance(transformedInst);
    }
    /**
     * Class distribution for a test instance, delegated to the tuned SVM after
     * the ShapeDTW-feature transform.
     */
    @Override
    public double [] distributionForInstance(Instance testInst) throws Exception {
        Instance transformedInst = this.preprocessData(testInst);
        return this.svmClassifier.distributionForInstance(transformedInst);
    }
    /**
     * Ad-hoc comparison of ShapeDTW_SVM (RBF kernel) against ShapeDTW_1NN on
     * the Beef dataset (seed 0).
     *
     * @param args - the command line arguments (unused).
     */
    public static void main(String[] args) throws Exception {
        Instances [] data = DatasetLoading.sampleBeef(0);
        ShapeDTW_SVM s = new ShapeDTW_SVM();
        s.setKernelType(KernelType.RBF);
        System.out.println(calculateAccuracy(s,data));
        ShapeDTW_1NN s2 = new ShapeDTW_1NN();
        System.out.println(calculateAccuracy(s2,data));
    }
    /**
     * Accuracy of a classifier on a train/test split.
     *
     * @param s    the classifier to build and evaluate
     * @param data data[0] is the training set, data[1] the test set
     * @return fraction of test instances classified correctly
     * @throws Exception if building or prediction fails
     */
    private static double calculateAccuracy(AbstractClassifier s, Instances [] data) throws Exception {
        Instances train = data[0];
        Instances test = data[1];
        s.buildClassifier(train);
        int correct = 0;
        for(int i=0;i<test.numInstances();i++) {
            double predict = s.classifyInstance(test.get(i));
            if(predict == test.get(i).classValue()) {
                correct++;
            }
        }
        return (double) correct/(double) test.numInstances();
    }
}
| 14,441 | 40.262857 | 118 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/SlowDTW_1NN.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.distance_based;
import java.util.ArrayList;
import tsml.classifiers.legacy.elastic_ensemble.distance_functions.DTW;
import tsml.classifiers.legacy.elastic_ensemble.distance_functions.DTW_DistanceBasic;
import java.util.HashMap;
import evaluation.storage.ClassifierResults;
import experiments.data.DatasetLoading;
import java.util.concurrent.TimeUnit;
import tsml.classifiers.EnhancedAbstractClassifier;
import tsml.classifiers.ParameterSplittable;
import machine_learning.classifiers.SaveEachParameter;
import weka.core.*;
/*
This classifier does the full 101 parameter searches for window.
It is only here for comparison to faster methods
*/
public class SlowDTW_1NN extends EnhancedAbstractClassifier implements SaveEachParameter,ParameterSplittable{
    // When true, buildClassifier grid-searches the warping window by leave-one-out CV.
    private boolean optimiseWindow=false;
    // NOTE(review): not referenced by any method in this class - confirm
    // before relying on it.
    private double windowSize=1;
    // Largest window to try, as a percentage of series length (searched downwards).
    private int maxPercentageWarp=100;
    // The stored training data; 1-NN classification scans this linearly.
    private Instances train;
    private int trainSize;
    // Best window expressed in data points: maxR % of the series length.
    private int bestWarp;
    // Largest window actually reached by the DTW during the last CV pass.
    private int maxWindowSize;
    // The distance function whose window parameter r is tuned.
    DTW_DistanceBasic dtw;
    // Cache of pairwise distances keyed by i*trainSize+j (see crossValidateAccuracy).
    HashMap<Integer,Double> distances;
    // Best window percentage found so far.
    double maxR=1;
    // LOOCV accuracy for each window percentage tried, in search order.
    ArrayList<Double> accuracy=new ArrayList<>();
    protected String resultsPath;
    protected boolean saveEachParaAcc=false;
    @Override
    public void setPathToSaveParameters(String r){
        resultsPath=r;
        setSaveEachParaAcc(true);
    }
    @Override
    public void setSaveEachParaAcc(boolean b){
        saveEachParaAcc=b;
    }
//Think this always does para search?
//    @Override
//    public boolean findsTrainAccuracyEstimate(){ return findTrainAcc;}
    /**
     * Default constructor: plain DTW distance; can estimate its own performance.
     */
    public SlowDTW_1NN(){
        super(CAN_ESTIMATE_OWN_PERFORMANCE);
        dtw=new DTW();
        accuracy=new ArrayList<>();
    }
    /**
     * Constructor with a caller-supplied DTW variant whose window is tuned/used.
     *
     * @param d the DTW distance implementation
     */
    public SlowDTW_1NN(DTW_DistanceBasic d){
        super(CAN_ESTIMATE_OWN_PERFORMANCE);
        dtw=d;
        accuracy=new ArrayList<>();
    }
@Override
public String getParameters() {
String result="BuildTime,"+trainResults.getBuildTime()+",CVAcc,"+trainResults.getAcc()+",Memory,"+trainResults.getMemory();
result+=",BestWarpPercent,"+bestWarp+",AllAccs,";
for(double d:accuracy)
result+=","+d;
return result;
}
    public double getMaxR(){ return maxR;}
    // Upper bound of the window search, as a percentage of series length.
    public void setMaxPercentageWarp(int a){maxPercentageWarp=a;}
    // Enables/disables the window grid search performed in buildClassifier.
    public void optimiseWindow(boolean b){ optimiseWindow=b;}
    // Delegates the window parameter r straight to the distance function.
    public void setR(double r){dtw.setR(r);}
    public double getR(){ return dtw.getR();}
    public int getBestWarp(){ return bestWarp;}
    public int getWindowSize(){ return dtw.getWindowSize(train.numAttributes()-1);}
    /**
     * Builds the 1-NN classifier. If window optimisation is enabled, grid
     * searches window percentages from maxPercentageWarp down to 0 (step 1),
     * scoring each by leave-one-out CV; only strict improvements replace the
     * best, so ties favour the larger window. Optionally re-runs LOOCV
     * afterwards to populate trainResults with per-instance predictions.
     *
     * @param d the training data (stored by reference, and temporarily
     *          mutated - remove/re-add - during the train estimate below)
     * @throws Exception declared for the interface; timing errors are caught
     */
    @Override
    public void buildClassifier(Instances d) throws Exception{
        trainResults =new ClassifierResults();
        long t=System.nanoTime();
        train=d;
        trainSize=d.numInstances();
        if(optimiseWindow){
            maxR=0;
            double maxAcc=0;
            int dataLength=train.numAttributes()-1;
/* If the data length < 100 then there will be some repetition
   should skip some values I reckon
            if(dataLength<maxNosWindows)
                maxPercentageWarp=dataLength;
*/
            // Search window sizes from largest to smallest, 1% at a time.
            for(int i=maxPercentageWarp;i>=0;i-=1){
                dtw.setR(i/100.0);
                // maxAcc is passed in to allow early abandon inside the CV.
                double acc=crossValidateAccuracy(maxAcc);
                accuracy.add(acc);
                if(acc>maxAcc){
                    maxR=i;
                    maxAcc=acc;
                }
            }
            // Convert the winning percentage into a warp width in data points.
            bestWarp=(int)(maxR*dataLength/100);
            System.out.println("OPTIMAL WINDOW ="+maxR+" % which gives a warp of"+bestWarp+" data");
//            dtw=new DTW();
            dtw.setR(maxR/100.0);
            trainResults.setAcc(maxAcc);
        }
        try {
            trainResults.setBuildTime(System.nanoTime()-t);
        } catch (Exception e) {
            System.err.println("Inheritance preventing me from throwing this error...");
            System.err.println(e);
        }
        Runtime rt = Runtime.getRuntime();
        long usedBytes = (rt.totalMemory() - rt.freeMemory());
        trainResults.setMemory(usedBytes);
        if(getEstimateOwnPerformance()){ //Save basic train results
            long estTime = System.nanoTime();
            // Leave-one-out: remove instance i, classify it against the rest,
            // then put it back in the same position.
            for(int i=0;i<train.numInstances();i++){
                Instance test=train.remove(i);
                long predTime = System.nanoTime();
                int pred=(int)classifyInstance(test);
                predTime = System.nanoTime() - predTime;
                double[] dist = new double[train.numClasses()];
                dist[pred] = 1.0;
                trainResults.addPrediction(test.classValue(), dist, pred, predTime, "");
                train.add(i,test);
            }
            estTime = System.nanoTime() - estTime;
            trainResults.setErrorEstimateTime(estTime);
            trainResults.setErrorEstimateMethod("cv_loo");
            trainResults.setEstimatorName("SlowDTW_1NN");
            trainResults.setDatasetName(train.relationName());
            trainResults.setSplit("train");
            //no foldid/seed
            trainResults.setNumClasses(train.numClasses());
            trainResults.setParas(getParameters());
            trainResults.setTimeUnit(TimeUnit.NANOSECONDS);
            trainResults.finaliseResults();
        }
    }
@Override
public double classifyInstance(Instance d){
/*Basic distance, with early abandon. This is only for 1-nearest neighbour*/
double minSoFar=Double.MAX_VALUE;
double dist; int index=0;
for(int i=0;i<train.numInstances();i++){
dist=dtw.distance(train.instance(i),d,minSoFar);
if(dist<minSoFar){
minSoFar=dist;
index=i;
}
}
return train.instance(index).classValue();
}
@Override
public double[] distributionForInstance(Instance instance){
double[] dist=new double[instance.numClasses()];
dist[(int)classifyInstance(instance)]=1;
return dist;
}
    /**
     * Leave-one-out CV accuracy of 1-NN under the current DTW window setting.
     *
     * Distances use early abandon (the best distance so far is passed into the
     * DTW so it can stop early). Pairs whose distance completed without being
     * abandoned are cached in 'distances', keyed i*trainSize+j with i&lt;j, so
     * the symmetric lookup (j,i) can reuse them; abandoned pairs are simply
     * recomputed with the then-tighter bound when met again. This trades n^2
     * memory (worst case) for avoiding repeated full DTW computations.
     *
     * @param maxAcc best accuracy found so far by the window search
     * @return the LOOCV accuracy
     */
    private double crossValidateAccuracy(double maxAcc){
        double a=0,d, minDist;
        int nearest;
        Instance inst;
        // NOTE(review): computed but never used below - presumably intended
        // for an early abandon of the whole CV pass.
        int bestNosCorrect=(int)(maxAcc*trainSize);
        maxWindowSize=0;
        int w;
        distances=new HashMap<>(trainSize);
        for(int i=0;i<trainSize;i++){
            //Find nearest to element i
            nearest=0;
            minDist=Double.MAX_VALUE;
            inst=train.instance(i);
            for(int j=0;j<trainSize;j++){
                if(i!=j){
//                    d=dtw.distance(inst,train.instance(j),minDist);
                    //Store past distances if not early abandoned
                    //Not seen i,j before
                    if(j>i){
                        d=dtw.distance(inst,train.instance(j),minDist);
                        //Store if not early abandon
                        if(d!=Double.MAX_VALUE){
//                            System.out.println(" Storing distance "+i+" "+j+" d="+d+" with key "+(i*trainSize+j));
                            distances.put(i*trainSize+j,d);
//                            storeCount++;
                        }
                    //Else if stored recover
                    }else if(distances.containsKey(j*trainSize+i)){
                        d=distances.get(j*trainSize+i);
//                        System.out.println(" Recovering distance "+i+" "+j+" d="+d);
//                        recoverCount++;
                    }
                    //Else recalculate with new early abandon
                    else{
                        d=dtw.distance(inst,train.instance(j),minDist);
                    }
                    if(d<minDist){
                        nearest=j;
                        minDist=d;
                        // Track the widest window the DTW actually needed.
                        w=dtw.findMaxWindow();
                        if(w>maxWindowSize)
                            maxWindowSize=w;
                    }
                }
            }
            //Measure accuracy for nearest to element i
            if(inst.classValue()==train.instance(nearest).classValue())
                a++;
        }
//        System.out.println("trainSize ="+trainSize+" stored ="+storeCount+" recovered "+recoverCount);
        return a/(double)trainSize;
    }
    /**
     * Ad-hoc test harness: loads the Coffee dataset from a hard-coded local
     * path and builds the classifier on the training split.
     *
     * NOTE(review): the test split is loaded but never used, and its class
     * index is never set.
     */
    public static void main(String[] args) throws Exception{
        SlowDTW_1NN c = new SlowDTW_1NN();
        String path="C:\\Research\\Data\\Time Series Data\\Time Series Classification\\";
        Instances test=DatasetLoading.loadDataNullable(path+"Coffee\\Coffee_TEST.arff");
        Instances train=DatasetLoading.loadDataNullable(path+"Coffee\\Coffee_TRAIN.arff");
        train.setClassIndex(train.numAttributes()-1);
        c.buildClassifier(train);
    }
    /**
     * Not implemented for this legacy benchmark classifier.
     *
     * @throws UnsupportedOperationException always
     */
    @Override
    public Capabilities getCapabilities() {
        throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
    }
    /** Not implemented. @throws UnsupportedOperationException always */
    @Override
    public void setParamSearch(boolean b) {
        throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
    }
    /** Not implemented. @throws UnsupportedOperationException always */
    @Override
    public void setParametersFromIndex(int x) {
        throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
    }
}
| 10,762 | 37.031802 | 135 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/distances/BaseDistanceMeasure.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.distance_based.distances;
import tsml.classifiers.distance_based.utils.strings.StrUtils;
import tsml.data_containers.TimeSeriesInstances;
import tsml.data_containers.utilities.Converter;
import weka.core.Instance;
import weka.core.Utils;
/**
 * Convenience base class for distance measures: renders the measure as its
 * name followed by its option string, or just the name when there are no
 * options.
 */
public abstract class BaseDistanceMeasure implements DistanceMeasure {
    @Override public String toString() {
        // e.g. "dtw -w 5"; option-free measures print only their name.
        final String options = Utils.joinOptions(getOptions());
        return options.isEmpty() ? getName() : getName() + " " + options;
    }
}
| 1,335 | 35.108108 | 76 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/distances/DistanceFunctionAdapter.java | package tsml.classifiers.distance_based.distances;
import tsml.data_containers.TimeSeriesInstance;
import tsml.data_containers.TimeSeriesInstances;
import weka.core.DistanceFunction;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.neighboursearch.PerformanceStats;
import java.util.Enumeration;
/**
 * Adapts a tsml DistanceMeasure to Weka's DistanceFunction interface so tsml
 * measures can be plugged into Weka components. All distance calls delegate to
 * the wrapped measure; Weka-specific hooks with no tsml equivalent are no-ops.
 */
public class DistanceFunctionAdapter implements DistanceFunction {
    public DistanceFunctionAdapter(final DistanceMeasure dm) {
        this.dm = dm;
    }
    // The wrapped tsml distance measure all calls delegate to.
    private final DistanceMeasure dm;
    public boolean isSymmetric() {
        return dm.isSymmetric();
    }
    @Override public double distance(final Instance a, final Instance b) {
        return dm.distance(a, b);
    }
    // PerformanceStats is ignored - the wrapped measure does not record stats.
    @Override public double distance(final Instance a, final Instance b,
        final PerformanceStats stats) {
        return dm.distance(a, b);
    }
    @Override public double distance(final Instance a, final Instance b, final double limit,
        final PerformanceStats stats) {
        return dm.distance(a, b, limit);
    }
    // No-op: distances are returned as-is.
    @Override public void postProcessDistances(final double[] distances) {
    }
    // No-op: the wrapped measure is not updated incrementally.
    @Override public void update(final Instance ins) {
    }
    @Override public double distance(final Instance a, final Instance b, final double limit) {
        return dm.distance(a, b, limit);
    }
    @Override public String toString() {
        return dm.toString();
    }
    public DistanceFunction asDistanceFunction() {
        return this;
    }
    @Override public String[] getOptions() {
        return dm.getOptions();
    }
    @Override public Enumeration listOptions() {
        return dm.listOptions();
    }
    @Override public void setOptions(final String[] options) throws Exception {
        dm.setOptions(options);
    }
    // setInstances doubles as the build hook for the wrapped measure.
    @Override public void setInstances(final Instances insts) {
        dm.buildDistanceMeasure(insts);
    }
    // NOTE(review): returns null rather than the last-built data; callers that
    // rely on Weka's contract here should be checked.
    @Override public Instances getInstances() {
        return null;
    }
    // Attribute-index filtering is not supported; silently ignored.
    @Override public void setAttributeIndices(final String value) {
    }
    @Override public String getAttributeIndices() {
        return null;
    }
    // Selection inversion is not supported; silently ignored.
    @Override public void setInvertSelection(final boolean value) {
    }
    @Override public boolean getInvertSelection() {
        return false;
    }
}
| 2,294 | 23.414894 | 94 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/distances/DistanceMeasure.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.distance_based.distances;
/**
* @author George Oastler
*/
import java.io.Serializable;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import tsml.classifiers.distance_based.utils.collections.params.ParamHandler;
import tsml.data_containers.TimeSeriesInstance;
import tsml.data_containers.TimeSeriesInstances;
import tsml.data_containers.utilities.Converter;
import weka.core.DistanceFunction;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.neighboursearch.PerformanceStats;
import static tsml.classifiers.distance_based.utils.collections.CollectionUtils.newArrayList;
/**
 * A distance measure over time series instances.
 * <p>
 * Implementations need only provide the limited
 * {@code distance(TimeSeriesInstance, TimeSeriesInstance, double)} overload;
 * every other overload defaults to delegating onto it, converting the input
 * representation where necessary.
 */
public interface DistanceMeasure extends Serializable, ParamHandler {

    /** Command-line flag used to select a distance measure. */
    String DISTANCE_MEASURE_FLAG = "d";

    /**
     * Whether this measure is symmetric, i.e. dist(a, b) == dist(b, a).
     */
    default boolean isSymmetric() {
        return true;
    }

    /**
     * Core distance computation: implementations must provide this.
     *
     * @param a     first instance
     * @param b     second instance
     * @param limit early-abandon threshold
     * @return the distance between a and b
     */
    double distance(final TimeSeriesInstance a, final TimeSeriesInstance b, double limit);

    default double distance(final TimeSeriesInstance a, final TimeSeriesInstance b) {
        return distance(a, b, Double.POSITIVE_INFINITY);
    }

    /**
     * Weka-instance overload: converted and delegated. Override this for a
     * custom Instance-based implementation.
     */
    default double distance(final Instance a, final Instance b, final double limit) {
        return distance(Converter.fromArff(a), Converter.fromArff(b), limit);
    }

    default double distance(final Instance a, final Instance b) {
        return distance(a, b, Double.POSITIVE_INFINITY);
    }

    default String getName() {
        return getClass().getSimpleName();
    }

    /** Optional setup before distance computations; no-op by default. */
    default void buildDistanceMeasure(TimeSeriesInstances data) {
    }

    default void buildDistanceMeasure(Instances data) {
        buildDistanceMeasure(Converter.fromArff(data));
    }

    /** View this measure as a weka DistanceFunction. */
    default DistanceFunction asDistanceFunction() {
        return new DistanceFunctionAdapter(this);
    }

    /** View a weka DistanceFunction as a DistanceMeasure. */
    static DistanceMeasure asDistanceMeasure(DistanceFunction df) {
        return new DistanceMeasureAdapter(df);
    }

    // raw-array and list convenience overloads, all funnelling into the core overload

    default double distanceUnivariate(double[] a, double[] b, double limit) {
        return distance(new TimeSeriesInstance(a), new TimeSeriesInstance(b), limit);
    }

    default double distanceUnivariate(double[] a, double[] b) {
        return distanceUnivariate(a, b, Double.POSITIVE_INFINITY);
    }

    default double distanceMultivariate(double[][] a, double[][] b, double limit) {
        return distance(new TimeSeriesInstance(a), new TimeSeriesInstance(b), limit);
    }

    default double distanceMultivariate(double[][] a, double[][] b) {
        return distanceMultivariate(a, b, Double.POSITIVE_INFINITY);
    }

    default double distanceUnivariate(List<Double> a, List<Double> b, double limit) {
        // a univariate list is a multivariate series with a single dimension
        return distanceMultivariate(Collections.singletonList(a), Collections.singletonList(b), limit);
    }

    default double distanceUnivariate(List<Double> a, List<Double> b) {
        return distanceUnivariate(a, b, Double.POSITIVE_INFINITY);
    }

    default double distanceMultivariate(List<List<Double>> a, List<List<Double>> b, double limit) {
        return distance(new TimeSeriesInstance(a), new TimeSeriesInstance(b), limit);
    }

    default double distanceMultivariate(List<List<Double>> a, List<List<Double>> b) {
        return distanceMultivariate(a, b, Double.POSITIVE_INFINITY);
    }
}
| 4,383 | 33.25 | 111 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/distances/DistanceMeasureAdapter.java | package tsml.classifiers.distance_based.distances;
import tsml.classifiers.distance_based.utils.collections.params.ParamSet;
import tsml.data_containers.TimeSeriesInstance;
import tsml.data_containers.TimeSeriesInstances;
import tsml.data_containers.utilities.Converter;
import weka.core.DistanceFunction;
import weka.core.Instance;
import weka.core.Instances;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
/**
 * Adapts a weka DistanceFunction to the DistanceMeasure interface.
 * <p>
 * Must be built (via buildDistanceMeasure / setInstances) before computing
 * distances over TimeSeriesInstance objects, as the class labels captured at
 * build time are needed to convert them back into weka Instances.
 */
public class DistanceMeasureAdapter implements DistanceMeasure {

    /** The wrapped weka distance function. */
    private final DistanceFunction df;
    /** Class labels captured at build time; null until built. */
    private String[] labels;

    public DistanceMeasureAdapter(final DistanceFunction df) {
        this.df = df;
    }

    private void checkBuilt() {
        if(labels == null) {
            throw new IllegalStateException("distance measure not built - call buildDistanceMeasure or setInstances first");
        }
    }

    @Override public double distance(final TimeSeriesInstance a, final TimeSeriesInstance b, final double limit) {
        // labels are required to convert back to weka format
        checkBuilt();
        return distance(Converter.toArff(a, labels), Converter.toArff(b, labels), limit);
    }

    @Override public double distance(final Instance a, final Instance b, final double limit) {
        return df.distance(a, b, limit);
    }

    @Override public ParamSet getParams() {
        final ParamSet params = new ParamSet();
        try {
            params.setOptions(df.getOptions());
        } catch(Exception e) {
            throw new IllegalStateException(e);
        }
        return params;
    }

    @Override public void setParams(final ParamSet paramSet) throws Exception {
        df.setOptions(paramSet.getOptions());
    }

    @Override public String getName() {
        return df.getClass().getSimpleName();
    }

    @Override public void buildDistanceMeasure(final TimeSeriesInstances data) {
        buildDistanceMeasure(Converter.toArff(data));
    }

    @Override public void buildDistanceMeasure(final Instances data) {
        df.setInstances(data);
        // snapshot the class labels for later TimeSeriesInstance conversions
        final List<String> labelList = new ArrayList<>();
        final Enumeration enumeration = data.classAttribute().enumerateValues();
        while(enumeration.hasMoreElements()) {
            labelList.add(enumeration.nextElement().toString());
        }
        this.labels = labelList.toArray(new String[0]);
    }

    @Override public DistanceFunction asDistanceFunction() {
        return df;
    }

    @Override public String toString() {
        return df.toString();
    }
}
| 2,534 | 30.6875 | 124 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/distances/DistanceMeasureTest.java | package tsml.classifiers.distance_based.distances;
import experiments.data.DatasetLoading;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.JUnitCore;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import tsml.classifiers.distance_based.distances.dtw.DTWDistance;
import tsml.classifiers.distance_based.distances.dtw.spaces.DTWDistanceParams;
import tsml.classifiers.distance_based.distances.dtw.spaces.DTWDistanceSpace;
import tsml.classifiers.distance_based.distances.ed.EDistance;
import tsml.classifiers.distance_based.distances.ed.spaces.EDistanceParams;
import tsml.classifiers.distance_based.distances.erp.ERPDistance;
import tsml.classifiers.distance_based.distances.erp.spaces.ERPDistanceParams;
import tsml.classifiers.distance_based.distances.lcss.LCSSDistance;
import tsml.classifiers.distance_based.distances.lcss.spaces.LCSSDistanceParams;
import tsml.classifiers.distance_based.distances.msm.MSMDistance;
import tsml.classifiers.distance_based.distances.msm.spaces.MSMDistanceParams;
import tsml.classifiers.distance_based.distances.transformed.TransformDistanceMeasure;
import tsml.classifiers.distance_based.distances.twed.TWEDistance;
import tsml.classifiers.distance_based.distances.twed.spaces.TWEDistanceParams;
import tsml.classifiers.distance_based.distances.wdtw.WDTWDistance;
import tsml.classifiers.distance_based.distances.wdtw.spaces.WDTWDistanceParams;
import tsml.classifiers.distance_based.distances.wdtw.spaces.WDTWDistanceSpace;
import tsml.classifiers.distance_based.utils.system.copy.CopierUtils;
import tsml.classifiers.distance_based.utils.collections.params.ParamSet;
import tsml.classifiers.distance_based.utils.collections.params.ParamSpace;
import tsml.classifiers.distance_based.utils.collections.params.ParamSpaceBuilder;
import tsml.classifiers.distance_based.utils.collections.params.iteration.RandomSearch;
import tsml.classifiers.distance_based.utils.strings.StrUtils;
import tsml.data_containers.TimeSeriesInstance;
import tsml.data_containers.TimeSeriesInstances;
import tsml.data_containers.utilities.Converter;
import tsml.transformers.Derivative;
import utilities.FileUtils;
import weka.core.Instance;
import java.io.*;
import java.util.*;
import static experiments.data.DatasetLoading.*;
import static tsml.classifiers.distance_based.distances.DistanceMeasure.DISTANCE_MEASURE_FLAG;
import static tsml.classifiers.distance_based.distances.dtw.spaces.DDTWDistanceSpace.newDDTWDistance;
import static tsml.classifiers.distance_based.distances.wdtw.spaces.WDDTWDistanceSpace.newWDDTWDistance;
@RunWith(Parameterized.class)
public class DistanceMeasureTest {
@Parameterized.Parameters(name = "{3} - {2}")
public static Collection<Object[]> data() {
final Object[][] distanceMeasures = new Object[][]{
{new DTWDistance(), new DTWDistanceParams()},
{new ERPDistance(), new ERPDistanceParams()},
{new LCSSDistance(), new LCSSDistanceParams()},
{new MSMDistance(), new MSMDistanceParams()},
{new EDistance(), new EDistanceParams()},
{new WDTWDistance(), new WDTWDistanceParams()},
{new TWEDistance(), new TWEDistanceParams()},
{newWDDTWDistance(), new WDTWDistanceSpace()},
{newDDTWDistance(), new DTWDistanceSpace()},
};
final ArrayList<Object[]> data = new ArrayList<>();
for(final Object[] distanceMeasure : distanceMeasures) {
for(String datasetName : Arrays.asList(
"ItalyPowerDemand",
"GunPoint",
"BasicMotions"
)) {
final ArrayList<Object> values = new ArrayList<>(Arrays.asList(distanceMeasure));
values.add(datasetName);
values.add(((DistanceMeasure) values.get(0)).getName());
data.add(values.toArray(new Object[0]));
}
}
return data;
}
@Parameterized.Parameter(0)
public DistanceMeasure distanceMeasure;
@Parameterized.Parameter(1)
public ParamSpaceBuilder paramSpaceBuilder;
@Parameterized.Parameter(2)
public String datasetName;
@Parameterized.Parameter(3)
public String testLabel; // dummy variable for the naming of parameterised tests
private static final String TEST_RESULTS_DIR_PATH = "src/main/java/" + DistanceMeasureTest.class.getPackage().getName().replaceAll("\\.", "/") + "/test_results/";
// switch this to true to create test results. False will run the tests instead. Creating new test results will
// always assert fail at the end to ensure this does not get left as "true" and pass tests when it shouldn't
private static boolean CREATE_TEST_RESULTS = false;
    /**
     * Entry point to regenerate the stored test result files rather than check
     * them: flips CREATE_TEST_RESULTS before handing control to JUnit, so each
     * parameterised test writes its results CSV instead of asserting against it.
     */
    public static void main(String[] args) {
        // Run main to create test results. Run Junit tests normally to test those results
        CREATE_TEST_RESULTS = true;
        JUnitCore.main(DistanceMeasureTest.class.getCanonicalName());
    }
    /**
     * Writes a CSV of reference distances for the current measure/dataset pair.
     * Each row holds the two instance indices, the computed distance, and the
     * measure's parameters in json form. Uses a fixed seed (0) so the sequence
     * of instance pairs and sampled parameter sets is reproducible.
     *
     * @param numTests number of rows (random instance pairs) to generate
     * @throws Exception on io failure or parameter application failure
     */
    public void createTestResults(final int numTests) throws Exception {
        // generate the param space
        final ParamSpace paramSpace = paramSpaceBuilder.build(data);
        // setup a random search through parameters
        final Random random = new Random(0);
        final RandomSearch randomSearch = new RandomSearch();
        randomSearch.setRandom(random);
        randomSearch.setWithReplacement(true);
        randomSearch.buildSearch(paramSpace);
        randomSearch.setIterationLimit(numTests);
        FileUtils.makeParentDir(testResultsFilePath);
        try(final BufferedWriter writer = new BufferedWriter(new FileWriter(testResultsFilePath))) {
            writer.write("i,j,distance,params"); // write the header line
            writer.write("\n");
            for(int i = 0; i < numTests; i++) {
                // indices of the two insts in the dataset
                final int instAIndex = random.nextInt(data.numInstances());
                int instBIndex; // choose a different inst to compare against
                do {
                    instBIndex = random.nextInt(data.numInstances());
                } while(instAIndex == instBIndex);
                writer.write(String.valueOf(instAIndex));
                writer.write(",");
                writer.write(String.valueOf(instBIndex));
                final TimeSeriesInstance instA = data.get(instAIndex);
                final TimeSeriesInstance instB = data.get(instBIndex);
                // last field is the parameters for the distance measure in json format
                // randomly pick a param set
                if(!randomSearch.hasNext()) throw new IllegalStateException("no remaining params");
                ParamSet paramSet = randomSearch.next();
                // clone the distance measure and apply parameters. This ensures no parameter crossover between subsequent tests
                // (shouldn't happen anyway but it's safer this way!)
                distanceMeasure = CopierUtils.deepCopy(this.distanceMeasure);
                distanceMeasure.setParams(paramSet);
                // compute the distance using the distance measure and compare to precomputed distance from results file
                final double distance = distanceMeasure.distance(instA, instB);
                writer.write(",");
                writer.write(String.valueOf(distance));
                // convert params to json
                writer.write(",");
                // somewhat hacky work around for distance measures which are wrapped in a transformer. For simplicity, we're only recording the parameters for the distance measure, not the wrapper itself
                if(distanceMeasure instanceof TransformDistanceMeasure) {
                    paramSet = ((TransformDistanceMeasure) distanceMeasure).getDistanceMeasure().getParams();
                }
                writer.write(paramSet.toJson());
                writer.write("\n");
            }
        }
        System.out.println("created tests results for " + distanceMeasure.getName() + " on " + datasetName);
    }
private String dataPath;
private String testResultsDirPath;
private String testResultsFilePath;
private TimeSeriesInstances data;
private int i = 0;
@Before
public void before() throws IOException {
// load in the data
dataPath = Arrays.asList(BAKED_IN_MTSC_DATASETS).contains(datasetName) ? BAKED_IN_MTSC_DATA_PATH :
BAKED_IN_TSC_DATA_PATH;
data = loadData(dataPath, datasetName);
// setup results file
testResultsDirPath = TEST_RESULTS_DIR_PATH + "/" + datasetName;
testResultsFilePath = testResultsDirPath + "/" + distanceMeasure.getName() + ".csv";
}
@Test
public void test() throws Exception {
i = 0;
if(CREATE_TEST_RESULTS) {
createTestResults(1000);
} else {
testDistances();
}
}
    /**
     * Replays every row of the stored results CSV: re-applies the recorded
     * parameters, recomputes the distance, and asserts it matches exactly.
     * Also checks early abandoning, agreement with alternative historical
     * implementations, symmetry, and that cost-matrix recording does not
     * change the result.
     *
     * @throws Exception on io failure or parameter application failure
     */
    private void testDistances() throws Exception {
        try(final BufferedReader reader = new BufferedReader(new FileReader(testResultsFilePath))) {
            reader.readLine(); // read the header line
            // for each line in the results file
            String line;
            while((line = reader.readLine()) != null) {
//                System.out.println(i);
                // split line on the comma
                final String[] fields = line.split(",");
                // indices of the two insts in the dataset
                final int instAIndex = Integer.parseInt(fields[0].trim());
                final int instBIndex = Integer.parseInt(fields[1].trim());
                TimeSeriesInstance instA = data.get(instAIndex);
                TimeSeriesInstance instB = data.get(instBIndex);
                // the distance
                final double targetDistance = Double.parseDouble(fields[2].trim());
                // sanity check the recorded value before using it as ground truth
                if(targetDistance == Double.POSITIVE_INFINITY) throw new IllegalArgumentException("target distance pos inf");
                if(targetDistance == Double.NEGATIVE_INFINITY) throw new IllegalArgumentException("target distance neg inf");
                if(Double.isNaN(targetDistance)) throw new IllegalArgumentException("target distance nan");
                // last field is the parameters for the distance measure in json format
                // load the json str into a paramset
                // BEWARE: super hacky fix to combine any remaining fields together into the json string. I.e. if there's a comma in the json (which with <1 parameter there defo is) then the json gets split
                ParamSet paramSet = ParamSet.fromJson(StrUtils.join(",", Arrays.copyOfRange(fields, 3, fields.length)));
                if(distanceMeasure instanceof TransformDistanceMeasure) {
                    // recorded params belong to the inner measure; rewrap them
                    paramSet = new ParamSet().add(DISTANCE_MEASURE_FLAG, ((TransformDistanceMeasure) distanceMeasure).getDistanceMeasure(), paramSet);
                }
                distanceMeasure.setParams(paramSet);
                // compute the distance using the distance measure and compare to precomputed distance from results file
                if(distanceMeasure instanceof MatrixBasedDistanceMeasure) {
                    ((MatrixBasedDistanceMeasure) distanceMeasure).setRecordCostMatrix(false);
                }
                final double distance = distanceMeasure.distance(instA, instB);
                Assert.assertEquals(targetDistance, distance, 0d);
                Assert.assertTrue(distance >= 0);
                // compute the distance again, this time with a limit attempting to trigger early abandon.
                // early abandon will return a distance == the full distance || distance == pos inf.
                // if the distance measure does not utilise early abandon then it should return the same distance.
                // a limit of half the unlimited distance should be sufficient.
                final double abandonedDistance = distanceMeasure.distance(instA, instB, distance / 2);
                Assert.assertTrue(abandonedDistance == Double.POSITIVE_INFINITY || abandonedDistance == distance);
                // somewhat hacky check against old implementations of each distance measure. Loosely, we check if the
                // distance measure has an old implementation, run that and compare
                final List<Double> altDistances = altDistance(instA, instB);
                for(Double altDistance : altDistances) {
                    if(altDistance != null) {
                        if(altDistance != targetDistance) {
                            System.out.println();
                        }
                        Assert.assertEquals(targetDistance, altDistance, 0d);
                    }
                }
                // check that distance is same if distance measure is symmetric
                if(distanceMeasure.isSymmetric()) {
                    final double altDistance = distanceMeasure.distance(instB, instA);
                    Assert.assertEquals(distance, altDistance, 0d);
                }
                // check recording the cost matrix doesn't alter the distance
                if(distanceMeasure instanceof MatrixBasedDistanceMeasure) {
                    ((MatrixBasedDistanceMeasure) distanceMeasure).setRecordCostMatrix(true);
                    final double altDistance = distanceMeasure.distance(instA, instB);
                    Assert.assertEquals(distance, altDistance, 0d);
                }
                i++;
            }
        }
    }
private static TimeSeriesInstances loadData(String dirPath, String datasetName) throws IOException {
final String trainPath = dirPath + "/" + datasetName + "/" + datasetName + "_TRAIN.arff";
final String testPath = dirPath + "/" + datasetName + "/" + datasetName + "_TEST.arff";
final TimeSeriesInstances data = Converter.fromArff(DatasetLoading.loadData(trainPath));
data.addAll(Converter.fromArff(DatasetLoading.loadData(testPath)));
return data;
}
// find an alternative distance value from a different implementation
    /**
     * Computes the same distance with up to three historical implementations
     * (pre-github-move tsml, the bitbucket snapshot, and the Monash versions)
     * for cross-checking. Only applies to univariate instances; entries are
     * null when no alternative implementation exists for the current measure.
     *
     * @param inst1 first instance
     * @param inst2 second instance
     * @return [oldDistance, origDistance, monashDistance], entries nullable
     */
    private List<Double> altDistance(TimeSeriesInstance inst1, TimeSeriesInstance inst2) {
        Double oldDistance = null;
        Double origDistance = null;
        Double monashDistance = null;
        final int len = inst1.getMaxLength();
        // measure name without the "Distance" suffix, e.g. "DTW", "LCSS"
        final String name = distanceMeasure.getName().replace("Distance", "");
        // proportional warping window, where the measure has one
        final double window;
        switch(name) {
            case "ERP":
                window = ((ERPDistance) distanceMeasure).getWindow();
                break;
            case "DTW":
                window = ((DTWDistance) distanceMeasure).getWindow();
                break;
            case "DDTW":
                window = ((DTWDistance) ((TransformDistanceMeasure) distanceMeasure).getDistanceMeasure())
                                 .getWindow();
                break;
            case "LCSS":
                window = ((LCSSDistance) distanceMeasure).getWindow();
                break;
            default:
                window = Double.NaN;
                break;
        }
        // legacy implementations take an absolute (integer) window
        final int floorWindow = (int) Math.floor(window * len);
        final double floorWindowProportional = (double) floorWindow / len;
        if(!inst1.isMultivariate() && !inst2.isMultivariate()) {
            // add the previous implementations
            if("LCSS".equals(name)) {
                oldDistance = PreviousDistanceMeasureVersions.lcss(Converter.toArff(inst1, data.getClassLabels()), Converter.toArff(inst2, data.getClassLabels()),
                        Double.POSITIVE_INFINITY,
                        floorWindow,
                        ((LCSSDistance) distanceMeasure).getEpsilon());
            } else if("DDTW".equals(name)) {
                // derivative transform applied manually before plain dtw
                oldDistance = PreviousDistanceMeasureVersions.dtw(Converter.toArff(new Derivative().transform(inst1), data.getClassLabels()),
                        Converter.toArff(new Derivative().transform(inst2), data.getClassLabels()),
                        Double.POSITIVE_INFINITY,
                        floorWindow);
            } else if("WDDTW".equals(name)) {
                oldDistance = PreviousDistanceMeasureVersions.wdtw(Converter.toArff(new Derivative().transform(inst1), data.getClassLabels()),
                        Converter.toArff(new Derivative().transform(inst2), data.getClassLabels()),
                        Double.POSITIVE_INFINITY,
                        ((WDTWDistance) ((TransformDistanceMeasure) distanceMeasure)
                                .getDistanceMeasure()).getG());
            } else if("DTW".equals(name)) {
                oldDistance = PreviousDistanceMeasureVersions.dtw(Converter.toArff(inst1, data.getClassLabels()), Converter.toArff(inst2, data.getClassLabels()),
                        Double.POSITIVE_INFINITY,
                        floorWindow);
            } else if("WDTW".equals(name)) {
                oldDistance = PreviousDistanceMeasureVersions.wdtw(Converter.toArff(inst1, data.getClassLabels()), Converter.toArff(inst2, data.getClassLabels()),
                        Double.POSITIVE_INFINITY,
                        ((WDTWDistance) distanceMeasure).getG());
            } else if("ERP".equals(name)) {
                oldDistance = PreviousDistanceMeasureVersions.erp(Converter.toArff(inst1, data.getClassLabels()), Converter.toArff(inst2, data.getClassLabels()),
                        Double.POSITIVE_INFINITY,
                        floorWindow, ((ERPDistance) distanceMeasure).getG());
            } else if("MSM".equals(name)) {
                oldDistance = PreviousDistanceMeasureVersions.msm(Converter.toArff(inst1, data.getClassLabels()), Converter.toArff(inst2, data.getClassLabels()),
                        Double.POSITIVE_INFINITY,
                        ((MSMDistance) distanceMeasure).getC());
            } else if("TWED".equals(name)) {
                oldDistance = PreviousDistanceMeasureVersions.twed(Converter.toArff(inst1, data.getClassLabels()), Converter.toArff(inst2, data.getClassLabels()),
                        Double.POSITIVE_INFINITY,
                        ((TWEDistance) distanceMeasure).getLambda(),
                        ((TWEDistance) distanceMeasure).getNu());
            }
            // convert to raw arrays
            final double[] a = inst1.toValueArray()[0];
            final double[] b = inst2.toValueArray()[0];
            // find the orig distances
            if("LCSS".equals(name)) {
                origDistance = DistanceMeasuresFromBitbucket
                        .lcss(a, b, ((LCSSDistance) distanceMeasure).getEpsilon(), floorWindow);
            } else if("DTW".equals(name)) {
                origDistance = DistanceMeasuresFromBitbucket.dtw(a, b, Double.POSITIVE_INFINITY, window);
            } else if("WDTW".equals(name)) {
                origDistance = DistanceMeasuresFromBitbucket.wdtw(a, b, Double.POSITIVE_INFINITY, ((WDTWDistance) distanceMeasure).getG());
            } else if("ERP".equals(name)) {
                origDistance = DistanceMeasuresFromBitbucket.erp(a, b, ((ERPDistance) distanceMeasure).getG(), floorWindowProportional);
            } else if("MSM".equals(name)) {
                origDistance = DistanceMeasuresFromBitbucket.msm(a, b, ((MSMDistance) distanceMeasure).getC());
            } else if("TWED".equals(name)) {
                origDistance = DistanceMeasuresFromBitbucket.twed(a, b,
                        ((TWEDistance) distanceMeasure).getNu(), ((TWEDistance) distanceMeasure).getLambda());
            }
            // find the monash implementation distance
            if("LCSS".equals(name)) {
                monashDistance = MonashDistanceMeasures.lcss(a, b, Double.POSITIVE_INFINITY, floorWindow, ((LCSSDistance) distanceMeasure).getEpsilon());
            } else if("DDTW".equals(name)) {
                monashDistance = MonashDistanceMeasures.ddtw(a, b, Double.POSITIVE_INFINITY, floorWindow);
            } else if("WDDTW".equals(name)) {
                monashDistance = MonashDistanceMeasures.wddtw(a, b, ((WDTWDistance) ((TransformDistanceMeasure) distanceMeasure)
                        .getDistanceMeasure()).getG());
            } else if("DTW".equals(name)) {
                monashDistance = MonashDistanceMeasures.dtw(a, b, Double.POSITIVE_INFINITY, floorWindow);
            } else if("WDTW".equals(name)) {
                monashDistance = MonashDistanceMeasures.wdtw(a, b, ((WDTWDistance) distanceMeasure).getG());
            } else if("ERP".equals(name)) {
                monashDistance = MonashDistanceMeasures.erp(a, b, floorWindow, ((ERPDistance) distanceMeasure).getG());
            } else if("MSM".equals(name)) {
                monashDistance = MonashDistanceMeasures.msm(a, b, Double.POSITIVE_INFINITY, ((MSMDistance) distanceMeasure).getC());
            } else if("TWED".equals(name)) {
                monashDistance = MonashDistanceMeasures.twed(a, b, ((TWEDistance) distanceMeasure).getNu(), ((TWEDistance) distanceMeasure).getLambda());
            } else if("E".equals(name)) {
                monashDistance = MonashDistanceMeasures.ed(a, b, Double.POSITIVE_INFINITY);
            }
        }
        return Arrays.asList(oldDistance, origDistance, monashDistance);
    }
private static class DistanceMeasuresFromBitbucket {
// distance measure code snapshot from bitbucket to github move
// https://github.com/uea-machine-learning/tsml/tree/29b5558ebab6b5dd427ed45d028f52f6e9401e30
public static double dtw(double[] a, double[] b, double cutoff, double r) {
double minDist;
boolean tooBig;
// Set the longest series to a. is this necessary?
double[] temp;
if(a.length<b.length){
temp=a;
a=b;
b=temp;
}
int n=a.length;
int m=b.length;
/* Parameter 0<=r<=1. 0 == no warp, 1 == full warp
generalised for variable window size
* */
int windowSize=(int)(r*n); //Rounded down.
//No Warp, windowSize=1
if(windowSize<1) windowSize=1;
//Full Warp : windowSize=n, otherwise scale between
else if(windowSize<n)
windowSize++;
double[][] matrixD = null;
//Extra memory than required, could limit to windowsize,
// but avoids having to recreate during CV
//for varying window sizes
if(matrixD==null)
matrixD=new double[n][m];
/*
//Set boundary elements to max.
*/
int start,end;
for(int i=0;i<n;i++){
start=windowSize<i?i-windowSize:0;
end=i+windowSize+1<m?i+windowSize+1:m;
for(int j=start;j<end;j++)
matrixD[i][j]=Double.MAX_VALUE;
}
matrixD[0][0]=(a[0]-b[0])*(a[0]-b[0]);
//a is the longer series.
//Base cases for warping 0 to all with max interval r
//Warp a[0] onto all b[1]...b[r+1]
for(int j=1;j<windowSize && j<m;j++)
matrixD[0][j]=matrixD[0][j-1]+(a[0]-b[j])*(a[0]-b[j]);
// Warp b[0] onto all a[1]...a[r+1]
for(int i=1;i<windowSize && i<n;i++)
matrixD[i][0]=matrixD[i-1][0]+(a[i]-b[0])*(a[i]-b[0]);
//Warp the rest,
for (int i=1;i<n;i++){
tooBig=true;
start=windowSize<i?i-windowSize+1:1;
end=i+windowSize<m?i+windowSize:m;
for (int j = start;j<end;j++){
minDist=matrixD[i][j-1];
if(matrixD[i-1][j]<minDist)
minDist=matrixD[i-1][j];
if(matrixD[i-1][j-1]<minDist)
minDist=matrixD[i-1][j-1];
matrixD[i][j]=minDist+(a[i]-b[j])*(a[i]-b[j]);
if(tooBig&&matrixD[i][j]<cutoff)
tooBig=false;
}
//Early abandon
if(tooBig){
return Double.MAX_VALUE;
}
}
//Find the minimum distance at the end points, within the warping window.
return matrixD[n-1][m-1];
}
private static class NumberVector{
private double[] values;
public NumberVector(double[] values){
this.values = values;
}
public int getDimensionality(){
return values.length;
}
public double doubleValue(int dimension){
try{
return values[dimension - 1];
}catch(IndexOutOfBoundsException e) {
throw new IllegalArgumentException("Dimension " + dimension + " out of range.");
}
}
}
public static double lcss(double[] first, double[] second, double epsilon, int delta) {
double[] a = first;
double[] b = second;
int m = first.length;
int n = second.length;
int[][] lcss = new int[m+1][n+1];
int[][] lastX = new int[m+1][n+1];
int[][] lastY = new int[m+1][n+1];
for(int i = 0; i < m; i++){
for(int j = i-delta; j <= i+delta; j++){
// System.out.println("here");
if(j < 0 || j >= n){
//do nothing
}else if(b[j]+epsilon >= a[i] && b[j]-epsilon <=a[i]){
lcss[i+1][j+1] = lcss[i][j]+1;
lastX[i+1][j+1] = i;
lastY[i+1][j+1] = j;
}else if(lcss[i][j+1] > lcss[i+1][j]){
lcss[i+1][j+1] = lcss[i][j+1];
lastX[i+1][j+1] = i;
lastY[i+1][j+1] = j+1;
}else if(lcss[i][j+1] < lcss[i+1][j]){
lcss[i+1][j+1] = lcss[i+1][j];
lastX[i+1][j+1] = i+1;
lastY[i+1][j+1] = j;
} else {
// take the max of left or top. topLeft has no effect as always equal to or less than left or top
lcss[i+1][j+1] = Math.max(lcss[i][j], Math.max(lcss[i][j + 1], lcss[i + 1][j]));
lastX[i+1][j+1] = i;
lastY[i+1][j+1] = j;
}
// orig bugged version
// if(j < 0 || j >= n){
// //do nothing
// }else if(b[j]+epsilon >= a[i] && b[j]-epsilon <=a[i]){
// lcss[i+1][j+1] = lcss[i][j]+1;
// lastX[i+1][j+1] = i;
// lastY[i+1][j+1] = j;
// }else if(lcss[i][j+1] > lcss[i+1][j]){
// lcss[i+1][j+1] = lcss[i][j+1];
// lastX[i+1][j+1] = i;
// lastY[i+1][j+1] = j+1;
// }else{
// lcss[i+1][j+1] = lcss[i+1][j];
// lastX[i+1][j+1] = i+1;
// lastY[i+1][j+1] = j;
// }
}
}
int max = -1;
for(int i = 1; i < lcss[lcss.length-1].length; i++){
if(lcss[lcss.length-1][i] > max){
max = lcss[lcss.length-1][i];
}
}
return 1-((double)max/m);
}
public static double erp(double[] first, double[] second, double g, double bandSize) {
// Current and previous columns of the matrix
NumberVector v1 = new NumberVector(first);
NumberVector v2 = new NumberVector(second);
double[] curr = new double[v2.getDimensionality()];
double[] prev = new double[v2.getDimensionality()];
// size of edit distance band
// bandsize is the maximum allowed distance to the diagonal
// int band = (int) Math.ceil(v2.getDimensionality() * bandSize);
int band = (int) Math.ceil(v2.getDimensionality() * bandSize);
// g parameter for local usage
double gValue = g;
for (int i = 0; i < v1.getDimensionality(); i++) {
// Swap current and prev arrays. We'll just overwrite the new curr.
{
double[] temp = prev;
prev = curr;
curr = temp;
}
int l = i - (band + 1);
if (l < 0) {
l = 0;
}
int r = i + (band + 1);
if (r > (v2.getDimensionality() - 1)) {
r = (v2.getDimensionality() - 1);
}
for (int j = l; j <= r; j++) {
if (Math.abs(i - j) <= band) {
// compute squared distance of feature vectors
double val1 = v1.doubleValue(i + 1);
double val2 = gValue;
double diff = (val1 - val2);
final double d1 = Math.sqrt(diff * diff);
val1 = gValue;
val2 = v2.doubleValue(j + 1);
diff = (val1 - val2);
final double d2 = Math.sqrt(diff * diff);
val1 = v1.doubleValue(i + 1);
val2 = v2.doubleValue(j + 1);
diff = (val1 - val2);
final double d12 = Math.sqrt(diff * diff);
final double dist1 = d1 * d1;
final double dist2 = d2 * d2;
final double dist12 = d12 * d12;
final double cost;
if ((i + j) != 0) {
if ((i == 0) || ((j != 0) && (((prev[j - 1] + dist12) > (curr[j - 1] + dist2)) && ((curr[j - 1] + dist2) < (prev[j] + dist1))))) {
// del
cost = curr[j - 1] + dist2;
} else if ((j == 0) || ((i != 0) && (((prev[j - 1] + dist12) > (prev[j] + dist1)) && ((prev[j] + dist1) < (curr[j - 1] + dist2))))) {
// ins
cost = prev[j] + dist1;
} else {
// match
cost = prev[j - 1] + dist12;
}
} else {
cost = 0;
}
curr[j] = cost;
// steps[i][j] = step;
} else {
curr[j] = Double.POSITIVE_INFINITY; // outside band
}
}
}
return curr[v2.getDimensionality() - 1];
}
public static double twed(double[] a, double[] b, double nu, double lambda) {
int dim=1;
double dist, disti1, distj1;
double[][] ta=new double[a.length][dim];
double[][] tb=new double[a.length][dim];
double[] tsa=new double[a.length];
double[] tsb=new double[b.length];
for(int i=0;i<tsa.length;i++)
tsa[i]=(i+1);
for(int i=0;i<tsb.length;i++)
tsb[i]=(i+1);
int r = ta.length;
int c = tb.length;
int i,j,k;
//Copy over values
for(i=0;i<a.length;i++)
ta[i][0]=a[i];
for(i=0;i<b.length;i++)
tb[i][0]=b[i];
/* allocations in c
double **D = (double **)calloc(r+1, sizeof(double*));
double *Di1 = (double *)calloc(r+1, sizeof(double));
double *Dj1 = (double *)calloc(c+1, sizeof(double));
for(i=0; i<=r; i++) {
D[i]=(double *)calloc(c+1, sizeof(double));
}
*/
double [][]D = new double[r+1][c+1];
double[] Di1 = new double[r+1];
double[] Dj1 = new double[c+1];
// local costs initializations
for(j=1; j<=c; j++) {
distj1=0;
for(k=0; k<dim; k++)
if(j>1){
//CHANGE AJB 8/1/16: Only use power of 2 for speed up,
distj1+=(tb[j-2][k]-tb[j-1][k])*(tb[j-2][k]-tb[j-1][k]);
// OLD VERSION distj1+=Math.pow(Math.abs(tb[j-2][k]-tb[j-1][k]),degree);
// in c: distj1+=pow(fabs(tb[j-2][k]-tb[j-1][k]),degree);
}
else
distj1+=tb[j-1][k]*tb[j-1][k];
//OLD distj1+=Math.pow(Math.abs(tb[j-1][k]),degree);
Dj1[j]=(distj1);
}
for(i=1; i<=r; i++) {
disti1=0;
for(k=0; k<dim; k++)
if(i>1)
disti1+=(ta[i-2][k]-ta[i-1][k])*(ta[i-2][k]-ta[i-1][k]);
// OLD disti1+=Math.pow(Math.abs(ta[i-2][k]-ta[i-1][k]),degree);
else
disti1+=(ta[i-1][k])*(ta[i-1][k]);
//OLD disti1+=Math.pow(Math.abs(ta[i-1][k]),degree);
Di1[i]=(disti1);
for(j=1; j<=c; j++) {
dist=0;
for(k=0; k<dim; k++){
dist+=(ta[i-1][k]-tb[j-1][k])*(ta[i-1][k]-tb[j-1][k]);
// dist+=Math.pow(Math.abs(ta[i-1][k]-tb[j-1][k]),degree);
if(i>1&&j>1)
dist+=(ta[i-2][k]-tb[j-2][k])*(ta[i-2][k]-tb[j-2][k]);
// dist+=Math.pow(Math.abs(ta[i-2][k]-tb[j-2][k]),degree);
}
D[i][j]=(dist);
}
}// for i
// border of the cost matrix initialization
D[0][0]=0;
for(i=1; i<=r; i++)
D[i][0]=D[i-1][0]+Di1[i];
for(j=1; j<=c; j++)
D[0][j]=D[0][j-1]+Dj1[j];
double dmin, htrans, dist0;
int iback;
for (i=1; i<=r; i++){
for (j=1; j<=c; j++){
htrans=Math.abs((tsa[i-1]-tsb[j-1]));
if(j>1&&i>1)
htrans+=Math.abs((tsa[i-2]-tsb[j-2]));
dist0=D[i-1][j-1]+nu*htrans+D[i][j];
dmin=dist0;
if(i>1)
htrans=((tsa[i-1]-tsa[i-2]));
else htrans=tsa[i-1];
dist=Di1[i]+D[i-1][j]+lambda+nu*htrans;
if(dmin>dist){
dmin=dist;
}
if(j>1)
htrans=(tsb[j-1]-tsb[j-2]);
else htrans=tsb[j-1];
dist=Dj1[j]+D[i][j-1]+lambda+nu*htrans;
if(dmin>dist){
dmin=dist;
}
D[i][j] = dmin;
}
}
dist = D[r][c];
return dist;
}
private static double[] initWeights(int seriesLength, double g){
double[] weightVector = new double[seriesLength];
double halfLength = (double)seriesLength/2;
for(int i = 0; i < seriesLength; i++){
weightVector[i] = 1/(1+Math.exp(-g*(i-halfLength)));
}
return weightVector;
}
public static double wdtw(double[] first, double[] second, double cutOffValue, double g) {
double[] weightVector = null;
if(weightVector==null){
weightVector = initWeights(first.length, g);
}
double[][] distances;
//create empty array
distances = new double[first.length][second.length];
//first value
distances[0][0] = weightVector[0]*((first[0]-second[0])*(first[0]-second[0]));
//early abandon if first values is larger than cut off
if(distances[0][0] > cutOffValue){
return Double.MAX_VALUE;
}
//top row
for(int i=1;i<second.length;i++){
distances[0][i] = distances[0][i-1]+weightVector[i]*((first[0]-second[i])*(first[0]-second[i])); //edited by Jay
}
//first column
for(int i=1;i<first.length;i++){
distances[i][0] = distances[i-1][0]+weightVector[i]*((first[i]-second[0])*(first[i]-second[0])); //edited by Jay
}
//warp rest
double minDistance;
for(int i = 1; i<first.length; i++){
boolean overflow = true;
for(int j = 1; j<second.length; j++){
//calculate distances
minDistance = Math.min(distances[i][j-1], Math.min(distances[i-1][j], distances[i-1][j-1]));
distances[i][j] = minDistance+weightVector[Math.abs(i-j)] *((first[i]-second[j])*(first[i]-second[j])); //edited by Jay
if(overflow && distances[i][j] < cutOffValue){
overflow = false; // because there's evidence that the path can continue
}
//
// if(minDistance > cutOffValue && isEarlyAbandon){
// distances[i][j] = Double.MAX_VALUE;
// }else{
// distances[i][j] = minDistance+weightVector[Math.abs(i-j)] *(first[i]-second[j])*(first[i]-second[j]); //edited by Jay
// overflow = false;
// }
}
//early abandon
if(overflow){
return Double.MAX_VALUE;
}
}
return distances[first.length-1][second.length-1];
}
public static double msm(double[] a, double[] b, double c) {
int m, n, i, j;
m = a.length;
n = b.length;
double[][] cost = new double[m][n];
// Initialization
cost[0][0] = Math.abs(a[0] - b[0]);
for (i = 1; i< m; i++) {
cost[i][0] = cost[i-1][0] + editCost(a[i], a[i-1], b[0], c);
}
for (j = 1; j < n; j++) {
cost[0][j] = cost[0][j-1] + editCost(b[j], a[0], b[j-1], c);
}
// Main Loop
for( i = 1; i < m; i++){
for ( j = 1; j < n; j++){
double d1,d2, d3;
d1 = cost[i-1][j-1] + Math.abs(a[i] - b[j] );
d2 = cost[i-1][j] + editCost(a[i], a[i-1], b[j], c);
d3 = cost[i][j-1] + editCost(b[j], a[i], b[j-1], c);
cost[i][j] = Math.min( d1, Math.min(d2,d3) );
}
}
// Output
return cost[m-1][n-1];
}
private static double editCost( double new_point, double x, double y, double c){
double dist = 0;
if ( ( (x <= new_point) && (new_point <= y) ) ||
( (y <= new_point) && (new_point <= x) ) ) {
dist = c;
}
else{
dist = c + Math.min( Math.abs(new_point - x) , Math.abs(new_point - y) );
}
return dist;
}
}
    /**
     * Legacy Instance-based implementations of the elastic distance measures,
     * kept verbatim for reference/regression against the newer row-by-row,
     * variable-length variants. All methods treat the last attribute as the
     * class value (hence numAttributes() - 1) and early-abandon against the
     * supplied limit where noted.
     * NOTE(review): retained exactly as written; quirks are documented, not fixed.
     */
    private static class PreviousDistanceMeasureVersions {
        // this is the code for the distance measures BEFORE the big overhaul to variable length and row-by-row variants for increased speed
        /**
         * Edit distance with Real Penalty (ERP) with a Sakoe-Chiba style band.
         * band < 0 means unconstrained; penalty is the ERP g value (gap reference).
         * Uses two rolling rows over the second series; returns
         * Double.POSITIVE_INFINITY when every cell of a row reaches limit.
         */
        public static double erp(Instance first, Instance second, double limit, int band, double penalty) {
            int aLength = first.numAttributes() - 1;
            int bLength = second.numAttributes() - 1;
            // Current and previous columns of the matrix
            double[] curr = new double[bLength];
            double[] prev = new double[bLength];
            // size of edit distance band
            // bandsize is the maximum allowed distance to the diagonal
            //        int band = (int) Math.ceil(v2.getDimensionality() * bandSize);
            if(band < 0) {
                band = aLength + 1;
            }
            // g parameters for local usage
            double gValue = penalty;
            for(int i = 0;
                i < aLength;
                i++) {
                // Swap current and prev arrays. We'll just overwrite the new curr.
                {
                    double[] temp = prev;
                    prev = curr;
                    curr = temp;
                }
                int l = i - (band + 1);
                if(l < 0) {
                    l = 0;
                }
                int r = i + (band + 1);
                if(r > (bLength - 1)) {
                    r = (bLength - 1);
                }
                boolean tooBig = true;
                for(int j = l;
                    j <= r;
                    j++) {
                    if(Math.abs(i - j) <= band) {
                        // compute squared distance of feature vectors
                        double val1 = first.value(i);
                        double val2 = gValue;
                        double diff = (val1 - val2);
                        final double dist1 = diff * diff;
                        val1 = gValue;
                        val2 = second.value(j);
                        diff = (val1 - val2);
                        final double dist2 = diff * diff;
                        val1 = first.value(i);
                        val2 = second.value(j);
                        diff = (val1 - val2);
                        final double dist12 = diff * diff;
                        final double cost;
                        if((i + j) != 0) {
                            if((i == 0) || ((j != 0) && (((prev[j - 1] + dist12) > (curr[j - 1] + dist2)) && (
                                (curr[j - 1] + dist2) < (prev[j] + dist1))))) {
                                // del
                                cost = curr[j - 1] + dist2;
                            } else if((j == 0) || ((i != 0) && (((prev[j - 1] + dist12) > (prev[j] + dist1)) && (
                                (prev[j] + dist1) < (curr[j - 1] + dist2))))) {
                                // ins
                                cost = prev[j] + dist1;
                            } else {
                                // match
                                cost = prev[j - 1] + dist12;
                            }
                        } else {
                            cost = 0;
                        }
                        curr[j] = cost;
                        if(tooBig && cost < limit) {
                            tooBig = false;
                        }
                    } else {
                        curr[j] = Double.POSITIVE_INFINITY; // outside band
                    }
                }
                // early abandon: no cell in this row is under the limit
                if(tooBig) {
                    return Double.POSITIVE_INFINITY;
                }
            }
            return curr[bLength - 1];
        }
        /**
         * Logistic (sigmoid) weight vector for WDTW: midpoint at seriesLength/2,
         * steepness controlled by g.
         */
        private static double[] generateWeights(int seriesLength, double g) {
            double halfLength = (double) seriesLength / 2;
            double[] weightVector = new double[seriesLength];
            for (int i = 0; i < seriesLength; i++) {
                weightVector[i] = 1d / (1d + Math.exp(-g * (i - halfLength)));
            }
            return weightVector;
        }
        /**
         * Weighted DTW: squared differences scaled by a logistic weight on the
         * warping amount |i-j|. Early abandons (POSITIVE_INFINITY) when a full
         * row exceeds limit.
         * NOTE(review): weights are sized by the first series' length — assumes
         * equal-length series; a longer b would overrun weightVector.
         */
        public static double wdtw(Instance a, Instance b, double limit, double g) {
            int aLength = a.numAttributes() - 1;
            int bLength = b.numAttributes() - 1;
            double[] weightVector = generateWeights(aLength, g);
            //create empty array
            double[][] distances = new double[aLength][bLength];
            //first value
            distances[0][0] = (a.value(0) - b.value(0)) * (a.value(0) - b.value(0)) * weightVector[0];
            //top row
            for (int i = 1; i < bLength; i++) {
                distances[0][i] =
                    distances[0][i - 1] + (a.value(0) - b.value(i)) * (a.value(0) - b.value(i)) * weightVector[i]; //edited by Jay
            }
            //first column
            for (int i = 1; i < aLength; i++) {
                distances[i][0] =
                    distances[i - 1][0] + (a.value(i) - b.value(0)) * (a.value(i) - b.value(0)) * weightVector[i]; //edited by Jay
            }
            //warp rest
            double minDistance;
            for (int i = 1; i < aLength; i++) {
                boolean overflow = true;
                for (int j = 1; j < bLength; j++) {
                    //calculate distance_measures
                    minDistance = Math.min(distances[i][j - 1], Math.min(distances[i - 1][j], distances[i - 1][j - 1]));
                    distances[i][j] =
                        minDistance + (a.value(i) - b.value(j)) * (a.value(i) - b.value(j)) * weightVector[Math.abs(i - j)];
                    if (overflow && distances[i][j] <= limit) {
                        overflow = false; // because there's evidence that the path can continue
                    }
                }
                //early abandon
                if (overflow) {
                    return Double.POSITIVE_INFINITY;
                }
            }
            return distances[aLength - 1][bLength - 1];
        }
        /**
         * Windowed DTW over squared differences with full-matrix storage and
         * row-level early abandon against limit. windowSize < 0 means
         * unconstrained; otherwise it is incremented to include the current cell.
         */
        public static double dtw(Instance first, Instance second, double limit, int windowSize) {
            double minDist;
            boolean tooBig;
            int aLength = first.numAttributes() - 1;
            int bLength = second.numAttributes() - 1;
            /* Parameter 0<=r<=1. 0 == no warpingWindow, 1 == full warpingWindow
            generalised for variable window size
            * */
            //        int windowSize = warpingWindow + 1; // + 1 to include the current cell
            //        if(warpingWindow < 0) {
            //            windowSize = aLength + 1;
            //        }
            if(windowSize < 0) {
                windowSize = first.numAttributes() - 1;
            } else {
                windowSize++;
            }
            //Extra memory than required, could limit to windowsize,
            //        but avoids having to recreate during CV
            //for varying window sizes
            double[][] distanceMatrix = new double[aLength][bLength];
            /*
            //Set boundary elements to max.
            */
            int start, end;
            for(int i = 0; i < aLength; i++) {
                start = windowSize < i ? i - windowSize : 0;
                end = Math.min(i + windowSize + 1, bLength);
                for(int j = start; j < end; j++) {
                    distanceMatrix[i][j] = Double.POSITIVE_INFINITY;
                }
            }
            distanceMatrix[0][0] = (first.value(0) - second.value(0)) * (first.value(0) - second.value(0));
            //a is the longer series.
            //Base cases for warping 0 to all with max interval r
            //Warp first[0] onto all second[1]...second[r+1]
            for(int j = 1; j < windowSize && j < bLength; j++) {
                distanceMatrix[0][j] =
                    distanceMatrix[0][j - 1] + (first.value(0) - second.value(j)) * (first.value(0) - second.value(j));
            }
            // Warp second[0] onto all first[1]...first[r+1]
            for(int i = 1; i < windowSize && i < aLength; i++) {
                distanceMatrix[i][0] =
                    distanceMatrix[i - 1][0] + (first.value(i) - second.value(0)) * (first.value(i) - second.value(0));
            }
            //Warp the rest,
            for(int i = 1; i < aLength; i++) {
                tooBig = true;
                start = windowSize < i ? i - windowSize + 1 : 1;
                end = Math.min(i + windowSize, bLength);
                if(distanceMatrix[i][start - 1] < limit) {
                    tooBig = false;
                }
                for(int j = start; j < end; j++) {
                    minDist = distanceMatrix[i][j - 1];
                    if(distanceMatrix[i - 1][j] < minDist) {
                        minDist = distanceMatrix[i - 1][j];
                    }
                    if(distanceMatrix[i - 1][j - 1] < minDist) {
                        minDist = distanceMatrix[i - 1][j - 1];
                    }
                    distanceMatrix[i][j] =
                        minDist + (first.value(i) - second.value(j)) * (first.value(i) - second.value(j));
                    if(tooBig && distanceMatrix[i][j] < limit) {
                        tooBig = false;
                    }
                }
                //Early abandon
                if(tooBig) {
                    return Double.POSITIVE_INFINITY;
                }
            }
            //Find the minimum distance at the end points, within the warping window.
            double distance = distanceMatrix[aLength - 1][bLength - 1];
            return distance;
            //        double[] a = ExposedDenseInstance.extractAttributeValuesAndClassLabel(first);
            //        double[] b = ExposedDenseInstance.extractAttributeValuesAndClassLabel(bi);
            //        double[][] matrixD = null;
            //        double minDist;
            //        boolean tooBig;
            //        // Set the longest series to a. is this necessary?
            //        double[] temp;
            //        if(a.length<b.length){
            //            temp=a;
            //            a=b;
            //            b=temp;
            //        }
            //        int n=a.length-1;
            //        int m=b.length-1;
            ///* Parameter 0<=r<=1. 0 == no warp, 1 == full warp
            //generalised for variable window size
            //* */
            ////        windowSize = getWindowSize(n);
            //        //Extra memory than required, could limit to windowsize,
            //        //        but avoids having to recreate during CV
            //        //for varying window sizes
            //        if(matrixD==null)
            //            matrixD=new double[n][m];
            /*
            //Set boundary elements to max.
            */
            //        int start,end;
            //        for(int i=0;i<n;i++){
            //            start=windowSize<i?i-windowSize:0;
            //            end=i+windowSize+1<m?i+windowSize+1:m;
            //            for(int j=start;j<end;j++)
            //                matrixD[i][j]=Double.MAX_VALUE;
            //        }
            //        matrixD[0][0]=(a[0]-b[0])*(a[0]-b[0]);
            //        //a is the longer series.
            //        //Base cases for warping 0 to all with max interval r
            //        //Warp a[0] onto all b[1]...b[r+1]
            //        for(int j=1;j<windowSize && j<m;j++)
            //            matrixD[0][j]=matrixD[0][j-1]+(a[0]-b[j])*(a[0]-b[j]);
            //
            //        // Warp b[0] onto all a[1]...a[r+1]
            //        for(int i=1;i<windowSize && i<n;i++)
            //            matrixD[i][0]=matrixD[i-1][0]+(a[i]-b[0])*(a[i]-b[0]);
            //        //Warp the rest,
            //        for (int i=1;i<n;i++){
            //            tooBig=true;
            //            start=windowSize<i?i-windowSize+1:1;
            //            end=i+windowSize<m?i+windowSize:m;
            //            for (int j = start;j<end;j++){
            //                minDist=matrixD[i][j-1];
            //                if(matrixD[i-1][j]<minDist)
            //                    minDist=matrixD[i-1][j];
            //                if(matrixD[i-1][j-1]<minDist)
            //                    minDist=matrixD[i-1][j-1];
            //                matrixD[i][j]=minDist+(a[i]-b[j])*(a[i]-b[j]);
            //                if(tooBig&&matrixD[i][j]<cutoff)
            //                    tooBig=false;
            //            }
            //            //Early abandon
            //            if(tooBig){
            //                return Double.MAX_VALUE;
            //            }
            //        }
            //        //Find the minimum distance at the end points, within the warping window.
            //        return matrixD[n-1][m-1];
        }
        /**
         * MSM split/merge cost: c when newPoint lies between x and y, otherwise
         * c plus the distance to the nearer of the two.
         */
        private static double findCost(double newPoint, double x, double y, double c) {
            double dist = 0;
            if(((x <= newPoint) && (newPoint <= y)) ||
                   ((y <= newPoint) && (newPoint <= x))) {
                dist = c;
            } else {
                dist = c + Math.min(Math.abs(newPoint - x), Math.abs(newPoint - y));
            }
            return dist;
        }
        /**
         * Move-Split-Merge distance with penalty c. Early abandons
         * (POSITIVE_INFINITY) when the minimum of a full row exceeds limit.
         */
        public static double msm(Instance a, Instance b, double limit, double c) {
            int aLength = a.numAttributes() - 1;
            int bLength = b.numAttributes() - 1;
            double[][] cost = new double[aLength][bLength];
            // Initialization
            cost[0][0] = Math.abs(a.value(0) - b.value(0));
            for(int i = 1; i < aLength; i++) {
                cost[i][0] = cost[i - 1][0] + findCost(a.value(i), a.value(i - 1), b.value(0), c);
            }
            for(int i = 1; i < bLength; i++) {
                cost[0][i] = cost[0][i - 1] + findCost(b.value(i), a.value(0), b.value(i - 1), c);
            }
            // Main Loop
            double min;
            for(int i = 1; i < aLength; i++) {
                min = Double.POSITIVE_INFINITY;
                for(int j = 1; j < bLength; j++) {
                    double d1, d2, d3;
                    d1 = cost[i - 1][j - 1] + Math.abs(a.value(i) - b.value(j));
                    d2 = cost[i - 1][j] + findCost(a.value(i), a.value(i - 1), b.value(j), c);
                    d3 = cost[i][j - 1] + findCost(b.value(j), a.value(i), b.value(j - 1), c);
                    cost[i][j] = Math.min(d1, Math.min(d2, d3));
                }
                // early abandon if even the best cell of this row exceeds limit
                for(int j = 0; j < bLength; j++) {
                    min = Math.min(min, cost[i][j]);
                }
                if(min > limit) {
                    return Double.POSITIVE_INFINITY;
                }
            }
            // Output
            return cost[aLength - 1][bLength - 1];
        }
        /**
         * Time Warp Edit Distance with implicit unit time stamps (1..n),
         * stiffness nu and delete penalty lambda. Squared-difference local
         * costs. Early abandons (POSITIVE_INFINITY) when the minimum of a full
         * row exceeds limit.
         */
        public static double twed(Instance a, Instance b, double limit, double lambda, double nu) {
            int aLength = a.numAttributes() - 1;
            int bLength = b.numAttributes() - 1;
            int dim = 1;
            double dist, disti1, distj1;
            double[][] ta = new double[aLength][dim];
            double[][] tb = new double[bLength][dim];
            double[] tsa = new double[aLength];
            double[] tsb = new double[bLength];
            // implicit time stamps 1..n
            for(int i = 0; i < tsa.length; i++) {
                tsa[i] = (i + 1);
            }
            for(int i = 0; i < tsb.length; i++) {
                tsb[i] = (i + 1);
            }
            int r = ta.length;
            int c = tb.length;
            int i, j, k;
            //Copy over values
            for(i = 0; i < aLength; i++) {
                ta[i][0] = a.value(i);
            }
            for(i = 0; i < bLength; i++) {
                tb[i][0] = b.value(i);
            }
            /* allocations in c
            double **D = (double **)calloc(r+1, sizeof(double*));
            double *Di1 = (double *)calloc(r+1, sizeof(double));
            double *Dj1 = (double *)calloc(c+1, sizeof(double));
            for(i=0; i<=r; i++) {
            D[i]=(double *)calloc(c+1, sizeof(double));
            }
            */
            double[][] D = new double[r + 1][c + 1];
            double[] Di1 = new double[r + 1];
            double[] Dj1 = new double[c + 1];
            // local costs initializations
            for(j = 1; j <= c; j++) {
                distj1 = 0;
                for(k = 0; k < dim; k++) {
                    if(j > 1) {
                        //CHANGE AJB 8/1/16: Only use power of 2 for speed up,
                        distj1 += (tb[j - 2][k] - tb[j - 1][k]) * (tb[j - 2][k] - tb[j - 1][k]);
                        // OLD VERSION                    distj1+=Math.pow(Math.abs(tb[j-2][k]-tb[j-1][k]),degree);
                        // in c:                    distj1+=pow(fabs(tb[j-2][k]-tb[j-1][k]),degree);
                    } else {
                        distj1 += tb[j - 1][k] * tb[j - 1][k];
                    }
                }
                //OLD                 distj1+=Math.pow(Math.abs(tb[j-1][k]),degree);
                Dj1[j] = (distj1);
            }
            for(i = 1; i <= r; i++) {
                disti1 = 0;
                for(k = 0; k < dim; k++) {
                    if(i > 1) {
                        disti1 += (ta[i - 2][k] - ta[i - 1][k]) * (ta[i - 2][k] - ta[i - 1][k]);
                    }
                    // OLD                 disti1+=Math.pow(Math.abs(ta[i-2][k]-ta[i-1][k]),degree);
                    else {
                        disti1 += (ta[i - 1][k]) * (ta[i - 1][k]);
                    }
                }
                //OLD                disti1+=Math.pow(Math.abs(ta[i-1][k]),degree);
                Di1[i] = (disti1);
                for(j = 1; j <= c; j++) {
                    dist = 0;
                    for(k = 0; k < dim; k++) {
                        dist += (ta[i - 1][k] - tb[j - 1][k]) * (ta[i - 1][k] - tb[j - 1][k]);
                        //                  dist+=Math.pow(Math.abs(ta[i-1][k]-tb[j-1][k]),degree);
                        if(i > 1 && j > 1) {
                            dist += (ta[i - 2][k] - tb[j - 2][k]) * (ta[i - 2][k] - tb[j - 2][k]);
                        }
                        //                    dist+=Math.pow(Math.abs(ta[i-2][k]-tb[j-2][k]),degree);
                    }
                    D[i][j] = (dist);
                }
            }// for i
            // border of the cost matrix initialization
            D[0][0] = 0;
            for(i = 1; i <= r; i++) {
                //            D[i][0] = Double.POSITIVE_INFINITY;
                D[i][0] = D[i - 1][0] + Di1[i];
            }
            for(j = 1; j <= c; j++) {
                //            D[0][j] = Double.POSITIVE_INFINITY;
                D[0][j] = D[0][j - 1] + Dj1[j];
            }
            double dmin, htrans, dist0;
            int iback;
            for(i = 1; i <= r; i++) {
                for(j = 1; j <= c; j++) {
                    // match: stiffness on time-stamp offsets of both matched pairs
                    htrans = Math.abs((tsa[i - 1] - tsb[j - 1]));
                    if(j > 1 && i > 1) {
                        htrans += Math.abs((tsa[i - 2] - tsb[j - 2]));
                    }
                    dist0 = D[i - 1][j - 1] + nu * htrans + D[i][j];
                    dmin = dist0;
                    // delete in a
                    if(i > 1) {
                        htrans = ((tsa[i - 1] - tsa[i - 2]));
                    } else {
                        htrans = tsa[i - 1];
                    }
                    dist = Di1[i] + D[i - 1][j] + lambda + nu * htrans;
                    if(dmin > dist) {
                        dmin = dist;
                    }
                    // delete in b
                    if(j > 1) {
                        htrans = (tsb[j - 1] - tsb[j - 2]);
                    } else {
                        htrans = tsb[j - 1];
                    }
                    dist = Dj1[j] + D[i][j - 1] + lambda + nu * htrans;
                    if(dmin > dist) {
                        dmin = dist;
                    }
                    D[i][j] = dmin;
                }
                // early abandon on the row minimum
                double min = Double.POSITIVE_INFINITY;
                for(int m = 0; m < D[i].length; m++) {
                    min = Math.min(min, D[i][m]);
                }
                if(min > limit) {
                    return Double.POSITIVE_INFINITY;
                }
            }
            dist = D[r][c];
            return dist;
        }
        /**
         * Longest Common SubSequence distance: 1 - LCSS/aLength, with warping
         * window delta and match tolerance epsilon. The limit is converted back
         * to an LCSS count so rows can be fast-failed.
         */
        public static double lcss(Instance a, Instance b, double limit, int delta, double epsilon) {
            int aLength = a.numAttributes() - 1;
            int bLength = b.numAttributes() - 1;
            // 22/10/19 goastler - limit LCSS such that if any value in the current window is larger than the limit then we can stop here, no point in doing the extra work
            if(limit != Double.POSITIVE_INFINITY) { // check if there's a limit set
                // if so then reverse engineer the max LCSS distance and replace the limit
                // this is just the inverse of the return value integer rounded to an LCSS distance
                limit = (int) ((1 - limit) * aLength) + 1;
            }
            int[][] lcss = new int[aLength + 1][bLength + 1];
            int warpingWindow = delta;
            if(warpingWindow < 0) {
                warpingWindow = aLength + 1;
            }
            for(int i = 0; i < aLength; i++) {
                boolean tooBig = true;
                for(int j = i - warpingWindow; j <= i + warpingWindow; j++) {
                    if(j < 0) {
                        j = -1;
                    } else if(j >= bLength) {
                        j = i + warpingWindow;
                    } else {
                        if(b.value(j) + epsilon >= a.value(i) && b.value(j) - epsilon <= a
                            .value(i)) {
                            lcss[i + 1][j + 1] = lcss[i][j] + 1;
                            //                    } else if(lcss[i][j + 1] > lcss[i + 1][j]) {
                            //                        lcss[i + 1][j + 1] = lcss[i][j + 1];
                            //                    } else {
                            //                        lcss[i + 1][j + 1] = lcss[i + 1][j];
                        }
                        else {
                            lcss[i + 1][j + 1] = Math.max(lcss[i + 1][j], Math.max(lcss[i][j], lcss[i][j + 1]));
                        }
                        // if this value is less than the limit then fast-fail the limit overflow
                        if(tooBig && lcss[i + 1][j + 1] <= limit) {
                            tooBig = false;
                        }
                    }
                }
                // if no element is lower than the limit then early abandon
                if(tooBig) {
                    return Double.POSITIVE_INFINITY;
                }
            }
            int max = -1;
            for(int j = 1; j < lcss[lcss.length - 1].length; j++) {
                if(lcss[lcss.length - 1][j] > max) {
                    max = lcss[lcss.length - 1][j];
                }
            }
            return 1 - ((double) max / aLength);
        }
    }
private static class MonashDistanceMeasures {
public static double min(double A, double B, double C) {
if (A < B) {
if (A < C) {
// A < B and A < C
return A;
} else {
// C < A < B
return C;
}
} else {
if (B < C) {
// B < A and B < C
return B;
} else {
// C < B < A
return C;
}
}
}
public static double squaredDistance(double A, double B) {
double x = A - B;
return x * x;
}
public static double dtw(double[] series1, double[] series2, double bsf, int windowSize) {
// monash's dtw doesn't work properly when the window is zero (i.e. euclidean distance).
// default to their ED to make things work, but this is a bug in their code and therefore their PF / tsml's PF-WRAPPER
if(windowSize == 0) {
return ed(series1, series2, Double.POSITIVE_INFINITY);
}
if (windowSize == -1) {
windowSize = series1.length;
}
int length1 = series1.length;
int length2 = series2.length;
int maxLength = Math.max(length1, length2);
double[] prevRow = new double[maxLength];
double[] currentRow = new double[maxLength];
if (prevRow == null || prevRow.length < maxLength) {
prevRow = new double[maxLength];
}
if (currentRow == null || currentRow.length < maxLength) {
currentRow = new double[maxLength];
}
int i, j;
double prevVal;
double thisSeries1Val = series1[0];
// initialising the first row - do this in prevRow so as to save swapping rows before next row
prevVal = prevRow[0] = squaredDistance(thisSeries1Val, series2[0]);
for (j = 1; j < Math.min(length2, 1 + windowSize); j++) {
prevVal = prevRow[j] = prevVal + squaredDistance(thisSeries1Val, series2[j]);
}
// the second row is a special case
if (length1 >= 2){
thisSeries1Val = series1[1];
if (windowSize>0){
currentRow[0] = prevRow[0]+squaredDistance(thisSeries1Val, series2[0]);
}
// in this special case, neither matrix[1][0] nor matrix[0][1] can be on the (shortest) minimum path
prevVal = currentRow[1]=prevRow[0]+squaredDistance(thisSeries1Val, series2[1]);
int jStop = (windowSize + 2 > length2) ? length2 : windowSize + 2;
for (j = 2; j < jStop; j++) {
// for the second row, matrix[0][j - 1] cannot be on a (shortest) minimum path
prevVal = currentRow[j] = Math.min(prevVal, prevRow[j - 1]) + squaredDistance(thisSeries1Val, series2[j]);
}
}
// third and subsequent rows
for (i = 2; i < length1; i++) {
int jStart;
int jStop = (i + windowSize >= length2) ? length2-1 : i + windowSize;
// the old currentRow becomes this prevRow and so the currentRow needs to use the old prevRow
double[] tmp = prevRow;
prevRow = currentRow;
currentRow = tmp;
thisSeries1Val = series1[i];
if (i - windowSize < 1) {
jStart = 1;
currentRow[0] = prevRow[0] + squaredDistance(thisSeries1Val, series2[0]);
}
else {
jStart = i - windowSize;
}
if (jStart <= jStop){
// If jStart is the start of the window, [i][jStart-1] is outside the window.
// Otherwise jStart-1 must be 0 and the path through [i][0] can never be less than the path directly from [i-1][0]
prevVal = currentRow[jStart] = Math.min(prevRow[jStart - 1], prevRow[jStart])+ squaredDistance(thisSeries1Val, series2[jStart]);
for (j = jStart+1; j < jStop; j++) {
prevVal = currentRow[j] = min(prevRow[j - 1], prevVal, prevRow[j])
+ squaredDistance(thisSeries1Val, series2[j]);
}
if (i + windowSize >= length2) {
// the window overruns the end of the sequence so can have a path through prevRow[jStop]
currentRow[jStop] = min(prevRow[jStop - 1], prevRow[jStop], prevVal) + squaredDistance(thisSeries1Val, series2[jStop]);
}
else {
currentRow[jStop] = Math.min(prevRow[jStop - 1], prevVal) + squaredDistance(thisSeries1Val, series2[jStop]);
}
}
}
double res = currentRow[length2 - 1];
return res;
}
public static double ddtw(double[] a, double[] b, double bsf, int w) {
return dtw(getDeriv(a), getDeriv(b), bsf, w);
}
private static double[] getDeriv(double[] series) {
double[] d = new double[series.length];
for (int i = 1; i < series.length - 1 ; i++) {
d[i] = ((series[i] - series[i - 1]) + ((series[i + 1] - series[i - 1]) / 2.0)) / 2.0;
}
d[0] = d[1];
d[d.length - 1] = d[d.length - 2];
return d;
}
public static double erp(double[] first, double[] second, int windowSize, double gValue) {
double[] curr = null, prev = null;
int m = first.length;
int n = second.length;
if (curr == null || curr.length < m) {
curr = new double[m];
prev = new double[m];
} else {
// FPH: init to 0 just in case, didn't check if
// important
for (int i = 0; i < curr.length; i++) {
curr[i] = 0.0;
prev[i] = 0.0;
}
}
// size of edit distance band
// bandsize is the maximum allowed distance to the diagonal
// int band = (int) Math.ceil(v2.getDimensionality() *
// bandSize);
// int band = (int) Math.ceil(m * bandSize);
int band = windowSize;
// g parameter for local usage
for (int i = 0; i < m; i++) {
// Swap current and prev arrays. We'll just overwrite
// the new curr.
{
double[] temp = prev;
prev = curr;
curr = temp;
}
int l = i - (band + 1);
if (l < 0) {
l = 0;
}
int r = i + (band + 1);
if (r > (m - 1)) {
r = (m - 1);
}
for (int j = l; j <= r; j++) {
if (Math.abs(i - j) <= band) {
// compute squared distance of feature
// vectors
double val1 = first[i];
double val2 = gValue;
double diff = (val1 - val2);
// final double d1 = Math.sqrt(diff * diff);
final double d1 = diff;//FPH simplificaiton
val1 = gValue;
val2 = second[j];
diff = (val1 - val2);
// final double d2 = Math.sqrt(diff * diff);
final double d2 = diff;
val1 = first[i];
val2 = second[j];
diff = (val1 - val2);
// final double d12 = Math.sqrt(diff * diff);
final double d12 = diff;
final double dist1 = d1 * d1;
final double dist2 = d2 * d2;
final double dist12 = d12 * d12;
final double cost;
if ((i + j) != 0) {
if ((i == 0) || ((j != 0) && (((prev[j - 1] + dist12) > (curr[j - 1] + dist2))
&& ((curr[j - 1] + dist2) < (prev[j] + dist1))))) {
// del
cost = curr[j - 1] + dist2;
} else if ((j == 0) || ((i != 0) && (((prev[j - 1] + dist12) > (prev[j] + dist1))
&& ((prev[j] + dist1) < (curr[j - 1] + dist2))))) {
// ins
cost = prev[j] + dist1;
} else {
// match
cost = prev[j - 1] + dist12;
}
} else {
cost = 0;
}
curr[j] = cost;
// steps[i][j] = step;
} else {
curr[j] = Double.POSITIVE_INFINITY; // outside
// band
}
}
}
return curr[m - 1];
}
public static double ed(double[] s, double[] t, double bsf) {
int i = 0;
double total = 0;
//assume s.length == t.length for this implementation
//TODO note <=, if bsf = 0, < will cause problems when early abandoning
for (i = 0; i < s.length & total <= bsf; i++){
total += (s[i] - t[i]) * (s[i] - t[i]);
}
// System.out.println("Euclidean: early abandon after: " + i + " from: " + s.length);
// return Math.sqrt(total);
return total;
}
private static int sim(double a, double b, double epsilon) {
return (Math.abs(a - b) <= epsilon) ? 1 : 0;
}
public static double lcss(double[] series1, double[] series2, double bsf, int windowSize, double epsilon) {
if (windowSize == -1) {
windowSize = series1.length;
}
int length1 = series1.length;
int length2 = series2.length;
int maxLength = Math.max(length1, length2);
int minLength = Math.min(length1, length2);
int [][]matrix = new int[length1][length2];
// int[][] matrix = MemoryManager.getInstance().getIntMatrix(0);
int i, j;
matrix[0][0] = sim(series1[0], series2[0], epsilon);
for (i = 1; i < Math.min(length1, 1 + windowSize); i++) {
matrix[i][0] = (sim(series1[i], series2[0], epsilon)==1)?sim(series1[i], series2[0], epsilon):matrix[i-1][0];
}
for (j = 1; j < Math.min(length2, 1 + windowSize); j++) {
matrix[0][j] = (sim(series1[0], series2[j], epsilon)==1?sim(series1[0], series2[j], epsilon):matrix[0][j-1]);
}
if (j < length2)
matrix[0][j] = Integer.MIN_VALUE;
for (i = 1; i < length1; i++) {
int jStart = (i - windowSize < 1) ? 1 : i - windowSize;
int jStop = (i + windowSize + 1 > length2) ? length2 : i + windowSize + 1;
if (i-windowSize-1>=0)
matrix[i][i-windowSize-1] = Integer.MIN_VALUE;
for (j = jStart; j < jStop; j++) {
if (sim(series1[i], series2[j], epsilon) == 1) {
matrix[i][j] = matrix[i - 1][j - 1] + 1;
} else {
matrix[i][j] = max(matrix[i - 1][j - 1], matrix[i][j - 1], matrix[i - 1][j]);
}
}
if (jStop < length2)
matrix[i][jStop] = Integer.MIN_VALUE;
}
double res = 1.0 - 1.0 * matrix[length1 - 1][length2 - 1] / minLength;
return res;
}
public static final int max(int A, int B, int C) {
if (A > B) {
if (A > C) {
return A;
} else {
// C > A > B
return C;
}
} else {
if (B > C) {
// B > A and B > C
return B;
} else {
// C > B > A
return C;
}
}
}
public static double msm(double[] first, double[] second, double bsf, double c) {
int m = first.length, n = second.length;
int maxLength=(m>=n)?m:n;
double[][]cost = new double[m][n];
// double[][]cost = MemoryManager.getInstance().getDoubleMatrix(0);
if (cost == null || cost.length < m || cost[0].length < n) {
cost = new double[m][n];
}
// Initialization
cost[0][0] = Math.abs(first[0] - second[0]);
for (int i = 1; i < m; i++) {
cost[i][0] = cost[i - 1][0] + calcualteCost(first[i], first[i - 1], second[0], c);
}
for (int i = 1; i < n; i++) {
cost[0][i] = cost[0][i - 1] + calcualteCost(second[i], first[0], second[i - 1], c);
}
// Main Loop
for (int i = 1; i < m; i++) {
for (int j = 1; j < n; j++) {
double d1, d2, d3;
d1 = cost[i - 1][j - 1] + Math.abs(first[i] - second[j]);
d2 = cost[i - 1][j] + calcualteCost(first[i], first[i - 1], second[j], c);
d3 = cost[i][j - 1] + calcualteCost(second[j], first[i], second[j - 1], c);
cost[i][j] = Math.min(d1, Math.min(d2, d3));
}
}
// Output
double res = cost[m - 1][n - 1];
return res;
}
private static final double calcualteCost(double new_point, double x, double y, double c) {
double dist = 0;
if (((x <= new_point) && (new_point <= y)) || ((y <= new_point) && (new_point <= x))) {
dist = c;
} else {
dist = c + Math.min(Math.abs(new_point - x), Math.abs(new_point - y));
}
return dist;
}
        /**
         * Time Warp Edit Distance with implicit unit time stamps (so the
         * stamp differences reduce to index arithmetic: |i-j| for a match,
         * 1 for a delete beyond the first element). nu is the stiffness and
         * lambda the delete penalty; local costs are squared differences.
         * NOTE(review): no early abandon and no bsf parameter in this variant.
         */
        private static double twed(double[] ta, double[] tb, double nu, double lambda) {
            int m = ta.length;
            int n = tb.length;
            int maxLength = Math.max(m, n);
            double dist, disti1, distj1;
            int r = ta.length; // this is just m?!
            int c = tb.length; // so is this, but surely it should actually
            // be n anyway
            int i, j;
            /*
             * allocations in c double **D = (double **)calloc(r+1,
             * sizeof(double*)); double *Di1 = (double *)calloc(r+1,
             * sizeof(double)); double *Dj1 = (double *)calloc(c+1,
             * sizeof(double)); for(i=0; i<=r; i++) { D[i]=(double
             * *)calloc(c+1, sizeof(double)); }
             */
            double[][]D = new double[maxLength + 1][maxLength + 1];
            double[]Di1 = new double[maxLength + 1];
            double[]Dj1 = new double[maxLength + 1];
            //            double[][] D = MemoryManager.getInstance().getDoubleMatrix(0);
            //            double[] Di1 = MemoryManager.getInstance().getDoubleArray(0);
            //            double[] Dj1 = MemoryManager.getInstance().getDoubleArray(1);
            // FPH adding initialisation given that using matrices as fields
            Di1[0] = 0.0;
            Dj1[0] = 0.0;
            // local costs initializations: squared step cost within each series
            // (first element is costed against 0)
            for (j = 1; j <= c; j++) {
                distj1 = 0;
                if (j > 1) {
                    // CHANGE AJB 8/1/16: Only use power of
                    // 2 for speed
                    distj1 += (tb[j - 2] - tb[j - 1]) * (tb[j - 2] - tb[j - 1]);
                    // OLD VERSION
                    // distj1+=Math.pow(Math.abs(tb[j-2][k]-tb[j-1][k]),degree);
                    // in c:
                    // distj1+=pow(fabs(tb[j-2][k]-tb[j-1][k]),degree);
                } else {
                    distj1 += tb[j - 1] * tb[j - 1];
                }
                // OLD distj1+=Math.pow(Math.abs(tb[j-1][k]),degree);
                Dj1[j] = (distj1);
            }
            for (i = 1; i <= r; i++) {
                disti1 = 0;
                if (i > 1) {
                    disti1 += (ta[i - 2] - ta[i - 1]) * (ta[i - 2] - ta[i - 1]);
                } // OLD
                // disti1+=Math.pow(Math.abs(ta[i-2][k]-ta[i-1][k]),degree);
                else {
                    disti1 += (ta[i - 1]) * (ta[i - 1]);
                }
                // OLD disti1+=Math.pow(Math.abs(ta[i-1][k]),degree);
                Di1[i] = (disti1);
                // pairwise match costs: current pair plus previous pair (when available)
                for (j = 1; j <= c; j++) {
                    dist = 0;
                    dist += (ta[i - 1] - tb[j - 1]) * (ta[i - 1] - tb[j - 1]);
                    // dist+=Math.pow(Math.abs(ta[i-1][k]-tb[j-1][k]),degree);
                    if (i > 1 && j > 1) {
                        dist += (ta[i - 2] - tb[j - 2]) * (ta[i - 2] - tb[j - 2]);
                    }
                    // dist+=Math.pow(Math.abs(ta[i-2][k]-tb[j-2][k]),degree);
                    D[i][j] = (dist);
                }
            } // for i
            // border of the cost matrix initialization
            D[0][0] = 0;
            for (i = 1; i <= r; i++) {
                D[i][0] = D[i - 1][0] + Di1[i];
            }
            for (j = 1; j <= c; j++) {
                D[0][j] = D[0][j - 1] + Dj1[j];
            }
            double dmin, htrans, dist0;
            for (i = 1; i <= r; i++) {
                for (j = 1; j <= c; j++) {
                    // match: stiffness on the index offsets of both matched pairs
                    htrans = Math.abs(i- j);
                    if (j > 1 && i > 1) {
                        htrans += Math.abs((i-1) - (j-1));
                    }
                    dist0 = D[i - 1][j - 1] + nu * htrans + D[i][j];
                    dmin = dist0;
                    // delete in ta: unit time step except at the first element
                    if (i > 1) {
                        htrans = 1;
                    } else {
                        htrans = i;
                    }
                    dist = Di1[i] + D[i - 1][j] + lambda + nu * htrans;
                    if (dmin > dist) {
                        dmin = dist;
                    }
                    // delete in tb: unit time step except at the first element
                    if (j > 1) {
                        htrans = 1;
                    } else {
                        htrans = j;
                    }
                    dist = Dj1[j] + D[i][j - 1] + lambda + nu * htrans;
                    if (dmin > dist) {
                        dmin = dist;
                    }
                    D[i][j] = dmin;
                }
            }
            dist = D[r][c];
            return dist;
        }
/**
 * Builds the WDTW weight vector: a logistic (sigmoid) weight for each
 * possible warping amount 0..seriesLength-1, controlled by the penalty g
 * and centred on half the series length.
 *
 * @param seriesLength length of the weight vector to build
 * @param g            penalty controlling the steepness of the weight curve
 * @return the weight vector
 */
private static double[] initWeights(int seriesLength, double g) {
    final double[] weights = new double[seriesLength];
    final double midPoint = seriesLength / 2.0;
    for (int index = 0; index < seriesLength; index++) {
        weights[index] = 1.0 / (1.0 + Math.exp(-g * (index - midPoint)));
    }
    return weights;
}
/**
 * Weighted Dynamic Time Warping (WDTW) distance between two univariate
 * series. The local cost is the squared difference, multiplied by a logistic
 * weight (see initWeights) indexed by the warping amount |i - j|.
 * Space-optimised: only two rows of the cost matrix are kept and swapped.
 *
 * @param first  first series
 * @param second second series
 * @param g      weight penalty controlling the steepness of the logistic
 *               weight curve
 * @return the WDTW distance (no early abandon / limit)
 */
public static double wdtw(double[] first, double[] second, double g) {
    // weights are indexed by warp amount; sized from first's length
    double[] weightVector = initWeights(first.length, g);
    double[] prevRow = new double[second.length];
    double[] curRow = new double[second.length];
    double second0 = second[0];
    double thisDiff;
    double prevVal = 0.0;
    // put the first row into prevRow to save swapping before moving to the second row
    { double first0 = first[0];
        // first value: top-left cell
        thisDiff = first0 - second0;
        prevVal = prevRow[0] = weightVector[0] * (thisDiff * thisDiff);
        // top row: each cell accumulates from its left neighbour (prevVal)
        for (int j = 1; j < second.length; j++) {
            thisDiff = first0 - second[j];
            prevVal = prevRow[j] = prevVal + weightVector[j] * (thisDiff * thisDiff);
        }
    }
    double minDistance;
    // NOTE(review): first[1] is read unconditionally, so this method assumes
    // both series have length >= 2 — confirm callers guarantee this.
    double firsti = first[1];
    // second row is a special case because path can't go through prevRow[j]
    thisDiff = firsti - second0;
    prevVal = curRow[0] = prevRow[0] + weightVector[1] * (thisDiff * thisDiff);
    for (int j = 1; j < second.length; j++) {
        // calculate distances; weightVector[j - 1] equals weightVector[|i - j|]
        // here because i == 1 on this row
        minDistance = Math.min(prevVal, prevRow[j - 1]);
        thisDiff = firsti - second[j];
        prevVal = curRow[j] = minDistance + weightVector[j-1] * (thisDiff * thisDiff);
    }
    // warp the remaining rows
    for (int i = 2; i < first.length; i++) {
        // make the old current row into the current previous row and set current row to use the old prev row
        double [] tmp = curRow;
        curRow = prevRow;
        prevRow = tmp;
        firsti = first[i];
        // left-most column: only accumulates from the cell above
        thisDiff = firsti - second0;
        prevVal = curRow[0] = prevRow[0] + weightVector[i] * (thisDiff * thisDiff);
        for (int j = 1; j < second.length; j++) {
            // standard DTW recurrence over left / top / top-left, weighted by
            // warp amount; min is a 3-arg helper defined elsewhere in this file
            minDistance = min(prevVal, prevRow[j], prevRow[j - 1]);
            thisDiff = firsti - second[j];
            prevVal = curRow[j] = minDistance + weightVector[Math.abs(i - j)] * (thisDiff * thisDiff);
        }
    }
    // prevVal holds the last value written to the final row, i.e. the
    // cumulative cost at (first.length - 1, second.length - 1)
    double res = prevVal;
    return res;
}
/**
 * Weighted derivative DTW: WDTW applied to the derivative transform of each
 * series (getDeriv is defined elsewhere in this file).
 *
 * @param a first series
 * @param b second series
 * @param g weight penalty passed through to wdtw
 * @return the WDDTW distance
 */
public static double wddtw(double[] a, double[] b, double g) {
    return wdtw(getDeriv(a), getDeriv(b), g);
}
}
}
| 86,653 | 41.855589 | 206 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/distances/IndependentDistanceMeasure.java | package tsml.classifiers.distance_based.distances;
import tsml.classifiers.distance_based.utils.collections.params.ParamSet;
import tsml.data_containers.TimeSeriesInstance;
import tsml.data_containers.TimeSeriesInstances;
import weka.core.Instances;
import java.util.Objects;
/**
 * Treats a multivariate distance as the sum of independent univariate
 * distances: the wrapped measure is applied to each dimension separately and
 * the per-dimension distances are summed, early-abandoning against the limit.
 */
public class IndependentDistanceMeasure extends BaseDistanceMeasure {

    // the measure applied to each dimension in turn; never null
    private DistanceMeasure distanceMeasure;

    public IndependentDistanceMeasure(final DistanceMeasure distanceMeasure) {
        setDistanceMeasure(distanceMeasure);
    }

    @Override public boolean isSymmetric() {
        // symmetry is inherited from the wrapped measure
        return distanceMeasure.isSymmetric();
    }

    @Override public double distance(final TimeSeriesInstance a, final TimeSeriesInstance b,
                                     final double limit) {
        double total = 0;
        final int numDimensions = a.getNumDimensions();
        for(int dim = 0; dim < numDimensions; dim++) {
            // slice out the single dimension from each instance
            final TimeSeriesInstance aDim = a.getHSlice(dim);
            final TimeSeriesInstance bDim = b.getHSlice(dim);
            // the remaining budget is the limit minus what has been spent so
            // far; the wrapped measure returns +inf when that budget is
            // exceeded, which makes the running total +inf as well
            total += distanceMeasure.distance(aDim, bDim, limit - total);
            // bail out as soon as the running total passes the limit
            if(total > limit) {
                return Double.POSITIVE_INFINITY;
            }
        }
        return total;
    }

    @Override public String getName() {
        return distanceMeasure.getName() + "_I";
    }

    @Override public void buildDistanceMeasure(final TimeSeriesInstances data) {
        distanceMeasure.buildDistanceMeasure(data);
    }

    @Override public void buildDistanceMeasure(final Instances data) {
        distanceMeasure.buildDistanceMeasure(data);
    }

    public DistanceMeasure getDistanceMeasure() {
        return distanceMeasure;
    }

    private void setDistanceMeasure(DistanceMeasure distanceMeasure) {
        this.distanceMeasure = Objects.requireNonNull(distanceMeasure);
    }

    @Override public void setParams(final ParamSet paramSet) throws Exception {
        setDistanceMeasure(paramSet.get(DISTANCE_MEASURE_FLAG, getDistanceMeasure()));
    }

    @Override public ParamSet getParams() {
        return new ParamSet().add(DISTANCE_MEASURE_FLAG, getDistanceMeasure());
    }

    @Override public String toString() {
        return getName() + " " + distanceMeasure.getParams();
    }
}
| 2,672 | 35.121622 | 135 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/distances/MatrixBasedDistanceMeasure.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.distance_based.distances;
import java.util.Arrays;
/**
* Abstract distance measure. This takes the weka interface for DistanceFunction and implements some default methods,
* adding several checks and balances also. All distance measures should extends this class. This is loosely based on
* the Transformer pattern whereby the user optionally "fits" some data and can then proceed to use the distance
* measure. Simple distance measures need not fit at all, therefore the fit method is empty for those implementations
* . fit() should always be called before any distance measurements.
* <p>
* Contributors: goastler
*/
/**
 * Base class for distance measures computed via a dynamic-programming cost
 * matrix. Manages the matrix (or a recycled pair of rows when the full matrix
 * is not being recorded) for subclasses: call setup(...) before computing a
 * distance, getRow(i) to obtain row i, and teardown() once finished.
 * <p>
 * Contributors: goastler
 */
public abstract class MatrixBasedDistanceMeasure extends BaseDistanceMeasure {

    // when true, the full cost matrix is built and retained after teardown()
    private boolean recordCostMatrix = false;
    // the full cost matrix, only populated when recordCostMatrix is set
    private double[][] costMatrix;
    // the two recycled rows used when the full matrix is not recorded
    private double[] oddRow;
    private double[] evenRow;
    private int numCols;
    private boolean recycleRows;

    /**
     * Prepare storage for a new distance computation.
     *
     * @param numRows     number of rows in the cost matrix
     * @param numCols     number of columns in the cost matrix
     * @param recycleRows whether getRow may hand back two alternating rows
     *                    rather than allocating a fresh row per call
     */
    protected void setup(int numRows, int numCols, boolean recycleRows) {
        costMatrix = null;
        evenRow = null;
        oddRow = null;
        this.numCols = numCols;
        this.recycleRows = recycleRows;
        if(recordCostMatrix) {
            costMatrix = new double[numRows][numCols];
            for(double[] row : costMatrix) {
                Arrays.fill(row, getFillerValue());
            }
        } else {
            evenRow = new double[numCols];
            oddRow = new double[numCols];
        }
    }

    /**
     * The value matrix cells are initialised to before any costs are written.
     */
    protected double getFillerValue() {
        return Double.POSITIVE_INFINITY;
    }

    /**
     * Release working storage once a distance has been computed. The cost
     * matrix survives only when set to be recorded; everything else is
     * discarded so no rows / matrices linger in memory after computation.
     */
    protected void teardown() {
        evenRow = null;
        oddRow = null;
        numCols = -1;
        recycleRows = false;
        if(!recordCostMatrix) {
            costMatrix = null;
        }
    }

    /**
     * Fetch row i: the matrix row when recording, one of the two recycled
     * rows when recycling, otherwise a freshly allocated row.
     *
     * @param i row index
     * @return the row to write costs into
     */
    protected double[] getRow(int i) {
        if(recordCostMatrix) {
            return costMatrix[i];
        }
        if(recycleRows) {
            return i % 2 == 0 ? evenRow : oddRow;
        }
        return new double[numCols];
    }

    public double[][] costMatrix() {
        return costMatrix;
    }

    public void clear() {
        costMatrix = null;
    }

    public boolean isRecordCostMatrix() {
        return recordCostMatrix;
    }

    public void setRecordCostMatrix(final boolean recordCostMatrix) {
        this.recordCostMatrix = recordCostMatrix;
    }
}
| 3,925 | 35.018349 | 326 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/distances/dtw/DTW.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.distance_based.distances.dtw;
/*
Purpose: interface for DTW behaviour
Contributors: goastler
*/
import tsml.classifiers.distance_based.distances.DistanceMeasure;
import tsml.classifiers.distance_based.utils.collections.params.ParamHandler;
/**
 * Interface for DTW-style distance measures exposing a warping window
 * parameter.
 * <p>
 * Contributors: goastler
 */
public interface DTW extends DistanceMeasure {
    // flag identifying the window parameter in ParamSets
    String WINDOW_FLAG = "w";

    /**
     * The warping window. NOTE(review): implementations in this package
     * appear to treat this as a proportion in [0, 1] of the series length —
     * confirm against the implementing class.
     */
    double getWindow();

    void setWindow(double window);
}
| 1,180 | 30.918919 | 77 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/distances/dtw/DTWDistance.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.distance_based.distances.dtw;
import tsml.classifiers.distance_based.distances.MatrixBasedDistanceMeasure;
import tsml.classifiers.distance_based.utils.collections.checks.Checks;
import tsml.classifiers.distance_based.utils.collections.params.ParamSet;
import tsml.data_containers.TimeSeries;
import tsml.data_containers.TimeSeriesInstance;
import utilities.Utilities;
import java.util.Arrays;
/**
* DTW distance measure.
* <p>
* Contributors: goastler
*/
/**
 * DTW distance measure.
 * <p>
 * Windowed dynamic time warping computed row-by-row over a cost matrix
 * (storage managed by MatrixBasedDistanceMeasure), with early abandoning
 * against a limit.
 * <p>
 * Contributors: goastler
 */
public class DTWDistance extends MatrixBasedDistanceMeasure implements DTW {

    /**
     * Pointwise cost between a[aIndex] and b[bIndex]: the squared difference,
     * summed over every dimension of the (possibly multivariate) instances.
     */
    public static double cost(final TimeSeriesInstance a, final int aIndex, final TimeSeriesInstance b, final int bIndex) {
        double sum = 0;
        for(int i = 0; i < a.getNumDimensions(); i++) {
            final TimeSeries aDim = a.get(i);
            final TimeSeries bDim = b.get(i);
            final double aValue = aDim.get(aIndex);
            final double bValue = bDim.get(bIndex);
            final double sqDiff = StrictMath.pow(aValue - bValue, 2);
            sum += sqDiff;
        }
        return sum;
    }

    // warping window as a proportion; 1 == full window
    private double window = 1;

    /**
     * @param window proportionate window; values outside [0, 1] are rejected
     *               by Checks.requireUnitInterval
     */
    @Override public void setWindow(final double window) {
        this.window = Checks.requireUnitInterval(window);
    }

    @Override public double getWindow() {
        return window;
    }

    /**
     * Windowed DTW distance with early abandoning: returns +inf as soon as
     * every cell in a row exceeds the limit. Series are swapped so that the
     * longer one indexes the rows; the window size is the window proportion
     * of the shorter series' length.
     * NOTE(review): step divides by (aLength - 1), so a series of length 1
     * would divide by zero — confirm inputs always have more than one point.
     */
    public double distance(TimeSeriesInstance a, TimeSeriesInstance b, final double limit) {
        // make a the longest time series
        if(a.getMaxLength() < b.getMaxLength()) {
            TimeSeriesInstance tmp = a;
            a = b;
            b = tmp;
        }
        final int aLength = a.getMaxLength();
        final int bLength = b.getMaxLength();
        // allocate / reuse rows via the parent class
        setup(aLength, bLength, true);
        // step is the increment of the mid point for each row
        final double step = (double) (bLength - 1) / (aLength - 1);
        final double windowSize = this.window * bLength;
        // row index
        int i = 0;
        // start and end of window
        int start = 0;
        double mid = 0;
        int end = Math.min(bLength - 1, (int) Math.floor(windowSize));
        int prevEnd; // store end of window from previous row to fill in shifted space with inf
        double[] row = getRow(i);
        double[] prevRow;
        // col index
        int j = start;
        // process the first row (can only warp left - not top/topLeft)
        double min = row[j++] = cost(a, 0, b, 0); // process top left sqaure of mat
        // compute the first row
        for(; j <= end; j++) {
            row[j] = row[j - 1] + cost(a, i, b, j);
            min = Math.min(min, row[j]);
        }
        if(min > limit) return Double.POSITIVE_INFINITY; // quit if beyond limit
        i++;
        // process remaining rows
        for(; i < aLength; i++) {
            // reset min for the row
            min = Double.POSITIVE_INFINITY;
            // change rows
            prevRow = row;
            row = getRow(i);
            // start, end and mid of window
            prevEnd = end;
            mid = i * step;
            // if using variable length time series and window size is fractional then the window may part cover an
            // element. Any part covered element is truncated from the window. I.e. mid point of 5.5 with window of 2.3
            // would produce a start point of 2.2. The window would start from index 3 as it does not fully cover index
            // 2. The same thing happens at the end, 5.5 + 2.3 = 7.8, so the end index is 7 as it does not fully cover 8
            start = Math.max(0, (int) Math.ceil(mid - windowSize));
            end = Math.min(bLength - 1, (int) Math.floor(mid + windowSize));
            j = start;
            // set the values above the current row and outside of previous window to inf
            Arrays.fill(prevRow, prevEnd + 1, end + 1, Double.POSITIVE_INFINITY);
            // set the value left of the window to inf
            if(j > 0) row[j - 1] = Double.POSITIVE_INFINITY;
            // if assessing the left most column then only mapping option is top - not left or topleft
            if(j == 0) {
                row[j] = prevRow[j] + cost(a, i, b, j);
                min = Math.min(min, row[j++]);
            }
            // compute the distance for each cell in the row
            for(; j <= end; j++) {
                row[j] = Math.min(prevRow[j], Math.min(row[j - 1], prevRow[j - 1])) + cost(a, i, b, j);
                min = Math.min(min, row[j]);
            }
            if(min > limit) return Double.POSITIVE_INFINITY; // quit if beyond limit
        }
        // last value in the current row is the distance
        final double distance = row[row.length - 1];
        // release working rows (matrix kept only if recording)
        teardown();
        return distance;
    }

    @Override public ParamSet getParams() {
        return new ParamSet().add(WINDOW_FLAG, window);
    }

    @Override public void setParams(final ParamSet paramSet) throws Exception {
        setWindow(paramSet.get(WINDOW_FLAG, window));
    }

    /**
     * Ad-hoc demo checking whether distance is symmetric in argument order
     * for unequal-length series; not a unit test.
     */
    public static void main(String[] args) {
        final DTWDistance dm = new DTWDistance();
        dm.setWindow(0.2);
        dm.setRecordCostMatrix(true);
        final double a = dm.distanceUnivariate(new double[]{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, new double[]{1, 2, 3});
        System.out.println();
        System.out.println("-----");
        System.out.println();
        final double b = dm.distanceUnivariate(new double[]{1, 2, 3}, new double[]{1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
        System.out.println(a);
        System.out.println(b);
        if(a != b) {
            System.out.println("not eq");
            System.out.println();
        }
    }
}
| 6,495 | 37.43787 | 123 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/distances/dtw/DTWDistanceTest.java | /*
* This file is part of the UEA Time Series Machine Learning (TSML) toolbox.
*
* The UEA TSML toolbox is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The UEA TSML toolbox is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with the UEA TSML toolbox. If not, see <https://www.gnu.org/licenses/>.
*/
package tsml.classifiers.distance_based.distances.dtw;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import tsml.classifiers.distance_based.utils.collections.params.ParamHandlerTest;
import tsml.data_containers.TimeSeriesInstances;
import utilities.InstanceTools;
import weka.core.Instances;
import static tsml.classifiers.distance_based.distances.dtw.spaces.DDTWDistanceSpace.newDDTWDistance;
/**
* Purpose: test dtw
* <p>
* Contributors: goastler
*/
/**
 * Purpose: test dtw
 * <p>
 * Contributors: goastler
 */
public class DTWDistanceTest {

    /** Two univariate series of length 5, with class labels 0 and 1. */
    public static Instances buildInstances() {
        final double[][] data = {
                {1, 2, 3, 4, 5, 0},
                {6, 11, 15, 2, 7, 1},
        };
        return InstanceTools.toWekaInstancesWithClass(data);
    }

    private Instances instances;
    private DTWDistance df;

    @Before
    public void before() {
        instances = buildInstances();
        df = new DTWDistance();
        df.buildDistanceMeasure(instances);
    }

    @Test
    public void testFullWarp() {
        df.setWindow(1);
        final double dist = df.distance(instances.get(0), instances.get(1));
        Assert.assertEquals(dist, 203, 0);
    }

    @Test
    public void testConstrainedWarp() {
        df.setWindow(0.4);
        final double dist = df.distance(instances.get(0), instances.get(1));
        Assert.assertEquals(dist, 212, 0);
    }

    /** Two unequal-length univariate series used by the variable-length tests. */
    private static TimeSeriesInstances buildVariableLengthData() {
        return new TimeSeriesInstances(new double[][][]{
                {
                        {7, 6, 1, 7, 7, 7, 3, 3, 5, 6}
                },
                {
                        {5, 3, 2, 7, 4, 2, 1, 8, 8, 7, 4, 4, 2, 1, 3}
                }
        }, new double[]{0, 0});
    }

    /**
     * Shared body for the variable-length tests: distance should be 57 and
     * symmetric in argument order, and the given limit should trigger early
     * abandoning (+inf) in both directions.
     */
    private static void checkVariableLength(double window, double limit) {
        final DTWDistance dtw = new DTWDistance();
        dtw.setRecordCostMatrix(true);
        dtw.setWindow(window);
        final TimeSeriesInstances data = buildVariableLengthData();
        double forward = dtw.distance(data.get(0), data.get(1));
        Assert.assertEquals(57, forward, 0d);
        double backward = dtw.distance(data.get(1), data.get(0));
        Assert.assertEquals(forward, backward, 0d);
        forward = dtw.distance(data.get(0), data.get(1), limit);
        Assert.assertEquals(Double.POSITIVE_INFINITY, forward, 0d);
        backward = dtw.distance(data.get(1), data.get(0), limit);
        Assert.assertEquals(forward, backward, 0d);
    }

    @Test
    public void testVariableLengthTimeSeries() {
        checkVariableLength(1, 10);
    }

    @Test
    public void testVariableLengthTimeSeriesConstrainedWarp() {
        checkVariableLength(0.25, 20);
    }

    public static class DTWParamTest extends ParamHandlerTest {
        @Override public Object getHandler() {
            return new DTWDistance();
        }
    }

    public static class DDTWParamTest extends ParamHandlerTest {
        @Override public Object getHandler() {
            return newDDTWDistance();
        }
    }
}
| 4,825 | 35.560606 | 141 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/distances/dtw/spaces/DDTWDistanceContinuousSpace.java | package tsml.classifiers.distance_based.distances.dtw.spaces;
import tsml.classifiers.distance_based.utils.collections.params.ParamMap;
import tsml.classifiers.distance_based.utils.collections.params.ParamSpace;
import tsml.classifiers.distance_based.utils.collections.params.ParamSpaceBuilder;
import tsml.data_containers.TimeSeriesInstances;
import static tsml.classifiers.distance_based.distances.DistanceMeasure.DISTANCE_MEASURE_FLAG;
import static tsml.classifiers.distance_based.distances.dtw.spaces.DDTWDistanceSpace.newDDTWDistance;
import static tsml.classifiers.distance_based.utils.collections.CollectionUtils.newArrayList;
/**
 * Builds the DDTW parameter space with a continuously distributed window:
 * a single DDTW distance measure whose window sub-space comes from
 * DTWDistanceContinuousSpace.
 */
public class DDTWDistanceContinuousSpace implements ParamSpaceBuilder {

    @Override public ParamSpace build(final TimeSeriesInstances data) {
        final ParamSpace windowSpace = new DTWDistanceContinuousSpace().build(data);
        final ParamMap map = new ParamMap();
        map.add(DISTANCE_MEASURE_FLAG, newArrayList(newDDTWDistance()), windowSpace);
        return new ParamSpace(map);
    }
}
| 944 | 51.5 | 152 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/distances/dtw/spaces/DDTWDistanceFullWindowSpace.java | package tsml.classifiers.distance_based.distances.dtw.spaces;
import tsml.classifiers.distance_based.utils.collections.params.ParamMap;
import tsml.classifiers.distance_based.utils.collections.params.ParamSpace;
import tsml.classifiers.distance_based.utils.collections.params.ParamSpaceBuilder;
import tsml.data_containers.TimeSeriesInstances;
import static tsml.classifiers.distance_based.distances.DistanceMeasure.DISTANCE_MEASURE_FLAG;
import static tsml.classifiers.distance_based.distances.dtw.spaces.DDTWDistanceSpace.newDDTWDistance;
import static tsml.classifiers.distance_based.utils.collections.CollectionUtils.newArrayList;
/**
 * Builds the DDTW parameter space restricted to the full window: a single
 * DDTW distance measure whose window sub-space comes from
 * DTWDistanceFullWindowSpace.
 */
public class DDTWDistanceFullWindowSpace implements ParamSpaceBuilder {

    @Override public ParamSpace build(final TimeSeriesInstances data) {
        final ParamSpace windowSpace = new DTWDistanceFullWindowSpace().build(data);
        final ParamMap map = new ParamMap();
        map.add(DISTANCE_MEASURE_FLAG, newArrayList(newDDTWDistance()), windowSpace);
        return new ParamSpace(map);
    }
}
| 943 | 54.529412 | 152 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/distances/dtw/spaces/DDTWDistanceRestrictedContinuousSpace.java | package tsml.classifiers.distance_based.distances.dtw.spaces;
import tsml.classifiers.distance_based.utils.collections.params.ParamMap;
import tsml.classifiers.distance_based.utils.collections.params.ParamSpace;
import tsml.classifiers.distance_based.utils.collections.params.ParamSpaceBuilder;
import tsml.data_containers.TimeSeriesInstances;
import static tsml.classifiers.distance_based.distances.DistanceMeasure.DISTANCE_MEASURE_FLAG;
import static tsml.classifiers.distance_based.distances.dtw.spaces.DDTWDistanceSpace.newDDTWDistance;
import static tsml.classifiers.distance_based.utils.collections.CollectionUtils.newArrayList;
/**
 * Builds the DDTW parameter space with a restricted continuous window:
 * a single DDTW distance measure whose window sub-space comes from
 * DTWDistanceRestrictedContinuousSpace.
 */
public class DDTWDistanceRestrictedContinuousSpace implements ParamSpaceBuilder {

    @Override public ParamSpace build(final TimeSeriesInstances data) {
        final ParamSpace windowSpace = new DTWDistanceRestrictedContinuousSpace().build(data);
        final ParamMap map = new ParamMap();
        map.add(DISTANCE_MEASURE_FLAG, newArrayList(newDDTWDistance()), windowSpace);
        return new ParamSpace(map);
    }
}
| 964 | 52.611111 | 162 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/distances/dtw/spaces/DDTWDistanceSpace.java | package tsml.classifiers.distance_based.distances.dtw.spaces;
import tsml.classifiers.distance_based.distances.dtw.DTWDistance;
import tsml.classifiers.distance_based.distances.transformed.BaseTransformDistanceMeasure;
import tsml.classifiers.distance_based.distances.transformed.TransformDistanceMeasure;
import tsml.classifiers.distance_based.utils.collections.params.ParamMap;
import tsml.classifiers.distance_based.utils.collections.params.ParamSpace;
import tsml.classifiers.distance_based.utils.collections.params.ParamSpaceBuilder;
import tsml.data_containers.TimeSeriesInstances;
import tsml.transformers.Derivative;
import static tsml.classifiers.distance_based.distances.DistanceMeasure.DISTANCE_MEASURE_FLAG;
import static tsml.classifiers.distance_based.utils.collections.CollectionUtils.newArrayList;
/**
 * Builds the DDTW parameter space: a single DDTW distance measure whose
 * window sub-space comes from DTWDistanceSpace. Also hosts the DDTW factory
 * used throughout this package.
 */
public class DDTWDistanceSpace implements ParamSpaceBuilder {

    @Override public ParamSpace build(final TimeSeriesInstances data) {
        final ParamSpace windowSpace = new DTWDistanceSpace().build(data);
        final ParamMap map = new ParamMap();
        map.add(DISTANCE_MEASURE_FLAG, newArrayList(newDDTWDistance()), windowSpace);
        return new ParamSpace(map);
    }

    /**
     * Factory for DDTW: DTW applied to the Derivative transform of the data.
     *
     * @return a fresh DDTW distance measure
     */
    public static TransformDistanceMeasure newDDTWDistance() {
        return new BaseTransformDistanceMeasure("DDTWDistance", new Derivative(), new DTWDistance());
    }
}
| 1,334 | 45.034483 | 142 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/distances/dtw/spaces/DTWDistanceContinuousParams.java | package tsml.classifiers.distance_based.distances.dtw.spaces;
import tsml.classifiers.distance_based.utils.collections.params.ParamMap;
import tsml.classifiers.distance_based.utils.collections.params.ParamSpace;
import tsml.classifiers.distance_based.utils.collections.params.ParamSpaceBuilder;
import tsml.classifiers.distance_based.utils.collections.params.distribution.double_based.UniformDoubleDistribution;
import tsml.data_containers.TimeSeriesInstances;
import static tsml.classifiers.distance_based.distances.dtw.DTW.WINDOW_FLAG;
/**
 * Parameter space holding only the DTW window, drawn uniformly from [0, 1].
 */
public class DTWDistanceContinuousParams implements ParamSpaceBuilder {

    @Override public ParamSpace build(final TimeSeriesInstances data) {
        final ParamMap map = new ParamMap();
        map.add(WINDOW_FLAG, new UniformDoubleDistribution(0d, 1d));
        return new ParamSpace(map);
    }
}
| 796 | 48.8125 | 116 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/distances/dtw/spaces/DTWDistanceContinuousSpace.java | package tsml.classifiers.distance_based.distances.dtw.spaces;
import tsml.classifiers.distance_based.distances.dtw.DTWDistance;
import tsml.classifiers.distance_based.utils.collections.params.ParamMap;
import tsml.classifiers.distance_based.utils.collections.params.ParamSpace;
import tsml.classifiers.distance_based.utils.collections.params.ParamSpaceBuilder;
import tsml.data_containers.TimeSeriesInstances;
import static tsml.classifiers.distance_based.distances.DistanceMeasure.DISTANCE_MEASURE_FLAG;
import static tsml.classifiers.distance_based.utils.collections.CollectionUtils.newArrayList;
/**
 * Builds the DTW parameter space with a continuously distributed window:
 * a single DTW distance measure whose window sub-space comes from
 * DTWDistanceContinuousParams.
 */
public class DTWDistanceContinuousSpace implements ParamSpaceBuilder {

    @Override public ParamSpace build(final TimeSeriesInstances data) {
        final ParamSpace windowParams = new DTWDistanceContinuousParams().build(data);
        final ParamMap map = new ParamMap();
        map.add(DISTANCE_MEASURE_FLAG, newArrayList(new DTWDistance()), windowParams);
        return new ParamSpace(map);
    }
}
| 908 | 49.5 | 153 | java |
tsml-java | tsml-java-master/src/main/java/tsml/classifiers/distance_based/distances/dtw/spaces/DTWDistanceFullWindowParams.java | package tsml.classifiers.distance_based.distances.dtw.spaces;
import tsml.classifiers.distance_based.utils.collections.params.ParamMap;
import tsml.classifiers.distance_based.utils.collections.params.ParamSpace;
import tsml.classifiers.distance_based.utils.collections.params.ParamSpaceBuilder;
import tsml.data_containers.TimeSeriesInstances;
import static tsml.classifiers.distance_based.distances.dtw.DTW.WINDOW_FLAG;
import static tsml.classifiers.distance_based.utils.collections.CollectionUtils.newArrayList;
/**
 * Parameter space holding only the full window (1.0) for DTW.
 */
public class DTWDistanceFullWindowParams implements ParamSpaceBuilder {

    /**
     * Build a param space containing the full window for dtw.
     *
     * @param data training data (unused)
     * @return a space with WINDOW_FLAG fixed to 1.0
     */
    @Override public ParamSpace build(final TimeSeriesInstances data) {
        return new ParamSpace(new ParamMap().add(WINDOW_FLAG, newArrayList(1d)));
    }
}
| 911 | 37 | 93 | java |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.