code stringlengths 3 1.18M | language stringclasses 1 value |
|---|---|
/*
* Dataset.java
* Copyright (C) 2012 Universitat Politecnica de Catalunya
* @author Alex Catarineu (a.catarineu@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.recommender.dataset;
/**
 * A forward-only cursor over a stream of (user, item, rating) triples.
 * Call {@code next()} to advance, then read the current triple through the
 * {@code cur*} accessors; {@code reset()} restarts from the beginning.
 */
public interface Dataset {
    /** Advances to the next rating; returns false when the stream is exhausted. */
    public boolean next();
    /** User identifier of the current rating. */
    public int curUserID();
    /** Item identifier of the current rating. */
    public int curItemID();
    /** Rating value of the current rating. */
    public double curRating();
    /** Restarts iteration from the beginning of the stream. */
    public void reset();
}
| Java |
/*
* FlixsterDataset.java
* Copyright (C) 2012 Universitat Politecnica de Catalunya
* @author Alex Catarineu (a.catarineu@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.recommender.dataset.impl;
import java.io.BufferedReader;
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.logging.Level;
import java.util.logging.Logger;
import moa.core.ObjectRepository;
import moa.options.AbstractOptionHandler;
import moa.options.FileOption;
import moa.recommender.dataset.Dataset;
import moa.tasks.TaskMonitor;
/**
 * {@link Dataset} reader for the Flixster ratings file: one rating per line,
 * whitespace-separated as {@code userID itemID rating}.
 */
public class FlixsterDataset extends AbstractOptionHandler implements Dataset {

    /** Most recently read line of the ratings file; parsed by the cur* accessors. */
    private String strLine;

    /** Reader over the ratings file; opened by init(), reopened by reset(). */
    private BufferedReader br;

    public FileOption fileOption = new FileOption("file", 'f',
            "File to load.", "/home/alex/datasets/ratings.txt", "txt", false);

    @Override
    public String getPurposeString() {
        // Fixed copy-paste from JesterDataset: this class reads Flixster data.
        return "A Flixster Dataset reader.";
    }

    /** Opens the configured ratings file for sequential line-by-line reading. */
    public void init() {
        try {
            FileInputStream fstream = new FileInputStream(this.fileOption.getFile());
            br = new BufferedReader(new InputStreamReader(new DataInputStream(fstream)));
        } catch (FileNotFoundException ex) {
            // Fixed copy-paste: log under this class, not MovielensDataset.
            Logger.getLogger(FlixsterDataset.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

    /**
     * Advances to the next rating line.
     *
     * @return true if a line was read; false on end of file or I/O error
     */
    @Override
    public boolean next() {
        try {
            return (strLine = br.readLine()) != null;
        } catch (IOException e) {
            e.printStackTrace();
        }
        return false;
    }

    /** User ID: first whitespace-separated field of the current line. */
    @Override
    public int curUserID() {
        return Integer.parseInt(strLine.split("\\s+")[0]);
    }

    /** Item ID: second whitespace-separated field of the current line. */
    @Override
    public int curItemID() {
        return Integer.parseInt(strLine.split("\\s+")[1]);
    }

    /** Rating value: third whitespace-separated field of the current line. */
    @Override
    public double curRating() {
        return Double.parseDouble(strLine.split("\\s+")[2]);
    }

    @Override
    public String toString() {
        return "Flixster";
    }

    /** Closes the current reader and reopens the file from the beginning. */
    @Override
    public void reset() {
        try {
            br.close();
            this.init();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    @Override
    protected void prepareForUseImpl(TaskMonitor monitor, ObjectRepository repository) {
        this.init();
    }

    @Override
    public void getDescription(StringBuilder sb, int indent) {
        throw new UnsupportedOperationException("Not supported yet.");
    }
}
| Java |
/*
* JesterDataset.java
* Copyright (C) 2012 Universitat Politecnica de Catalunya
* @author Alex Catarineu (a.catarineu@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.recommender.dataset.impl;
import java.io.BufferedReader;
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import moa.recommender.dataset.Dataset;
import java.io.FileNotFoundException;
import java.util.logging.Level;
import java.util.logging.Logger;
import moa.core.ObjectRepository;
import moa.options.AbstractOptionHandler;
import moa.options.FileOption;
import moa.tasks.TaskMonitor;
/**
 * {@link Dataset} reader for the Jester ratings file: one rating per line,
 * whitespace-separated as {@code userID itemID rating}, with ratings on
 * Jester's [-10, 10] scale (rescaled to [1, 5] by {@link #curRating()}).
 */
public class JesterDataset extends AbstractOptionHandler implements Dataset {

    /** Most recently read line of the ratings file; parsed by the cur* accessors. */
    private String strLine;

    /** Reader over the ratings file; opened by init(), reopened by reset(). */
    private BufferedReader br;

    public FileOption fileOption = new FileOption("file", 'f',
            "File to load.", "/home/alicia/datasets/jester/jester_ratings.dat", "dat", false);

    @Override
    public String getPurposeString() {
        return "A Jester Dataset reader.";
    }

    /** Opens the configured ratings file for sequential line-by-line reading. */
    public void init() {
        try {
            FileInputStream fstream = new FileInputStream(this.fileOption.getFile());
            br = new BufferedReader(new InputStreamReader(new DataInputStream(fstream)));
        } catch (FileNotFoundException ex) {
            // Fixed copy-paste: log under this class, not MovielensDataset.
            Logger.getLogger(JesterDataset.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

    /**
     * Advances to the next rating line.
     *
     * @return true if a line was read; false on end of file or I/O error
     */
    @Override
    public boolean next() {
        try {
            return (strLine = br.readLine()) != null;
        } catch (IOException e) {
            e.printStackTrace();
        }
        return false;
    }

    @Override
    public String toString() {
        return "Jester";
    }

    /** User ID: first whitespace-separated field of the current line. */
    @Override
    public int curUserID() {
        return Integer.parseInt(strLine.split("\\s+")[0]);
    }

    /** Item ID: second whitespace-separated field of the current line. */
    @Override
    public int curItemID() {
        return Integer.parseInt(strLine.split("\\s+")[1]);
    }

    /**
     * Rating value: third whitespace-separated field, rescaled from Jester's
     * [-10, 10] scale to [1, 5] via (r / 10) * 2 + 3.
     */
    @Override
    public double curRating() {
        double rating = Double.parseDouble(strLine.split("\\s+")[2]);
        return (rating / 10) * 2 + 3;
    }

    /** Closes the current reader and reopens the file from the beginning. */
    @Override
    public void reset() {
        try {
            br.close();
            this.init();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    @Override
    protected void prepareForUseImpl(TaskMonitor monitor, ObjectRepository repository) {
        this.init();
    }

    @Override
    public void getDescription(StringBuilder sb, int indent) {
        throw new UnsupportedOperationException("Not supported yet.");
    }
}
| Java |
/*
* MovielensDataset.java
* Copyright (C) 2012 Universitat Politecnica de Catalunya
* @author Alex Catarineu (a.catarineu@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.recommender.dataset.impl;
import moa.recommender.dataset.Dataset;
import java.io.BufferedReader;
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.logging.Level;
import java.util.logging.Logger;
import moa.core.ObjectRepository;
import moa.options.AbstractOptionHandler;
import moa.options.FileOption;
import moa.tasks.TaskMonitor;
/**
 * {@link Dataset} reader for the MovieLens 1M ratings file: one rating per
 * line in the {@code userID::itemID::rating::timestamp} format.
 */
public class MovielensDataset extends AbstractOptionHandler implements Dataset {

    /** Most recently read line of the ratings file; parsed by the cur* accessors. */
    private String strLine;

    /** Reader over the ratings file; opened by init(), reopened by reset(). */
    private BufferedReader br;

    public FileOption fileOption = new FileOption("file", 'f',
            "File to load.", "/Users/abifet/Downloads/ml-1M/ratings.dat", "dat", false);

    @Override
    public String getPurposeString() {
        return "A Movie Lens Dataset reader.";
    }

    /** Opens the configured ratings file for sequential line-by-line reading. */
    public void init() {
        try {
            FileInputStream fstream = new FileInputStream(this.fileOption.getFile());
            br = new BufferedReader(new InputStreamReader(new DataInputStream(fstream)));
        } catch (FileNotFoundException ex) {
            Logger.getLogger(MovielensDataset.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

    /**
     * Advances to the next rating line.
     *
     * @return true if a line was read; false on end of file or I/O error
     */
    @Override
    public boolean next() {
        try {
            // Removed redundant (BufferedReader) cast: br is already declared as one.
            return (strLine = br.readLine()) != null;
        } catch (IOException e) {
            e.printStackTrace();
        }
        return false;
    }

    @Override
    public String toString() {
        return "Movielens";
    }

    /** User ID: first "::"-separated field of the current line. */
    @Override
    public int curUserID() {
        return Integer.parseInt(strLine.split("::")[0]);
    }

    /** Item ID: second "::"-separated field of the current line. */
    @Override
    public int curItemID() {
        return Integer.parseInt(strLine.split("::")[1]);
    }

    /** Rating value: third "::"-separated field of the current line. */
    @Override
    public double curRating() {
        return Double.parseDouble(strLine.split("::")[2]);
    }

    /** Closes the current reader and reopens the file from the beginning. */
    @Override
    public void reset() {
        try {
            br.close();
            this.init();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    @Override
    protected void prepareForUseImpl(TaskMonitor monitor, ObjectRepository repository) {
        this.init();
    }

    @Override
    public void getDescription(StringBuilder sb, int indent) {
        throw new UnsupportedOperationException("Not supported yet.");
    }
}
| Java |
/*
* MemRecommenderData.java
* Copyright (C) 2012 Universitat Politecnica de Catalunya
* @author Alex Catarineu (a.catarineu@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.recommender.data;
import moa.core.ObjectRepository;
import moa.options.AbstractOptionHandler;
import moa.tasks.TaskMonitor;
/**
 * Option-handler wrapper that exposes an in-memory rating store
 * ({@code moa.recommender.rc.data.impl.MemRecommenderData}) through the
 * {@link RecommenderData} option interface.
 */
public class MemRecommenderData extends AbstractOptionHandler implements RecommenderData {

    // Backing in-memory store; created when the task prepares this handler.
    moa.recommender.rc.data.impl.MemRecommenderData drm;

    @Override
    protected void prepareForUseImpl(TaskMonitor monitor, ObjectRepository repository) {
        drm = new moa.recommender.rc.data.impl.MemRecommenderData();
    }

    @Override
    public void getDescription(StringBuilder sb, int indent) {
        throw new UnsupportedOperationException("Not supported yet.");
    }

    /** Returns the backing store (null until prepareForUseImpl has run). */
    @Override
    public moa.recommender.rc.data.RecommenderData getData() {
        return drm;
    }
}
| Java |
/*
* RecommenderData.java
* Copyright (C) 2012 Universitat Politecnica de Catalunya
* @author Alex Catarineu (a.catarineu@gmail.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package moa.recommender.data;
/** Option-level provider of an underlying recommender data store. */
public interface RecommenderData {
    /** Returns the wrapped rating-data implementation. */
    public moa.recommender.rc.data.RecommenderData getData();
}
| Java |
/*
* RatingPredictor.java
* Copyright (C) 2012 Universitat Politecnica de Catalunya
* @author Alex Catarineu (a.catarineu@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.recommender.rc.predictor;
import java.io.Serializable;
import java.util.List;
import moa.recommender.rc.data.RecommenderData;
/**
 * A model that predicts the rating a user would give an item, backed by a
 * {@link RecommenderData} store.
 */
public interface RatingPredictor extends Serializable {
    /** Predicts the rating of a single (user, item) pair. */
    public double predictRating(int userID, int itemID);
    /** Predicts the given user's rating for each item, in the same order as itemIDS. */
    public List<Double> predictRatings(int userID, List<Integer> itemIDS);
    /** Returns the rating data this predictor is built on. */
    public RecommenderData getData();
    /** (Re)trains the model from the current contents of the data store. */
    public void train();
}
| Java |
/*
* BaselinePredictor.java
* Copyright (C) 2012 Universitat Politecnica de Catalunya
* @author Alex Catarineu (a.catarineu@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.recommender.rc.predictor.impl;
import java.util.ArrayList;
import java.util.List;
import moa.recommender.rc.data.RecommenderData;
import moa.recommender.rc.predictor.RatingPredictor;
/**
 * Non-personalized baseline predictor: estimates a rating as the user's
 * average offset from the global mean plus the item's average rating,
 * clamped to the valid rating range of the data set.
 */
public class BaselinePredictor implements RatingPredictor {

    private static final long serialVersionUID = 8444152568941483368L;

    /** Rating store providing the per-user/per-item averages. */
    protected RecommenderData data;

    public BaselinePredictor(RecommenderData data) {
        this.data = data;
    }

    /** Single-pair prediction: delegates to the batch method with a one-item list. */
    @Override
    public double predictRating(int userID, int itemID) {
        ArrayList<Integer> singleItem = new ArrayList<Integer>();
        singleItem.add(itemID);
        return predictRatings(userID, singleItem).get(0);
    }

    /**
     * Predicts the user's rating for every item in itemIDS (same order).
     * Each estimate is (userAvg - globalMean) + itemAvg, clamped to
     * [minRating, maxRating].
     */
    @Override
    public List<Double> predictRatings(int userID, List<Integer> itemIDS) {
        ArrayList<Double> predictions = new ArrayList<Double>(itemIDS.size());
        // Hoisted: the user's offset from the global mean is the same for all items.
        double userOffset = data.getAvgRatingUser(userID) - data.getGlobalMean();
        for (int itemID : itemIDS) {
            double estimate = userOffset + data.getAvgRatingItem(itemID);
            estimate = Math.max(estimate, data.getMinRating());
            estimate = Math.min(estimate, data.getMaxRating());
            predictions.add(estimate);
        }
        return predictions;
    }

    @Override
    public RecommenderData getData() {
        return data;
    }

    /** The baseline needs no training: averages are maintained by the data store. */
    @Override
    public void train() {
    }
}
| Java |
/*
* BRISMFPredictor.java
* Copyright (C) 2012 Universitat Politecnica de Catalunya
* @author Alex Catarineu (a.catarineu@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.recommender.rc.predictor.impl;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import moa.recommender.rc.data.RecommenderData;
import moa.recommender.rc.utils.Pair;
import moa.recommender.rc.utils.Rating;
import moa.recommender.rc.utils.SparseVector;
import moa.recommender.rc.utils.Updatable;
/**
 * Implementation of the algorithm described in Scalable
 * Collaborative Filtering Approaches for Large Recommender
 * Systems (Gábor Takács, István Pilászy, Bottyán Németh,
 * and Domonkos Tikk). A feature vector is learned for every
 * user and item, so that the prediction of a rating is roughly
 * the dot product of the corresponding user and item vector.
 * Stochastic gradient descent is used to train the model,
 * minimizing its prediction error. Both Tikhonov regularization
 * and early stopping are used to reduce overfitting. The
 * algorithm allows batch training (from scratch, using all
 * ratings available at the moment) as well as incremental,
 * by retraining only the affected user and item vectors when
 * a new rating is inserted.
 *
 * <p>Parameters:</p>
 * <ul>
 * <li> features - the number of features to be trained for each user and
 * item</li>
 * <li> learning rate - the learning rate used in the regularization</li>
 * <li> ratio - the regularization ratio to be used in the Tikhonov
 * regularization</li>
 * <li> iterations - the number of iterations to be used when retraining
 * user and item features (online training). </li>
 * </ul>
 */
public class BRISMFPredictor implements Updatable {

    protected RecommenderData data;

    /** Number of latent features per user/item vector. */
    protected int nFeatures;

    /**
     * Per-user feature vectors. Index 0 is pinned to 1 by resetFeatures and
     * never updated, so itemFeats[0] acts as the item bias.
     */
    protected HashMap<Integer, float[]> userFeature;

    /**
     * Per-item feature vectors. Index 1 is pinned to 1 by resetFeatures and
     * never updated, so userFeats[1] acts as the user bias.
     */
    protected HashMap<Integer, float[]> itemFeature;

    /** Fixed-seed RNG so initialization and retrain sampling are reproducible. */
    protected Random rnd;

    protected double lRate = 0.01;   // SGD learning rate
    protected double rFactor = 0.02; // Tikhonov regularization factor
    protected int nIterations = 30;  // epochs when retraining a single vector online

    public void setLRate(double lRate) {
        this.lRate = lRate;
    }

    public void setRFactor(double rFactor) {
        this.rFactor = rFactor;
    }

    public void setNIterations(int nIterations) {
        this.nIterations = nIterations;
    }

    public RecommenderData getData() {
        return data;
    }

    /**
     * @param nFeatures number of latent features per vector
     * @param data rating store to learn from; this predictor registers itself
     *             as an observer of it
     * @param train whether to run a full batch training immediately
     */
    public BRISMFPredictor(int nFeatures, RecommenderData data, boolean train) {
        this.data = data;
        this.nFeatures = nFeatures;
        this.userFeature = new HashMap<Integer, float[]>();
        this.itemFeature = new HashMap<Integer, float[]>();
        this.rnd = new Random(12345);
        data.attachUpdatable(this);
        if (train) train();
    }

    /** As the other constructor, but with explicit learning rate and regularization. */
    public BRISMFPredictor(int nFeatures, RecommenderData data, double lRate, double rFactor, boolean train) {
        this.data = data;
        this.nFeatures = nFeatures;
        this.userFeature = new HashMap<Integer, float[]>();
        this.itemFeature = new HashMap<Integer, float[]>();
        this.rnd = new Random(12345);
        this.lRate = lRate;
        this.rFactor = rFactor;
        data.attachUpdatable(this);
        if (train) train();
    }

    /**
     * Fills feats with small random values in (-0.01, 0.01) and pins the bias
     * slot to 1: index 0 for user vectors, index 1 for item vectors.
     */
    private void resetFeatures(float[] feats, boolean userFeats) {
        int n = feats.length;
        for (int i = 0; i < n; ++i)
            feats[i] = (float)0.01*(rnd.nextFloat()*2 - 1);
        if (userFeats) feats[0] = 1;
        else feats[1] = 1;
    }

    /** Predicts the rating for a (user, item) pair from the stored feature vectors. */
    public double predictRating(int userID, int itemID) {
        float[] userFeats = userFeature.get(userID);
        float[] itemFeats = itemFeature.get(itemID);
        return predictRating(userFeats, itemFeats);
    }

    /**
     * Global mean plus the dot product of the two vectors, clamped to the valid
     * rating range. If either vector is null (unknown user/item), the clamped
     * global mean is returned.
     */
    public double predictRating(float userFeats[], float itemFeats[]) {
        double ret = data.getGlobalMean();
        if (userFeats != null && itemFeats != null)
            for (int i = 0; i < nFeatures; ++i)
                ret += userFeats[i]*itemFeats[i];
        if (ret < data.getMinRating()) ret = data.getMinRating();
        else if (ret > data.getMaxRating()) ret = data.getMaxRating();
        return ret;
    }

    /**
     * Trains a fresh user vector against the given (item, rating) pairs for
     * nIts epochs, holding all item vectors fixed. Index 0 (the pinned slot)
     * is never updated.
     */
    public float[] trainUserFeats(List<Integer> itm, List<Double> rat, int nIts) {
        float[] userFeats = new float[nFeatures];
        resetFeatures(userFeats, true);
        int n = itm.size();
        for (int k = 0; k < nIts; ++k) {
            for (int i = 0; i < n; ++i) {
                int itemID = itm.get(i);
                float[] itemFeats = itemFeature.get(itemID);
                double rating = rat.get(i);
                double pred = predictRating(userFeats, itemFeats);
                double err = rating - pred;
                if (itemFeats != null)
                    for (int j = 1; j < nFeatures; ++j)
                        userFeats[j] += lRate*(err*itemFeats[j] - rFactor*userFeats[j]);
            }
        }
        return userFeats;
    }

    /**
     * Trains a fresh item vector against the given (user, rating) pairs for
     * nIts epochs, holding all user vectors fixed. Index 1 (the pinned slot)
     * is never updated; index 0 (the item bias) is.
     * (itemID is currently unused; kept for signature compatibility.)
     */
    public float[] trainItemFeats(int itemID, List<Integer> usr, List<Double> rat, int nIts) {
        float[] itemFeats = new float[nFeatures];
        resetFeatures(itemFeats, false);
        int n = usr.size();
        for (int k = 0; k < nIts; ++k) {
            for (int i = 0; i < n; ++i) {
                int userID = usr.get(i);
                float[] userFeats = userFeature.get(userID);
                double rating = rat.get(i);
                double pred = predictRating(userFeats, itemFeats);
                double err = rating - pred;
                if (userFeats != null) {
                    itemFeats[0] += lRate*(err*userFeats[0] - rFactor*itemFeats[0]);
                    for (int j = 2; j < nFeatures; ++j)
                        itemFeats[j] += lRate*(err*userFeats[j] - rFactor*itemFeats[j]);
                }
            }
        }
        return itemFeats;
    }

    /** Retrains and stores the user's vector from explicit ratings, nIts epochs. */
    public void trainUser(int userID, List<Integer> itm, List<Double> rat, int nIts) {
        userFeature.put(userID, trainUserFeats(itm, rat, nIts));
    }

    /** Retrains the user's vector from the ratings currently in the store, nIts epochs. */
    public void trainUser(int userID, int nIts) {
        SparseVector usrRats = data.getRatingsUser(userID);
        ArrayList<Integer> itm = new ArrayList<Integer>();
        ArrayList<Double> rat = new ArrayList<Double>();
        Iterator<Pair<Integer, Double>> it = usrRats.iterator();
        while (it.hasNext()) {
            Pair<Integer, Double> p = it.next();
            itm.add(p.getFirst());
            rat.add(p.getSecond());
        }
        trainUser(userID, itm, rat, nIts);
    }

    /** Retrains and stores the user's vector from explicit ratings, default epochs. */
    public void trainUser(int userID, List<Integer> itm, List<Double> rat) {
        userFeature.put(userID, trainUserFeats(itm, rat, nIterations));
    }

    /** Retrains the item's vector from the ratings currently in the store, default epochs. */
    public void trainItem(int itemID) {
        SparseVector itmRats = data.getRatingsItem(itemID);
        ArrayList<Integer> usr = new ArrayList<Integer>();
        ArrayList<Double> rat = new ArrayList<Double>();
        Iterator<Pair<Integer, Double>> it = itmRats.iterator();
        while (it.hasNext()) {
            Pair<Integer, Double> p = it.next();
            usr.add(p.getFirst());
            rat.add(p.getSecond());
        }
        trainItem(itemID, usr, rat);
    }

    /** Retrains the item's vector from the ratings currently in the store, nIts epochs. */
    public void trainItem(int itemID, int nIts) {
        SparseVector itmRats = data.getRatingsItem(itemID);
        ArrayList<Integer> usr = new ArrayList<Integer>();
        ArrayList<Double> rat = new ArrayList<Double>();
        Iterator<Pair<Integer, Double>> it = itmRats.iterator();
        while (it.hasNext()) {
            Pair<Integer, Double> p = it.next();
            usr.add(p.getFirst());
            rat.add(p.getSecond());
        }
        trainItem(itemID, usr, rat, nIts);
    }

    /** Retrains the user's vector from the ratings currently in the store, default epochs. */
    public void trainUser(int userID) {
        SparseVector usrRats = data.getRatingsUser(userID);
        ArrayList<Integer> itm = new ArrayList<Integer>();
        ArrayList<Double> rat = new ArrayList<Double>();
        Iterator<Pair<Integer, Double>> it = usrRats.iterator();
        while (it.hasNext()) {
            Pair<Integer, Double> p = it.next();
            itm.add(p.getFirst());
            rat.add(p.getSecond());
        }
        trainUser(userID, itm, rat);
    }

    /** Retrains and stores the item's vector from explicit ratings, default epochs. */
    public void trainItem(int itemID, List<Integer> usr, List<Double> rat) {
        itemFeature.put(itemID, trainItemFeats(itemID, usr, rat, nIterations));
    }

    /** Retrains and stores the item's vector from explicit ratings, nIts epochs. */
    public void trainItem(int itemID, List<Integer> usr, List<Double> rat, int nIts) {
        itemFeature.put(itemID, trainItemFeats(itemID, usr, rat, nIts));
    }

    /**
     * Full batch training: reinitializes every feature vector, then runs SGD
     * epochs over all ratings. Roughly 1/trainDiv of the ratings are held out
     * (collected on the first epoch only) to measure RMSE; training stops when
     * the held-out RMSE stops improving by more than 0.0001 (early stopping).
     */
    public void train() {
        userFeature.clear();
        itemFeature.clear();
        int n = data.getNumRatings();
        Iterator<Integer> it = data.getUsers().iterator();
        while (it.hasNext()) {
            float[] feats = new float[nFeatures];
            resetFeatures(feats, true);
            userFeature.put(it.next(), feats);
        }
        it = data.getItems().iterator();
        while (it.hasNext()) {
            float[] feats = new float[nFeatures];
            resetFeatures(feats, false);
            itemFeature.put(it.next(), feats);
        }
        int exit = 0;
        double lastRMSE = 1e20;
        int count = 0;
        int trainDiv = Math.max(20, n/1000000);
        ArrayList<Rating> ratTest = new ArrayList<Rating>(n/trainDiv);
        do {
            long start = System.currentTimeMillis();
            Iterator<Rating> ratIt = data.ratingIterator();
            int idx = 0;
            while (ratIt.hasNext()) {
                Rating rat = ratIt.next();
                if (idx%trainDiv == 0) {
                    // Held-out sample: collected once, never trained on.
                    if (count == 0) ratTest.add(rat);
                }
                else {
                    int userID = rat.userID;
                    int itemID = rat.itemID;
                    double rating = rat.rating;
                    float[] userFeats = userFeature.get(userID);
                    float[] itemFeats = itemFeature.get(itemID);
                    double pred = predictRating(userFeats, itemFeats);
                    double err = rating - pred;
                    // Bias slots: itemFeats[0] (item bias) and userFeats[1] (user bias).
                    itemFeats[0] += lRate*(err*userFeats[0] - rFactor*itemFeats[0]);
                    userFeats[1] += lRate*(err*itemFeats[1] - rFactor*userFeats[1]);
                    for (int j = 2; j < nFeatures; ++j) {
                        // Save the pre-update user value so the item update uses it.
                        double uv = userFeats[j];
                        userFeats[j] += lRate*(err*itemFeats[j] - rFactor*userFeats[j]);
                        itemFeats[j] += lRate*(err*uv - rFactor*itemFeats[j]);
                    }
                }
                ++idx;
            }
            int nTest = ratTest.size();
            double sum = 0;
            for (int i = 0; i < nTest; ++i) {
                int userID = ratTest.get(i).userID;
                int itemID = ratTest.get(i).itemID;
                double rating = ratTest.get(i).rating;
                double pred = predictRating(userID, itemID);
                sum += Math.pow(rating - pred, 2);
            }
            double curRMSE = Math.sqrt(sum/(double)nTest);
            System.out.println(curRMSE + " " + (System.currentTimeMillis() - start)/1000);
            if (curRMSE + 0.0001 >= lastRMSE) {
                ++exit;
            }
            lastRMSE = curRMSE;
            ++count;
        }
        while (exit < 1);
    }

    public float[] getUserFeatures(int userID) {
        return userFeature.get(userID);
    }

    public float[] getItemFeatures(int itemID) {
        return itemFeature.get(itemID);
    }

    public int getNumFeatures() {
        return nFeatures;
    }

    @Override
    public void updateNewUser(int userID, List<Integer> ratedItems,
            List<Double> ratings) {
        if (!ratedItems.isEmpty()) {
            trainUser(userID, ratedItems, ratings);
        }
    }

    @Override
    public void updateNewItem(int itemID, List<Integer> ratingUsers,
            List<Double> ratings) {
        if (!ratingUsers.isEmpty()) {
            trainItem(itemID, ratingUsers, ratings);
        }
    }

    @Override
    public void updateRemoveUser(int userID) {
        userFeature.remove(userID);
    }

    @Override
    public void updateRemoveItem(int itemID) {
        itemFeature.remove(itemID);
    }

    //We retrain the user/item separately, depending on a probability
    //calculated using the error when predicting the new rating
    //TODO: parametrize this
    @Override
    public void updateSetRating(int userID, int itemID, double rating) {
        double nUsr = data.countRatingsUser(userID);
        double nItm = data.countRatingsItem(itemID);
        // Retrain probability decays with the number of existing ratings.
        double prob1 = Math.pow(0.99, nUsr);
        double prob2 = Math.pow(0.99, nItm);
        if (nUsr < 5 || rnd.nextDouble() < prob1) {
            SparseVector usrRats = data.getRatingsUser(userID);
            ArrayList<Integer> itm = new ArrayList<Integer>();
            ArrayList<Double> rat = new ArrayList<Double>();
            //Train user: rebuild the rating list, substituting the new value
            //if the pair already exists.
            boolean found = false;
            Iterator<Pair<Integer, Double>> it = usrRats.iterator();
            while (it.hasNext()) {
                Pair<Integer, Double> p = it.next();
                itm.add(p.getFirst());
                if (p.getFirst() == itemID) {
                    found = true;
                    rat.add(rating);
                }
                else rat.add(p.getSecond());
            }
            if (!found) {
                itm.add(itemID);
                rat.add(rating);
            }
            trainUser(userID, itm, rat);
        }
        if (nItm < 5 || rnd.nextDouble() < prob2) {
            SparseVector itmRats = data.getRatingsItem(itemID);
            //Train item
            Iterator<Pair<Integer, Double>> it = itmRats.iterator();
            boolean found = false;
            ArrayList<Integer> usr = new ArrayList<Integer>();
            ArrayList<Double> rat = new ArrayList<Double>();
            while (it.hasNext()) {
                Pair<Integer, Double> p = it.next();
                usr.add(p.getFirst());
                if (p.getFirst() == userID) {
                    found = true;
                    rat.add(rating);
                }
                else rat.add(p.getSecond());
            }
            if (!found) {
                // BUG FIX: was usr.add(itemID) — this list holds user IDs and is
                // paired with user feature vectors in trainItemFeats.
                usr.add(userID);
                rat.add(rating);
            }
            trainItem(itemID, usr, rat);
        }
    }

    @Override
    public void updateRemoveRating(int userID, int itemID) {
    }

    /** Predicts the user's rating for each item in itemIDS, in order. */
    public List<Double> predictRatings(int userID, List<Integer> itemIDS) {
        int n = itemIDS.size();
        ArrayList<Double> ret = new ArrayList<Double>(n);
        for (int i = 0; i < n; ++i)
            ret.add(predictRating(userID, itemIDS.get(i)));
        return ret;
    }
}
| Java |
/*
* AbstractRecommenderData.java
* Copyright (C) 2012 Universitat Politecnica de Catalunya
* @author Alex Catarineu (a.catarineu@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.recommender.rc.data;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import moa.recommender.rc.utils.Updatable;
/**
 * Base class for recommender data stores: maintains a list of {@link Updatable}
 * observers and broadcasts every mutation (add/remove user, item, or rating)
 * to them, unless updates are disabled.
 */
public abstract class AbstractRecommenderData implements RecommenderData {

    private static final long serialVersionUID = -5409390358073330733L;

    /** Observers notified of every data mutation, in attachment order. */
    protected ArrayList<Updatable> updatables;

    /** When true, mutations are applied without notifying observers. */
    protected boolean disableUpdates = false;

    public AbstractRecommenderData() {
        this.updatables = new ArrayList<Updatable>();
    }

    /** Enables or disables observer notification for subsequent mutations. */
    public void disableUpdates(boolean disable) {
        this.disableUpdates = disable;
    }

    /** Notifies observers that a user was added with an initial set of ratings. */
    public void addUser(int userID, List<Integer> ratedItems, List<Double> ratings) {
        // The flag is re-checked per observer, matching the original semantics.
        for (Updatable observer : updatables) {
            if (!disableUpdates) {
                observer.updateNewUser(userID, ratedItems, ratings);
            }
        }
    }

    /** Notifies observers that a user was removed. */
    public void removeUser(int userID) {
        for (Updatable observer : updatables) {
            if (!disableUpdates) {
                observer.updateRemoveUser(userID);
            }
        }
    }

    /** Notifies observers that an item was added with an initial set of ratings. */
    public void addItem(int itemID, List<Integer> ratingUsers, List<Double> ratings) {
        for (Updatable observer : updatables) {
            if (!disableUpdates) {
                observer.updateNewItem(itemID, ratingUsers, ratings);
            }
        }
    }

    /** Notifies observers that an item was removed. */
    public void removeItem(int itemID) {
        for (Updatable observer : updatables) {
            if (!disableUpdates) {
                observer.updateRemoveItem(itemID);
            }
        }
    }

    /** Notifies observers that a rating was set (added or changed). */
    public void setRating(int userID, int itemID, double rating) {
        for (Updatable observer : updatables) {
            if (!disableUpdates) {
                observer.updateSetRating(userID, itemID, rating);
            }
        }
    }

    /** Notifies observers that a rating was removed. */
    public void removeRating(int userID, int itemID) {
        for (Updatable observer : updatables) {
            if (!disableUpdates) {
                observer.updateRemoveRating(userID, itemID);
            }
        }
    }

    /** Registers an observer to be notified of future mutations. */
    public void attachUpdatable(Updatable obj) {
        updatables.add(obj);
    }

    /** Detaches all observers and re-enables notifications. */
    public void clear() {
        updatables.clear();
        disableUpdates = false;
    }

    /** No resources to release by default; subclasses may override. */
    public void close() {
    }
}
| Java |
/*
* MemRecommenderData.java
* Copyright (C) 2012 Universitat Politecnica de Catalunya
* @author Alex Catarineu (a.catarineu@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.recommender.rc.data.impl;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import moa.recommender.rc.data.AbstractRecommenderData;
import moa.recommender.rc.utils.Rating;
import moa.recommender.rc.utils.SparseVector;
public class MemRecommenderData extends AbstractRecommenderData {
private static final long serialVersionUID = 2844235954903772074L;
/** Running sum and count of ratings for a single user or item. */
class EntityStats implements Serializable {
    private static final long serialVersionUID = -8933750377510577120L;
    public double sum = 0;
    public double num = 0;
}
// userID -> (itemID -> rating)
protected Map<Integer, Map<Integer, Double>> ratingsUser;
// itemID -> (userID -> rating); mirror of ratingsUser keyed by item
protected Map<Integer, Map<Integer, Double>> ratingsItem;
// Per-user and per-item rating statistics (presumably for averages — TODO confirm
// against the accessors defined later in this class).
protected Map<Integer, EntityStats> usersStats;
protected Map<Integer, EntityStats> itemsStats;
protected int nItems = 0;
protected int nUsers = 0;
protected double sumRatings = 0; // sum of all ratings across the store
protected int nRatings = 0;
protected double minRating = 0;
protected double maxRating = 0;
protected class RatingIterator implements Iterator<Rating> {
private int currentUser = -1;
private Iterator<Integer> userIt = null;
private Iterator<Entry<Integer, Double>> ratsIt = null;
private boolean calculated = false;
private boolean result = true;
RatingIterator() throws Exception {
}
@Override
public boolean hasNext() {
if (calculated)
return result;
calculated = true;
result = false;
if (ratsIt == null) {
if (!ratingsUser.isEmpty()) {
userIt = ratingsUser.keySet().iterator();
if (userIt.hasNext()) {
Integer first = userIt.next();
currentUser = first;
ratsIt = ratingsUser.get(first).entrySet().iterator();
if (ratsIt.hasNext()) {
result = true;
}
}
}
}
else {
if (ratsIt.hasNext()) {
result = true;
}
else if (userIt.hasNext()) {
Integer first = userIt.next();
currentUser = first;
ratsIt = ratingsUser.get(first).entrySet().iterator();
if (ratsIt.hasNext()) {
result = true;
}
}
}
return result;
}
@Override
public Rating next() {
if (!calculated)
hasNext();
calculated = false;
Entry<Integer, Double> pair = ratsIt.next();
return new Rating(currentUser, pair.getKey(), pair.getValue());
}
@Override
public void remove() {
// TODO Auto-generated method stub
}
}
public MemRecommenderData() {
super();
ratingsItem = new HashMap<Integer, Map<Integer, Double>>();
ratingsUser = new HashMap<Integer, Map<Integer, Double>>();
usersStats = new HashMap<Integer, EntityStats>();
itemsStats = new HashMap<Integer, EntityStats>();
}
@Override
public void addUser(int userID, List<Integer> ratedItems, List<Double> ratings) {
super.addUser(userID, ratedItems, ratings);
ratingsUser.put(userID, new HashMap<Integer, Double>());
usersStats.put(userID, new EntityStats());
int n = ratedItems.size();
for (int i = 0; i < n; ++i)
auxSetRating(userID, ratedItems.get(i), ratings.get(i));
}
//FIXME: have to update item stats!!!
@Override
public void removeUser(int userID) {
super.removeUser(userID);
ratingsUser.remove(userID);
usersStats.remove(userID);
}
@Override
public void addItem(int itemID, List<Integer> ratingUsers, List<Double> ratings) {
super.addItem(itemID, ratingUsers, ratings);
ratingsItem.put(itemID, new HashMap<Integer, Double>());
itemsStats.put(itemID, new EntityStats());
int n = ratingUsers.size();
for (int i = 0; i < n; ++i)
auxSetRating(ratingUsers.get(i), itemID, ratings.get(i));
}
//FIXME: have to update user stats!!!
@Override
public void removeItem(int itemID) {
super.removeItem(itemID);
ratingsItem.remove(itemID);
itemsStats.remove(itemID);
}
private void auxSetRating(int userID, int itemID, double rating) {
if (nRatings == 0) {
minRating = rating;
maxRating = rating;
}
else {
minRating = Math.min(minRating, rating);
maxRating = Math.max(maxRating, rating);
}
EntityStats userStats = usersStats.get(userID);
EntityStats itemStats = itemsStats.get(itemID);
if (userStats == null) {
++nUsers;
ratingsUser.put(userID, new HashMap<Integer, Double>());
userStats = new EntityStats();
usersStats.put(userID, userStats);
}
if (itemStats == null) {
++nItems;
ratingsItem.put(itemID, new HashMap<Integer, Double>());
itemStats = new EntityStats();
itemsStats.put(itemID, itemStats);
}
Map<Integer, Double> ratUser = ratingsUser.get(userID);
Map<Integer, Double> ratItem = ratingsItem.get(itemID);
Double rat = ratUser.get(itemID);
if (rat != null) {
sumRatings -= rat;
userStats.sum -= rat;
userStats.num--;
itemStats.sum -= rat;
itemStats.num--;
--nRatings;
}
userStats.sum += rating;
userStats.num++;
itemStats.sum += rating;
itemStats.num++;
sumRatings += rating;
++nRatings;
ratUser.put(itemID, rating);
ratItem.put(userID, rating);
}
@Override
public void setRating(int userID, int itemID, double rating) {
super.setRating(userID, itemID, rating);
auxSetRating(userID, itemID, rating);
}
@Override
public void removeRating(int userID, int itemID) {
super.removeRating(userID, itemID);
Map<Integer, Double> ratUser = ratingsUser.get(userID);
Map<Integer, Double> ratItem = ratingsItem.get(itemID);
Double rat = ratUser.get(itemID);
EntityStats userStats = usersStats.get(userID);
EntityStats itemStats = itemsStats.get(itemID);
if (rat != null) {
sumRatings -= rat;
--nRatings;
userStats.sum -= rat;
userStats.num--;
itemStats.sum -= rat;
itemStats.num--;
ratUser.remove(itemID);
ratItem.remove(userID);
}
}
@Override
public SparseVector getRatingsUser(int userID) {
Map<Integer, Double> ratUser = ratingsUser.get(userID);
return new SparseVector(ratUser);
}
@Override
public double getRating(int userID, int itemID) {
Map<Integer, Double> ratUser = ratingsUser.get(userID);
return (ratUser.get(itemID) != null ? ratUser.get(itemID) : 0);
}
@Override
public int getNumItems() {
return nItems;
}
@Override
public int getNumUsers() {
return nUsers;
}
@Override
public double getAvgRatingUser(int userID) {
EntityStats stats = usersStats.get(userID);
double sum = (stats != null ? stats.sum : 0);
double num = (stats != null ? stats.num : 0);
double mean = (nRatings > 0 ? sumRatings/(double)nRatings : (minRating + maxRating)/2.0);
return (mean*25 + sum)/(25 + num);
}
@Override
public double getAvgRatingItem(int itemID) {
EntityStats stats = itemsStats.get(itemID);
double sum = (stats != null ? stats.sum : 0);
double num = (stats != null ? stats.num : 0);
double mean = (nRatings > 0 ? sumRatings/(double)nRatings : (minRating + maxRating)/2.0);
return (mean*25 + sum)/(25 + num);
}
@Override
public double getMinRating() {
return minRating;
}
@Override
public double getMaxRating() {
return maxRating;
}
@Override
public Set<Integer> getUsers() {
return usersStats.keySet();
}
@Override
public SparseVector getRatingsItem(int itemID) {
Map<Integer, Double> ratItem = ratingsItem.get(itemID);
return new SparseVector(ratItem);
}
@Override
public Set<Integer> getItems() {
return itemsStats.keySet();
}
@Override
public double getGlobalMean() {
return (nRatings > 0 ? sumRatings/(double)nRatings : (minRating + maxRating)/2.0);
}
@Override
public int countRatingsUser(int userID) {
EntityStats stats = usersStats.get(userID);
return (stats != null ? (int)stats.num : 0);
}
@Override
public int countRatingsItem(int itemID) {
EntityStats stats = itemsStats.get(itemID);
return (stats != null ? (int)stats.num : 0);
}
@Override
public Iterator<Rating> ratingIterator() {
// TODO Auto-generated method stub
return null;
}
@Override
public int getNumRatings() {
return nRatings;
}
@Override
public boolean userExists(int userID) {
return usersStats.containsKey(userID);
}
@Override
public boolean itemExists(int itemID) {
return itemsStats.containsKey(itemID);
}
@Override
public void clear() {
usersStats.clear();
itemsStats.clear();
minRating = maxRating = nItems = nUsers = 0;
sumRatings = nRatings = 0;
ratingsUser.clear();
ratingsItem.clear();
}
}
| Java |
/*
* RecommenderData.java
* Copyright (C) 2012 Universitat Politecnica de Catalunya
* @author Alex Catarineu (a.catarineu@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.recommender.rc.data;
import java.io.Serializable;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import moa.recommender.rc.utils.Rating;
import moa.recommender.rc.utils.SparseVector;
import moa.recommender.rc.utils.Updatable;
/**
 * Contract for a store of user/item ratings used by the recommender
 * algorithms. Implementations hold the rating matrix plus summary statistics
 * (counts, averages, min/max) and can broadcast mutations to attached
 * {@link Updatable} observers.
 */
public interface RecommenderData extends Serializable {
    /** Registers a user with an initial batch of ratings (parallel lists). */
    public void addUser(int userID, List<Integer> ratedItems, List<Double> ratings);
    /** Removes a user. */
    public void removeUser(int userID);
    /** Registers an item with an initial batch of ratings (parallel lists). */
    public void addItem(int itemID, List<Integer> ratingUsers, List<Double> ratings);
    /** Removes an item. */
    public void removeItem(int itemID);
    /** Inserts or replaces one rating. */
    public void setRating(int userID, int itemID, double rating);
    /** Deletes one rating. */
    public void removeRating(int userID, int itemID);
    /** All ratings given by one user, indexed by item. */
    public SparseVector getRatingsUser(int userID); //TODO:Iterator version for this?
    /** All ratings received by one item, indexed by user. */
    public SparseVector getRatingsItem(int itemID); //TODO:Iterator version for this?
    /** The stored rating for the pair (semantics for missing pairs are implementation-defined). */
    public double getRating(int userID, int itemID);
    public int getNumItems();
    public int getNumUsers();
    public int getNumRatings();
    /** Average rating of a user (implementations may smooth with the global mean). */
    public double getAvgRatingUser(int userID);
    /** Average rating of an item (implementations may smooth with the global mean). */
    public double getAvgRatingItem(int itemID);
    public double getMinRating();
    public double getMaxRating();
    /** IDs of all known users. */
    public Set<Integer> getUsers();
    /** IDs of all known items. */
    public Set<Integer> getItems();
    /** Mean over all stored ratings. */
    public double getGlobalMean();
    /** Registers an observer notified of subsequent mutations. */
    public void attachUpdatable(Updatable obj);
    /** Enables/disables propagation of mutations to attached observers. */
    public void disableUpdates(boolean disable);
    public int countRatingsUser(int userID);
    public int countRatingsItem(int itemID);
    /** Iterates over every stored rating. */
    public Iterator<Rating> ratingIterator();
    public boolean userExists(int userID);
    public boolean itemExists(int itemID);
    /** Forgets all stored data. */
    public void clear();
    /** Releases any external resources. */
    public void close();
}
| Java |
/*
* DenseVector.java
* Copyright (C) 2012 Universitat Politecnica de Catalunya
* @author Alex Catarineu (a.catarineu@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.recommender.rc.utils;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
/**
 * Dense {@link Vector} backed by an {@code ArrayList<Double>}; the entry
 * index is the list position, so every index from 0 to size()-1 is present.
 */
public class DenseVector extends Vector {

    private static final long serialVersionUID = -6077169543484777829L;

    private ArrayList<Double> list;

    public DenseVector() {
        list = new ArrayList<Double>();
    }

    /** Wraps (does not copy) the given backing list. */
    public DenseVector(ArrayList<Double> list) {
        this.list = list;
    }

    @Override
    public int size() {
        return list.size();
    }

    /**
     * Stores {@code val} at {@code index}, growing the vector with zeros as
     * needed. BUG FIX: the original looped {@code while (index < list.size())},
     * which spun forever for an in-range index (the list keeps growing) and
     * threw IndexOutOfBoundsException for an out-of-range one.
     */
    @Override
    public void set(int index, double val) {
        while (list.size() <= index)
            list.add(0.0);
        list.set(index, val);
    }

    /** Removes the entry at {@code index}, shifting later entries down. */
    @Override
    public void remove(int index) {
        list.remove(index);
    }

    /** Returns the value at {@code index}, or null when out of range. */
    @Override
    public Double get(int index) {
        if (index < 0 || index >= list.size()) return null;
        return list.get(index);
    }

    /** All valid indices: simply 0 .. size()-1. */
    @Override
    public Set<Integer> getIdxs() {
        HashSet<Integer> keys = new HashSet<Integer>();
        for (int i = 0; i < list.size(); ++i)
            keys.add(i);
        return keys;
    }

    @Override
    public Vector copy() {
        return new DenseVector(new ArrayList<Double>(list));
    }

    /** Iterates (index, value) pairs in index order. */
    public class DenseVectorIterator implements Iterator<Pair<Integer, Double>> {
        private int index = 0;
        @Override
        public boolean hasNext() {
            return index < DenseVector.this.list.size();
        }
        @Override
        public Pair<Integer, Double> next() {
            return new Pair<Integer, Double>(index, DenseVector.this.list.get(index++));
        }
        @Override
        public void remove() {
            // BUG FIX: the original removed list.get(index), i.e. the element
            // AFTER the one last returned by next(). Step back so the element
            // just returned is removed and iteration continues correctly.
            list.remove(--index);
        }
    }

    @Override
    public Iterator<Pair<Integer, Double>> iterator() {
        return new DenseVectorIterator();
    }
}
| Java |
/*
* Rating.java
* Copyright (C) 2012 Universitat Politecnica de Catalunya
* @author Alex Catarineu (a.catarineu@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.recommender.rc.utils;
/**
 * Plain value holder for one rating event: which user rated which item,
 * and with what value. Fields are public by design for cheap access.
 */
public class Rating {

    /** Identifier of the user who gave the rating. */
    public int userID;

    /** Identifier of the rated item. */
    public int itemID;

    /** The rating value. */
    public double rating;

    /**
     * Builds a rating record.
     *
     * @param userID id of the rating user
     * @param itemID id of the rated item
     * @param rating the rating value
     */
    public Rating(int userID, int itemID, double rating) {
        this.userID = userID;
        this.itemID = itemID;
        this.rating = rating;
    }
}
/*
* SparseVector.java
* Copyright (C) 2012 Universitat Politecnica de Catalunya
* @author Alex Catarineu (a.catarineu@gmail.com)
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.recommender.rc.utils;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
/**
 * Sparse {@link Vector} backed by a map from index to value; indices that
 * are absent from the map are implicit zeros (get() returns null for them).
 */
public class SparseVector extends Vector {

    private static final long serialVersionUID = 1971022389328939125L;

    /** Backing storage: index -> value. */
    private Map<Integer, Double> map;

    /** Creates an empty sparse vector. */
    public SparseVector() {
        this(null);
    }

    /** Wraps (does not copy) the given map; null means empty. */
    public SparseVector(Map<Integer, Double> map) {
        this.map = (map != null) ? map : new HashMap<Integer, Double>();
    }

    @Override
    public int size() {
        return map.size();
    }

    @Override
    public void set(int index, double val) {
        map.put(index, val);
    }

    @Override
    public void remove(int index) {
        map.remove(index);
    }

    @Override
    public Set<Integer> getIdxs() {
        return map.keySet();
    }

    /** Deep-copies the backing map into a new vector. */
    @Override
    public SparseVector copy() {
        return new SparseVector(new HashMap<Integer, Double>(map));
    }

    @Override
    public Double get(int index) {
        return map.get(index);
    }

    /** Iterates the stored (index, value) pairs in map order. */
    public class SparseVectorIterator implements Iterator<Pair<Integer, Double>> {
        private final Iterator<Map.Entry<Integer, Double>> entries =
                SparseVector.this.map.entrySet().iterator();

        @Override
        public boolean hasNext() {
            return entries.hasNext();
        }

        @Override
        public Pair<Integer, Double> next() {
            Map.Entry<Integer, Double> e = entries.next();
            return new Pair<Integer, Double>(e.getKey(), e.getValue());
        }

        @Override
        public void remove() {
            entries.remove();
        }
    }

    @Override
    public Iterator<Pair<Integer, Double>> iterator() {
        return new SparseVectorIterator();
    }
}
| Java |
/*
* Hash.java
* Copyright (C) 2012 Universitat Politecnica de Catalunya
* @author Alex Catarineu (a.catarineu@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.recommender.rc.utils;
/**
 * Integer hashing utility.
 */
public class Hash {
    /**
     * Mixes the bits of a 32-bit integer through a sequence of shift-add and
     * shift-xor rounds (a Jenkins-style integer mix; every round is invertible,
     * so distinct inputs map to distinct outputs).
     *
     * @param a value to mix
     * @return the mixed hash
     */
    public static int hashCode(int a) {
        int h = a;
        h += 0x7ed55d16 + (h << 12);
        h ^= 0xc761c23c ^ (h >>> 19);
        h += 0x165667b1 + (h << 5);
        h = (h + 0xd3a2646c) ^ (h << 9);
        h += 0xfd7046c5 + (h << 3);
        h ^= 0xb55a4f09 ^ (h >>> 16);
        return h;
    }
}
| Java |
/*
* Updatable.java
* Copyright (C) 2012 Universitat Politecnica de Catalunya
* @author Alex Catarineu (a.catarineu@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.recommender.rc.utils;
import java.util.List;
/**
 * Observer callbacks fired by a {@code RecommenderData} store when its
 * contents change, so incremental models can update themselves without
 * re-reading the whole dataset.
 */
public interface Updatable {
    /** A user was added with an initial batch of ratings (parallel lists). */
    public void updateNewUser(int userID, List<Integer> ratedItems, List<Double> ratings);
    /** An item was added with an initial batch of ratings (parallel lists). */
    public void updateNewItem(int itemID, List<Integer> ratingUsers, List<Double> ratings);
    /** A user was removed. */
    public void updateRemoveUser(int userID);
    /** An item was removed. */
    public void updateRemoveItem(int itemID);
    /** A rating was inserted or replaced. */
    public void updateSetRating(int userID, int itemID, double rating);
    /** A rating was deleted. */
    public void updateRemoveRating(int userID, int itemID);
}
| Java |
/*
* Vector.java
* Copyright (C) 2012 Universitat Politecnica de Catalunya
* @author Alex Catarineu (a.catarineu@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.recommender.rc.utils;
import java.io.Serializable;
import java.util.Iterator;
import java.util.Set;
/**
 * Abstract numeric vector indexed by int. Implementations may be dense or
 * sparse; {@code get} returns null for indices with no stored value, which
 * the generic operations below treat as zero.
 */
public abstract class Vector implements Serializable {

    private static final long serialVersionUID = 2440314068879207731L;

    /** Number of stored entries. */
    abstract public int size();
    /** Stores val at index. */
    abstract public void set(int index, double val);
    /** Removes the entry at index. */
    abstract public void remove(int index);
    /** Value at index, or null when absent. */
    abstract public Double get(int index);
    /** Iterates the stored (index, value) pairs. */
    abstract public Iterator<Pair<Integer, Double>> iterator();
    /** Indices of all stored entries. */
    abstract public Set<Integer> getIdxs();

    /**
     * Dot product with another vector; indices absent from either side
     * contribute zero. Iterates over the smaller vector for efficiency.
     */
    public double dotProduct(Vector vec) {
        if (size() > vec.size()) return vec.dotProduct(this);
        Iterator<Pair<Integer, Double>> it = iterator();
        double ret = 0;
        while (it.hasNext()) {
            Pair<Integer, Double> ind = it.next();
            Double val1 = ind.getSecond();
            Double val2 = vec.get(ind.getFirst());
            if (val2 != null) ret += val1 * val2;
        }
        return ret;
    }

    /** Euclidean (L2) norm over the stored entries. */
    public double norm() {
        Iterator<Pair<Integer, Double>> it = iterator();
        double ret = 0;
        while (it.hasNext()) {
            // plain multiplication instead of Math.pow(x, 2): same value,
            // no transcendental-function call per element
            double v = it.next().getSecond();
            ret += v * v;
        }
        return Math.sqrt(ret);
    }

    /** Deep copy of this vector. */
    abstract public Vector copy();
}
| Java |
/*
* Pair.java
* Copyright (C) 2012 Universitat Politecnica de Catalunya
* @author Alex Catarineu (a.catarineu@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.recommender.rc.utils;
import java.io.Serializable;
/**
 * Mutable ordered 2-tuple. Its natural ordering compares the SECOND
 * component first and breaks ties on the first component, which lets
 * (index, score) pairs be sorted by score directly.
 */
public class Pair<T extends Comparable<T>, U extends Comparable<U>> implements Serializable, Comparable<Pair<T, U>> {

    private static final long serialVersionUID = -1048781440947783998L;

    private T first;
    private U second;

    /** Builds the tuple (first, second). */
    public Pair(T first, U second) {
        this.first = first;
        this.second = second;
    }

    public T getFirst() {
        return first;
    }

    public void setFirst(T first) {
        this.first = first;
    }

    public U getSecond() {
        return second;
    }

    public void setSecond(U second) {
        this.second = second;
    }

    /** Orders by second component, then by first on ties. */
    @Override
    public int compareTo(Pair<T, U> o) {
        final int bySecond = this.second.compareTo(o.second);
        return (bySecond != 0) ? bySecond : this.first.compareTo(o.first);
    }
}
| Java |
/*
* SphereCluster.java
* Copyright (C) 2010 RWTH Aachen University, Germany
* @author Jansen (moa@cs.rwth-aachen.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.cluster;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import weka.core.DenseInstance;
import weka.core.Instance;
/**
* A simple implementation of the <code>Cluster</code> interface representing
* spherical clusters. The inclusion probability is one inside the sphere and zero
* everywhere else.
*
*/
/**
 * A simple implementation of the <code>Cluster</code> interface representing
 * spherical clusters. The inclusion probability is one inside the sphere and
 * zero everywhere else.
 */
public class SphereCluster extends Cluster {

    private static final long serialVersionUID = 1L;

    private double[] center;
    private double radius;
    private double weight;

    public SphereCluster(double[] center, double radius) {
        this(center, radius, 1.0);
    }

    public SphereCluster() {
    }

    public SphereCluster(double[] center, double radius, double weightedSize) {
        this();
        this.center = center;
        this.radius = radius;
        this.weight = weightedSize;
    }

    /**
     * Creates a sphere of the given radius, positioned uniformly at random so
     * that it lies entirely inside the unit hypercube.
     */
    public SphereCluster(int dimensions, double radius, Random random) {
        this();
        this.center = new double[dimensions];
        this.radius = radius;
        // Position randomly but keep hypersphere inside the boundaries
        double interval = 1.0 - 2 * radius;
        for (int i = 0; i < center.length; i++) {
            this.center[i] = (random.nextDouble() * interval) + radius;
        }
        this.weight = 0.0;
    }

    /**
     * Builds the minimal bounding sphere of the given instances via Miniball;
     * the weight is the number of instances. A null/empty list leaves the
     * cluster uninitialized.
     */
    public SphereCluster(List<? extends Instance> instances, int dimension) {
        this();
        if (instances == null || instances.size() <= 0)
            return;
        weight = instances.size();
        Miniball mb = new Miniball(dimension);
        mb.clear();
        for (Instance instance : instances) {
            mb.check_in(instance.toDoubleArray());
        }
        mb.build();
        center = mb.center();
        radius = mb.radius();
        mb.clear();
    }

    /**
     * Checks whether two <code>SphereCluster</code> overlap based on radius
     * NOTE: overlapRadiusDegree only calculates the overlap based
     * on the centers and the radi, so not the real overlap
     *
     * TODO: should we do this by MC to get the real overlap???
     *
     * @param other the other sphere
     * @return 0 when disjoint, 1 when one sphere contains the other,
     *         otherwise a partial-overlap degree in between
     */
    public double overlapRadiusDegree(SphereCluster other) {
        double[] center0 = getCenter();
        double radius0 = getRadius();
        double[] center1 = other.getCenter();
        double radius1 = other.getRadius();
        double radiusBig;
        double radiusSmall;
        if (radius0 < radius1) {
            radiusBig = radius1;
            radiusSmall = radius0;
        }
        else {
            radiusBig = radius0;
            radiusSmall = radius1;
        }
        double dist = 0;
        for (int i = 0; i < center0.length; i++) {
            double delta = center0[i] - center1[i];
            dist += delta * delta;
        }
        dist = Math.sqrt(dist);
        if (dist > radiusSmall + radiusBig)
            return 0;
        if (dist + radiusSmall <= radiusBig) {
            //one lies within the other
            return 1;
        }
        else {
            return (radiusSmall + radiusBig - dist) / (2 * radiusSmall);
        }
    }

    /**
     * Absorbs another cluster: the new center is the weight-weighted mean of
     * both centers, the new radius is the smallest one (w.r.t. the old radii)
     * that still covers both original spheres, and the weights add up.
     */
    public void combine(SphereCluster cluster) {
        double[] oldCenter = getCenter();
        double[] newCenter = new double[oldCenter.length];
        double[] otherCenter = cluster.getCenter();
        double otherWeight = cluster.getWeight();
        double otherRadius = cluster.getRadius();
        for (int i = 0; i < oldCenter.length; i++) {
            newCenter[i] = (oldCenter[i] * getWeight() + otherCenter[i] * otherWeight)
                    / (getWeight() + otherWeight);
        }
        // BUG FIX: the original assigned the merged center to a LOCAL variable
        // that shadowed the field, so the cluster center was never updated --
        // and consequently r_0 measured the distance of newcenter to itself
        // (always 0). Compute both cover radii against the true old centers,
        // then commit the new center. (distance() is never negative, so the
        // original Math.abs was redundant.)
        double r0 = getRadius() + distance(oldCenter, newCenter);
        double r1 = otherRadius + distance(otherCenter, newCenter);
        this.center = newCenter;
        this.radius = Math.max(r0, r1);
        this.weight += otherWeight;
    }

    /**
     * Merges another sphere into this one, producing the minimal sphere that
     * encloses both (exact geometric union, unlike the heuristic combine()).
     */
    public void merge(SphereCluster cluster) {
        double[] c0 = getCenter();
        double w0 = getWeight();
        double r0 = getRadius();
        double[] c1 = cluster.getCenter();
        double w1 = cluster.getWeight();
        double r1 = cluster.getRadius();
        //vector
        double[] v = new double[c0.length];
        //center distance
        double d = 0;
        for (int i = 0; i < c0.length; i++) {
            v[i] = c0[i] - c1[i];
            d += v[i] * v[i];
        }
        d = Math.sqrt(d);
        double r = 0;
        double[] c = new double[c0.length];
        //one lays within the other: keep the bigger sphere as-is
        if (d + r0 <= r1 || d + r1 <= r0) {
            if (d + r0 <= r1) {
                r = r1;
                c = c1;
            }
            else {
                r = r0;
                c = c0;
            }
        }
        else {
            // general case: new diameter spans both far sides
            r = (r0 + r1 + d) / 2.0;
            for (int i = 0; i < c.length; i++) {
                c[i] = c1[i] - v[i] / d * (r1 - r);
            }
        }
        setCenter(c);
        setRadius(r);
        setWeight(w0 + w1);
    }

    /** Returns a defensive copy of the center coordinates. */
    @Override
    public double[] getCenter() {
        double[] copy = new double[center.length];
        System.arraycopy(center, 0, copy, 0, center.length);
        return copy;
    }

    public void setCenter(double[] center) {
        this.center = center;
    }

    public double getRadius() {
        return radius;
    }

    public void setRadius(double radius) {
        this.radius = radius;
    }

    @Override
    public double getWeight() {
        return weight;
    }

    public void setWeight(double weight) {
        this.weight = weight;
    }

    /** 1.0 inside (or on) the sphere, 0.0 outside. */
    @Override
    public double getInclusionProbability(Instance instance) {
        if (getCenterDistance(instance) <= getRadius()) {
            return 1.0;
        }
        return 0.0;
    }

    /** Euclidean distance from the center to the instance. */
    public double getCenterDistance(Instance instance) {
        double distance = 0.0;
        //get the center through getCenter so subclass have a chance
        double[] center = getCenter();
        for (int i = 0; i < center.length; i++) {
            double d = center[i] - instance.value(i);
            distance += d * d;
        }
        return Math.sqrt(distance);
    }

    public double getCenterDistance(SphereCluster other) {
        return distance(getCenter(), other.getCenter());
    }

    /*
     * the minimal distance between the surface of two clusters.
     * is negative if the two clusters overlap
     */
    public double getHullDistance(SphereCluster other) {
        double distance = 0.0;
        //get the center through getCenter so subclass have a chance
        double[] center0 = getCenter();
        double[] center1 = other.getCenter();
        distance = distance(center0, center1);
        distance = distance - getRadius() - other.getRadius();
        return distance;
    }

    /**
     * When a clusters looses points the new minimal bounding sphere can be
     * partly outside of the originating cluster. If a another cluster is
     * right next to the original cluster (without overlapping), the new
     * cluster can be overlapping with this second cluster. OverlapSave
     * will tell you if the current cluster can degenerate so much that it
     * overlaps with cluster 'other'
     *
     * @param other the potentially overlapping cluster
     * @return true if cluster can potentially overlap
     */
    public boolean overlapSave(SphereCluster other) {
        //use basic geometry to figure out the maximal degenerated cluster
        //comes down to Max(radius *(sin alpha + cos alpha)) which is
        double minDist = Math.sqrt(2) * (getRadius() + other.getRadius());
        return getCenterDistance(other) - minDist > 0;
    }

    /** Euclidean distance between two coordinate vectors of equal length. */
    private double distance(double[] v1, double[] v2) {
        double distance = 0.0;
        // loop over the arguments' own length rather than the field's
        for (int i = 0; i < v1.length; i++) {
            double d = v1[i] - v2[i];
            distance += d * d;
        }
        return Math.sqrt(distance);
    }

    public double[] getDistanceVector(Instance instance) {
        return distanceVector(getCenter(), instance.toDoubleArray());
    }

    public double[] getDistanceVector(SphereCluster other) {
        return distanceVector(getCenter(), other.getCenter());
    }

    private double[] distanceVector(double[] v1, double[] v2) {
        double[] v = new double[v1.length];
        for (int i = 0; i < v1.length; i++) {
            v[i] = v2[i] - v1[i];
        }
        return v;
    }

    /**
     * Samples this cluster by returning a point from inside it.
     * @param random a random number source
     * @return a point that lies inside this cluster
     */
    public Instance sample(Random random) {
        // Create sample in hypersphere coordinates
        //get the center through getCenter so subclass have a chance
        double[] center = getCenter();
        final int dimensions = center.length;
        final double length = random.nextDouble() * getRadius();
        if (dimensions == 1) {
            // EDGE-CASE FIX: the angle tables below have length dimensions-1,
            // so the original indexed an empty array for 1-D clusters. In 1-D
            // the "direction" is just a random sign.
            double sign = (random.nextDouble() < 0.5) ? -1.0 : 1.0;
            return new DenseInstance(1.0, new double[]{center[0] + sign * length});
        }
        final double sin[] = new double[dimensions - 1];
        final double cos[] = new double[dimensions - 1];
        double lastValue = 1.0;
        for (int i = 0; i < dimensions - 1; i++) {
            double angle = random.nextDouble() * 2 * Math.PI;
            sin[i] = lastValue * Math.sin(angle); // Store cumulative values
            cos[i] = Math.cos(angle);
            lastValue = sin[i];
        }
        // Calculate cartesian coordinates
        double res[] = new double[dimensions];
        // First value uses only cosines
        res[0] = center[0] + length * cos[0];
        // Loop through 'middle' coordinates which use cosines and sines
        for (int i = 1; i < dimensions - 1; i++) {
            res[i] = center[i] + length * sin[i - 1] * cos[i];
        }
        // Last value uses only sines
        res[dimensions - 1] = center[dimensions - 1] + length * sin[dimensions - 2];
        return new DenseInstance(1.0, res);
    }

    @Override
    protected void getClusterSpecificInfo(ArrayList<String> infoTitle, ArrayList<String> infoValue) {
        super.getClusterSpecificInfo(infoTitle, infoValue);
        infoTitle.add("Radius");
        infoValue.add(Double.toString(getRadius()));
    }
}
| Java |
package moa.cluster;
import java.util.ArrayList;
/**
* Java Porting of the Miniball.h code of <B>Bernd Gaertner</B>.
* Look at http://www.inf.ethz.ch/personal/gaertner/miniball.html<br>
* and related work at
* http://www.inf.ethz.ch/personal/gaertner/texts/own_work/esa99_final.pdf<br>
* for reading about the algorithm and the implementation of it.<p>
* <p>
* If interested in Bounding Sphere algorithms read also published work of
* <B>Emo Welzl</B> "Smallest enclosing disks (balls and Ellipsoid)" and
* the work of <B>Jack Ritter</B> on "Efficient Bounding Spheres" at<br>
* http://tog.acm.org/GraphicsGems/gems/BoundSphere.c?searchterm=calc<p>
* <p><p>
* For Licencing Info report to Bernd Gaertner's one reported below:<p>
*
* Copright (C) 1999-2006, Bernd Gaertner<br>
* $Revision: 1.3 $<br>
* $Date: 2006/11/16 08:01:52 $<br>
*<br>
*This program is free software; you can redistribute it and/or modify<br>
*it under the terms of the GNU General Public License as published by<br>
*the Free Software Foundation; either version 3 of the License, or<br>
*(at your option) any later version.<br>
*<br>
*This program is distributed in the hope that it will be useful,<br>
*but WITHOUT ANY WARRANTY; without even the implied warranty of<br>
*MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the<br>
*GNU General Public License for more details.<br>
*<br>
*You should have received a copy of the GNU General Public License<br>
*along with this program. If not, see <http://www.gnu.org/licenses/>.<br>
*Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA,<br>
*or download the License terms from prep.ai.mit.edu/pub/gnu/COPYING-2.0.<br>
*<br>
*Contact:<br>
*--------<br>
*Bernd Gaertner<br>
*Institute of Theoretical Computer Science<br>
*ETH Zuerich<br>
*CAB G32.2<br>
*CH-8092 Zuerich, Switzerland<br>
*http://www.inf.ethz.ch/personal/gaertner<br>
* Original Java port from Paolo Perissinotto for Jpatch Project by Sascha Ledinsky
* found at http://forum.jpatch.com/viewtopic.php?f=3&t=919
*
* @author Paolo Perissinotto for Jpatch Project by <B>Sascha Ledinsky</B>
*
* @version 1.0
* {@literal Date: 2007/11/18 21:57}
*
* used for moa for calculating most compact sphere cluster
* modified by Timm Jansen (moa@cs.rwth-aachen.de) to be used with high
* dimensional points
*
*/
public class Miniball {
int d;               // dimensionality of the input points
ArrayList L;         // raw list of double[] points registered via check_in
Miniball_b B;        // incremental ball builder holding the current support set
int support_end = 0; // NOTE(review): appears to mark the end of the support prefix in L -- confirm against move_to_front
/**
 * Creates a smallest-enclosing-ball solver for points of the given
 * dimensionality.
 *
 * @param dim dimension of the points that will be checked in
 */
public Miniball(int dim) {
    d = dim;
    L = new ArrayList();
    B = new Miniball_b();
}
// Simple mutable int holder (pivot-index wrapper); not referenced by the
// code visible in this file -- presumably kept from the original C++ port.
class pvt {
    int val;
    pvt() {
        val = 0;
    }
    void setVal(int i) {
        val = i;
    }
    int getVal() {
        return (val);
    }
}
/**
 * Incremental builder for the smallest ball through a set of support points
 * (the inner "B" structure of Gaertner's Miniball algorithm). Points are
 * pushed/popped in a stack-like fashion; after each push the center and
 * squared radius of the ball through all pushed points are up to date.
 */
class Miniball_b {
    int m, s; // size and number of support points
    double[] q0 = new double[d];          // first pushed point (origin of the affine frame)
    double[] z = new double[d + 1];       // per-level normalization factors
    double[] f = new double[d + 1];       // per-level interpolation coefficients
    double[][] v = new double[d + 1][d];  // orthogonalized direction vectors
    double[][] a = new double[d + 1][d];  // coefficients of v projections
    double[][] c = new double[d + 1][d];  // ball centers per level
    double[] sqr_r = new double[d + 1];   // squared radii per level
    double[] current_c = new double[d]; // refers to some c[j]
    double current_sqr_r;
    double[] getCenter() {
        return (current_c);
    }
    double squared_radius() {
        return current_sqr_r;
    }
    int size() {
        return m;
    }
    int support_size() {
        return s;
    }
    // Squared distance of p from the current center minus the current
    // squared radius: positive iff p lies outside the current ball.
    double excess(double[] p) {
        double e = -current_sqr_r;
        for (int k = 0; k < d; ++k) {
            e += mb_sqr(p[k] - current_c[k]);
        }
        return e;
    }
    // Returns to the empty-ball state (negative squared radius means
    // "contains nothing", so the first excess() is always positive).
    void reset() {
        m = 0;
        s = 0;
        // we misuse c[0] for the center of the empty sphere
        for (int j = 0; j < d; j++) {
            c[0][j] = 0;
        }
        current_c = c[0];
        current_sqr_r = -1;
    }
    // Discards the most recently pushed point (the per-level arrays keep
    // the previous levels intact, so no recomputation is needed).
    void pop() {
        --m;
    }
    // Adds p to the support set and updates center/radius of the ball
    // through all pushed points. Returns false (rejecting the push) when p
    // is numerically affinely dependent on the current support (z_m tiny).
    boolean push(double[] p) {
        //System.out.println("Miniball_b:push");
        int i, j;
        double eps = 1e-32;
        if (m == 0) {
            // first point: ball is the point itself with radius 0
            for (i = 0; i < d; ++i) {
                q0[i] = p[i];
            }
            for (i = 0; i < d; ++i) {
                c[0][i] = q0[i];
            }
            sqr_r[0] = 0;
        } else {
            // set v_m to Q_m
            for (i = 0; i < d; ++i) {
                v[m][i] = p[i] - q0[i];
            }
            // compute the a_{m,i}, i< m
            for (i = 1; i < m; ++i) {
                a[m][i] = 0;
                for (j = 0; j < d; ++j) {
                    a[m][i] += v[i][j] * v[m][j];
                }
                a[m][i] *= (2 / z[i]);
            }
            // update v_m to Q_m-\bar{Q}_m
            for (i = 1; i < m; ++i) {
                for (j = 0; j < d; ++j) {
                    v[m][j] -= a[m][i] * v[i][j];
                }
            }
            // compute z_m
            z[m] = 0;
            for (j = 0; j < d; ++j) {
                z[m] += mb_sqr(v[m][j]);
            }
            z[m] *= 2;
            // reject push if z_m too small
            if (z[m] < eps * current_sqr_r) {
                return false;
            }
            // update c, sqr_r
            double e = -sqr_r[m - 1];
            for (i = 0; i < d; ++i) {
                e += mb_sqr(p[i] - c[m - 1][i]);
            }
            f[m] = e / z[m];
            for (i = 0; i < d; ++i) {
                c[m][i] = c[m - 1][i] + f[m] * v[m][i];
            }
            sqr_r[m] = sqr_r[m - 1] + e * f[m] / 2;
        }
        current_c = c[m];
        current_sqr_r = sqr_r[m];
        s = ++m;
        return true;
    }
    // NOTE(review): accuracy diagnostic -- appears to measure how far the
    // support coefficients are from a valid convex combination (0 when the
    // solution is consistent). Confirm against Gaertner's reference code.
    double slack() {
        double min_l = 0;
        double[] l = new double[d + 1];
        l[0] = 1;
        for (int i = s - 1; i > 0; --i) {
            l[i] = f[i];
            for (int k = s - 1; k > i; --k) {
                l[i] -= a[k][i] * l[k];
            }
            if (l[i] < min_l) {
                min_l = l[i];
            }
            l[0] -= l[i];
        }
        if (l[0] < min_l) {
            min_l = l[0];
        }
        return ((min_l < 0) ? -min_l : 0);
    }
}
/**
* Method clear: clears the ArrayList of the selection points.<br>
* Use it for starting a new selection list to calculate Bounding Sphere on<br>
* or to clear memory references to the list of objects.<br>
* Always use at the end of a Miniball use if you want to reuse later the Miniball object
*
*/
public void clear() {
L.clear();
}
/**
* Adds a point to the list.<br>
* Skip action on null parameter.<br>
* @param p The object to be added to the list
*/
public void check_in(double[] p) {
if (p != null) {
L.add(p);
} else {
System.out.println("Miniball.check_in WARNING: Skipping null point");
}
}
/**
* Recalculate Miniball parameter Center and Radius
*
*/
public void build() {
B.reset();
support_end = 0;
pivot_mb(points_end());
}
void mtf_mb(int i) {
int pj = 0;
support_end = points_begin();
if ((B.size()) == d + 1) {
return;
}
for (int k = points_begin(); k != i;) {
pj = pj + 1;
int j = k++;
double[] sp = (double[]) L.get(j);
if (B.excess(sp) > 0) {
if (B.push(sp)) {
mtf_mb(j);
B.pop();
move_to_front(j);
}
}
}
}
void move_to_front(int j) {
if (support_end <= j) {
support_end++;
}
// L.splice (L.begin(), L, j);
double[] sp = (double[]) L.get(j);
L.remove(j);
L.add(0, sp);
}
void pivot_mb(int i) {
int t = 1;
mtf_mb(t);
double max_e = 0.0, old_sqr_r = -1;
pvt pivot = new pvt();
do {
max_e = max_excess(t, i, pivot);
if (max_e > 0) {
t = support_end;
if (t == pivot.getVal()) {
++t;
}
old_sqr_r = B.squared_radius();
double[] sp = (double[]) L.get(pivot.getVal());
B.push(sp);
mtf_mb(support_end);
B.pop();
move_to_front(pivot.getVal());
}
} while ((max_e > 0) && (B.squared_radius() > old_sqr_r));
}
double max_excess(int t, int i, pvt pivot) {
double[] c = B.getCenter();
double sqr_r = B.squared_radius();
double e, max_e = 0;
for (int k = t; k != i; ++k) {
double[] p = (double[]) L.get(k);
e = -sqr_r;
for (int j = 0; j < d; ++j) {
e += mb_sqr(p[j] - c[j]);
}
if (e > max_e) {
max_e = e;
pivot.setVal(k);
}
}
return max_e;
}
/**
* Return the center of the Miniball
* @return The center (double[])
*/
public double[] center() {
return B.getCenter();
}
/**
* Return the sqaured Radius of the miniball
* @return The square radius
*/
public double squared_radius() {
return B.squared_radius();
}
/**
* Return the Radius of the miniball
* @return The radius
*/
public double radius() {
return ((1 + 0.00001) * Math.sqrt(B.squared_radius()));
}
/**
* Return the actual number of points in the list
* @return the actual number of points
*/
public int nr_points() {
return L.size();
}
int points_begin() {
return (0);
}
int points_end() {
return (L.size());
}
/**
* Return the number of support points (used to calculate the miniball).<br>
* It's and internal info
* @return the number of support points
*/
public int nr_support_points() {
return B.support_size();
}
int support_points_begin() {
return (0);
}
int support_points_end() {
return support_end;
}
double accuracy(double slack) {
double e, max_e = 0;
int n_supp = 0;
int i;
for (i = points_begin(); i != support_end; ++i, ++n_supp) {
double[] sp = (double[]) L.get(i);
if ((e = Math.abs(B.excess(sp))) > max_e) {
max_e = e;
}
}
if (n_supp == nr_support_points()) {
System.out.println("Miniball.accuracy WARNING: STRANGE PROBLEM HERE!");
}
for (i = support_end; i != points_end(); ++i) {
double[] sp = (double[]) L.get(i);
if ((e = B.excess(sp)) > max_e) {
max_e = e;
}
}
slack = B.slack();
return (max_e / squared_radius());
}
boolean is_valid(double tolerance) {
double slack = 0.0;
return ((accuracy(slack) < tolerance) && (slack == 0));
}
double mb_sqr(double r) {
return r * r;
}
}
| Java |
/**
* Clustering.java
*
* Represents a collection of clusters.
*
* @author Timm Jansen (moa@cs.rwth-aachen.de)
* @editor Yunsu Kim
*
* Last edited: 2013/06/02
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.cluster;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import moa.AbstractMOAObject;
import moa.core.AutoExpandVector;
import moa.gui.visualization.DataPoint;
import weka.core.Attribute;
import weka.core.Instance;
/**
 * A collection of {@link Cluster} objects, with helper constructors that
 * derive a ground-truth clustering from labeled instances.
 */
public class Clustering extends AbstractMOAObject{

    private AutoExpandVector<Cluster> clusters;

    /** Creates an empty clustering. */
    public Clustering() {
        this.clusters = new AutoExpandVector<Cluster>();
    }

    /**
     * Creates a clustering containing the given clusters.
     *
     * @param clusters the clusters to add (in order)
     */
    public Clustering( Cluster[] clusters ) {
        this.clusters = new AutoExpandVector<Cluster>();
        for (int i = 0; i < clusters.length; i++) {
            this.clusters.add(clusters[i]);
        }
    }

    /**
     * Builds one SphereCluster per non-noise class label of the given points.
     * If the last class-attribute value is the literal "noise", points with
     * that label are skipped.
     *
     * @param points labeled instances; the class attribute is the cluster id
     */
    public Clustering(List<? extends Instance> points){
        HashMap<Integer, Integer> labelMap = classValues(points);
        int dim = points.get(0).dataset().numAttributes()-1;
        int numClasses = labelMap.size();
        int noiseLabel;

        Attribute classLabel = points.get(0).dataset().classAttribute();
        int lastLabelIndex = classLabel.numValues() - 1;
        // Bug fix: '==' compared String identity and only matched when both
        // strings happened to be interned; compare content with equals().
        if ("noise".equals(classLabel.value(lastLabelIndex))) {
            noiseLabel = lastLabelIndex;
        } else {
            noiseLabel = -1;
        }

        // Bucket the points by (remapped) class label.
        ArrayList<Instance>[] sorted_points = (ArrayList<Instance>[]) new ArrayList[numClasses];
        for (int i = 0; i < numClasses; i++) {
            sorted_points[i] = new ArrayList<Instance>();
        }
        for (Instance point : points) {
            int clusterid = (int)point.classValue();
            if(clusterid == noiseLabel) continue;
            sorted_points[labelMap.get(clusterid)].add((Instance)point);
        }

        this.clusters = new AutoExpandVector<Cluster>();
        for (int i = 0; i < numClasses; i++) {
            if(sorted_points[i].size()>0){
                SphereCluster s = new SphereCluster(sorted_points[i],dim);
                s.setId(sorted_points[i].get(0).classValue());
                s.setGroundTruth(sorted_points[i].get(0).classValue());
                clusters.add(s);
            }
        }
    }

    /**
     * Builds a micro-clustering per class: points of each class are grouped
     * into small SphereClusters of at least {@code initMinPoints} points,
     * then heavily overlapping micro clusters are merged.
     *
     * @param points           labeled data points (class value -1 = noise)
     * @param overlapThreshold radius-overlap degree above which two micro
     *                         clusters of the same class are merged
     * @param initMinPoints    initial number of points per micro cluster
     */
    public Clustering(ArrayList<DataPoint> points, double overlapThreshold, int initMinPoints){
        HashMap<Integer, Integer> labelMap = Clustering.classValues(points);
        int dim = points.get(0).dataset().numAttributes()-1;
        int numClasses = labelMap.size();

        ArrayList<DataPoint>[] sorted_points = (ArrayList<DataPoint>[]) new ArrayList[numClasses];
        for (int i = 0; i < numClasses; i++) {
            sorted_points[i] = new ArrayList<DataPoint>();
        }
        for (DataPoint point : points) {
            int clusterid = (int)point.classValue();
            if(clusterid == -1) continue; // skip noise
            sorted_points[labelMap.get(clusterid)].add(point);
        }

        clusters = new AutoExpandVector<Cluster>();
        for (int i = 0; i < numClasses; i++) {
            ArrayList<SphereCluster> microByClass = new ArrayList<SphereCluster>();
            ArrayList<DataPoint> pointInCluster = new ArrayList<DataPoint>();
            ArrayList<ArrayList<Instance>> pointInMicroClusters = new ArrayList<ArrayList<Instance>>();

            pointInCluster.addAll(sorted_points[i]);
            while(pointInCluster.size()>0){
                // Seed a micro cluster with up to initMinPoints points.
                ArrayList<Instance> micro_points = new ArrayList<Instance>();
                for (int j = 0; j < initMinPoints && !pointInCluster.isEmpty(); j++) {
                    micro_points.add((Instance)pointInCluster.get(0));
                    pointInCluster.remove(0);
                }
                if(micro_points.size() > 0){
                    SphereCluster s = new SphereCluster(micro_points,dim);
                    // Merge with previously created micro clusters that overlap too much.
                    for (int c = 0; c < microByClass.size(); c++) {
                        if(microByClass.get(c).overlapRadiusDegree(s) > overlapThreshold ){
                            micro_points.addAll(pointInMicroClusters.get(c));
                            s = new SphereCluster(micro_points,dim);
                            pointInMicroClusters.remove(c);
                            microByClass.remove(c);
                            // Bug fix: remove(c) shifts the next element into
                            // index c; step back so it is not skipped.
                            c--;
                        }
                    }
                    // Absorb remaining points that clearly belong to s.
                    for (int j = 0; j < pointInCluster.size(); j++) {
                        Instance instance = pointInCluster.get(j);
                        if(s.getInclusionProbability(instance) > 0.8){
                            pointInCluster.remove(j);
                            micro_points.add(instance);
                            // Bug fix: compensate for the element shift caused
                            // by remove(j) so no candidate point is skipped.
                            j--;
                        }
                    }
                    s.setWeight(micro_points.size());
                    microByClass.add(s);
                    pointInMicroClusters.add(micro_points);
                }
            }

            // Repeatedly merge overlapping micro clusters until stable.
            boolean changed = true;
            while(changed){
                changed = false;
                for(int c = 0; c < microByClass.size(); c++) {
                    for(int c1 = c+1; c1 < microByClass.size(); c1++) {
                        double overlap = microByClass.get(c).overlapRadiusDegree(microByClass.get(c1));
                        if(overlap > overlapThreshold){
                            pointInMicroClusters.get(c).addAll(pointInMicroClusters.get(c1));
                            SphereCluster s = new SphereCluster(pointInMicroClusters.get(c),dim);
                            microByClass.set(c, s);
                            pointInMicroClusters.remove(c1);
                            microByClass.remove(c1);
                            changed = true;
                            break;
                        }
                    }
                }
            }
            for (int j = 0; j < microByClass.size(); j++) {
                microByClass.get(j).setGroundTruth(sorted_points[i].get(0).classValue());
                clusters.add(microByClass.get(j));
            }
        }

        for (int j = 0; j < clusters.size(); j++) {
            clusters.get(j).setId(j);
        }
    }

    /**
     * Maps each distinct class label found in the points to a dense index
     * 0..k-1 (in order of first appearance). Noise (-1) is mapped last,
     * and only if present.
     *
     * @param points the labeled points to scan
     * @return map from class label to dense cluster index
     */
    public static HashMap<Integer, Integer> classValues(List<? extends Instance> points){
        HashMap<Integer,Integer> classes = new HashMap<Integer, Integer>();
        int workcluster = 0;
        boolean hasnoise = false;
        for (int i = 0; i < points.size(); i++) {
            int label = (int) points.get(i).classValue();
            if(label == -1){
                hasnoise = true;
            }
            else{
                if(!classes.containsKey(label)){
                    classes.put(label,workcluster);
                    workcluster++;
                }
            }
        }
        if(hasnoise)
            classes.put(-1,workcluster);
        return classes;
    }

    /** Wraps an existing vector of clusters (no copy is made). */
    public Clustering(AutoExpandVector<Cluster> clusters) {
        this.clusters = clusters;
    }

    /**
     * Adds a cluster to the clustering.
     */
    public void add(Cluster cluster){
        clusters.add(cluster);
    }

    /**
     * Removes the cluster at the given index from the clustering;
     * out-of-range indices are ignored.
     */
    public void remove(int index){
        if(index < clusters.size()){
            clusters.remove(index);
        }
    }

    /**
     * Returns the cluster at the given index, or null if out of range.
     */
    public Cluster get(int index){
        if(index < clusters.size()){
            return clusters.get(index);
        }
        return null;
    }

    /**
     * @return the <code>Clustering</code> as an AutoExpandVector
     */
    public AutoExpandVector<Cluster> getClustering() {
        return clusters;
    }

    /**
     * @return a deep copy of the <code>Clustering</code> as an AutoExpandVector
     */
    public AutoExpandVector<Cluster> getClusteringCopy() {
        return (AutoExpandVector<Cluster>)clusters.copy();
    }

    /**
     * @return the number of clusters
     */
    public int size() {
        return clusters.size();
    }

    /**
     * @return the number of dimensions of this clustering
     */
    public int dimension() {
        assert (clusters.size() != 0);
        return clusters.get(0).getCenter().length;
    }

    @Override
    public void getDescription(StringBuilder sb, int i) {
        sb.append("Clustering Object");
    }

    /**
     * Returns the maximum, over all clusters, of the probability that the
     * given point belongs to the cluster.
     */
    public double getMaxInclusionProbability(Instance point) {
        double maxInclusion = 0.0;
        for (int i = 0; i < clusters.size(); i++) {
            maxInclusion = Math.max(clusters.get(i).getInclusionProbability(point),
                    maxInclusion);
        }
        return maxInclusion;
    }
}
| Java |
/*
* CFCluster.java
* Copyright (C) 2010 RWTH Aachen University, Germany
* @author Jansen (moa@cs.rwth-aachen.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.cluster;
import java.util.Arrays;
import weka.core.Instance;
/* Micro cluster, as defined by Aggarwal et al., "On Clustering Massive Data Streams: A Summarization Paradigm",
 * in the book "Data Streams: Models and Algorithms" by Charu C. Aggarwal
 * @article{
 title = {Data Streams: Models and Algorithms},
 author = {Aggarwal, Charu C.},
 year = {2007},
 publisher = {Springer Science+Business Media, LLC},
 url = {http://ebooks.ulb.tu-darmstadt.de/11157/},
 institution = {eBooks [http://ebooks.ulb.tu-darmstadt.de/perl/oai2] (Germany)},
 }
 DEFINITION: A micro-cluster for a set of d-dimensional points X_i1 ... X_in
 with timestamps T_i1 ... T_in is the (2*d+3)-tuple (CF2x, CF1x, CF2t, CF1t, n),
 wherein CF2x and CF1x each correspond to a vector of d entries. The definition of each of these entries is as follows:
 o For each dimension, the sum of the squares of the data values is maintained
 in CF2x. Thus, CF2x contains d values. The p-th entry of CF2x is equal to
 \sum_{j=1}^{n} (x_{i_j}^p)^2
 o For each dimension, the sum of the data values is maintained in CF1x.
 Thus, CF1x contains d values. The p-th entry of CF1x is equal to
 \sum_{j=1}^{n} x_{i_j}^p
 o The sum of the squares of the time stamps T_i1 ... T_in is maintained in CF2t.
 o The sum of the time stamps T_i1 ... T_in is maintained in CF1t.
 o The number of data points is maintained in n.
 */
/**
 * Base class for clustering-feature (CF) based micro clusters: maintains the
 * point count N, the per-dimension linear sum LS and squared sum SS, from
 * which center and (subclass-defined) radius are derived.
 */
public abstract class CFCluster extends SphereCluster {

    private static final long serialVersionUID = 1L;

    // Multiplier applied by subclasses when deriving a radius from the CF
    // statistics (value inherited from the CluStream/ClusTree literature).
    protected double radiusFactor = 1.8;

    /**
     * Number of points in the cluster.
     */
    protected double N;

    /**
     * Linear sum of all the points added to the cluster.
     */
    public double[] LS;

    /**
     * Squared sum of all the points added to the cluster.
     */
    public double[] SS;

    /**
     * Instantiates an empty kernel with the given dimensionality.
     * @param dimensions The number of dimensions of the points that can be in
     * this kernel.
     */
    public CFCluster(Instance instance, int dimensions) {
        this(instance.toDoubleArray(), dimensions);
    }

    // Creates an empty CF (N = 0, zeroed sums) of the given dimensionality.
    protected CFCluster(int dimensions) {
        this.N = 0;
        this.LS = new double[dimensions];
        this.SS = new double[dimensions];
        Arrays.fill(this.LS, 0.0);
        Arrays.fill(this.SS, 0.0);
    }

    // Creates a CF holding a single point.
    // NOTE(review): 'center' is stored without a defensive copy, so the CF
    // aliases the caller's array — confirm callers do not mutate it afterwards.
    public CFCluster(double [] center, int dimensions) {
        this.N = 1;
        this.LS = center;
        this.SS = new double[dimensions];
        for (int i = 0; i < SS.length; i++) {
            SS[i]=Math.pow(center[i], 2);
        }
    }

    // Copy constructor: deep-copies the CF statistics of the given cluster.
    public CFCluster(CFCluster cluster) {
        this.N = cluster.N;
        this.LS = Arrays.copyOf(cluster.LS, cluster.LS.length);
        this.SS = Arrays.copyOf(cluster.SS, cluster.SS.length);
    }

    // Merges another CF into this one (CFs are additive: N, LS and SS
    // are summed component-wise).
    public void add(CFCluster cluster ) {
        this.N += cluster.N;
        addVectors( this.LS, cluster.LS );
        addVectors( this.SS, cluster.SS );
    }

    // Returns the CF representation of this cluster (subclass-defined).
    public abstract CFCluster getCF();

    /**
     * @return this kernels' center (the mean: LS / N; requires N > 0)
     */
    @Override
    public double[] getCenter() {
        assert (this.N>0);
        double res[] = new double[this.LS.length];
        for ( int i = 0; i < res.length; i++ ) {
            res[i] = this.LS[i] / N;
        }
        return res;
    }

    @Override
    public abstract double getInclusionProbability(Instance instance);

    /**
     * See interface <code>Cluster</code>
     * @return The radius of the cluster.
     */
    @Override
    public abstract double getRadius();

    /**
     * See interface <code>Cluster</code>
     * @return The weight (here: the number of points N).
     * @see Cluster#getWeight()
     */
    @Override
    public double getWeight() {
        return N;
    }

    public void setN(double N){
        this.N = N;
    }

    public double getN() {
        return N;
    }

    /**
     * Adds the second array to the first array element by element. The arrays
     * must have the same length.
     * @param a1 Vector to which the second vector is added (modified in place).
     * @param a2 Vector to be added. This vector does not change.
     */
    public static void addVectors(double[] a1, double[] a2) {
        assert (a1 != null);
        assert (a2 != null);
        assert (a1.length == a2.length) : "Adding two arrays of different "
                + "length";
        for (int i = 0; i < a1.length; i++) {
            a1[i] += a2[i];
        }
    }
}
| Java |
/*
* Cluster.java
* Copyright (C) 2010 RWTH Aachen University, Germany
* @author Jansen (moa@cs.rwth-aachen.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.cluster;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import moa.AbstractMOAObject;
import weka.core.Instance;
/**
 * Abstract base class for all clusters in MOA: carries an id, an optional
 * ground-truth label and a map of named evaluation-measure values, and
 * defines the contract (center, weight, inclusion probability, sampling)
 * that concrete cluster types implement.
 */
public abstract class Cluster extends AbstractMOAObject {

    private static final long serialVersionUID = 1L;

    // -1 means "no id assigned yet".
    private double id = -1;

    // -1 means "no ground-truth label"; see isGroundTruth().
    private double gtLabel = -1;

    // Evaluation-measure values keyed by measure name, rendered by getInfo().
    private HashMap<String, String> measure_values;

    public Cluster(){
        this.measure_values = new HashMap<String, String>();
    }

    /**
     * @return the center of the cluster
     */
    public abstract double[] getCenter();

    /**
     * Returns the weight of this cluster, not neccessarily normalized.
     * It could, for instance, simply return the number of points contained
     * in this cluster.
     * @return the weight
     */
    public abstract double getWeight();

    /**
     * Returns the probability of the given point belonging to
     * this cluster.
     *
     * @param instance the point to test
     * @return a value between 0 and 1
     */
    public abstract double getInclusionProbability(Instance instance);

    //TODO: for non sphere cluster sample points, find out MIN MAX neighbours within cluster
    //and return the relative distance
    //public abstract double getRelativeHullDistance(Instance instance);

    @Override
    public void getDescription(StringBuilder sb, int i) {
        sb.append("Cluster Object");
    }

    public void setId(double id) {
        this.id = id;
    }

    public double getId() {
        return id;
    }

    public boolean isGroundTruth(){
        return gtLabel != -1;
    }

    public void setGroundTruth(double truth){
        gtLabel = truth;
    }

    public double getGroundTruth(){
        return gtLabel;
    }

    /**
     * Samples this cluster by returning a point from inside it.
     * @param random a random number source
     * @return an Instance that lies inside this cluster
     */
    public abstract Instance sample(Random random);

    public void setMeasureValue(String measureKey, String value){
        measure_values.put(measureKey, value);
    }

    public void setMeasureValue(String measureKey, double value){
        measure_values.put(measureKey, Double.toString(value));
    }

    /**
     * Returns the stored value for the given measure, or the empty string
     * if the measure has not been set.
     */
    public String getMeasureValue(String measureKey){
        if(measure_values.containsKey(measureKey))
            return measure_values.get(measureKey);
        else
            return "";
    }

    /**
     * Collects per-cluster display information as parallel title/value lists;
     * subclasses append their own rows after calling super.
     */
    protected void getClusterSpecificInfo(ArrayList<String> infoTitle,ArrayList<String> infoValue){
        infoTitle.add("ClusterID");
        infoValue.add(Integer.toString((int)getId()));

        infoTitle.add("Type");
        infoValue.add(getClass().getSimpleName());

        double c[] = getCenter();
        if(c!=null)
            for (int i = 0; i < c.length; i++) {
                infoTitle.add("Dim"+i);
                infoValue.add(Double.toString(c[i]));
            }

        infoTitle.add("Weight");
        infoValue.add(Double.toString(getWeight()));
    }

    /**
     * Renders the cluster properties and evaluation measures as an HTML
     * table (used by the GUI tooltips).
     */
    public String getInfo() {
        ArrayList<String> infoTitle = new ArrayList<String>();
        ArrayList<String> infoValue = new ArrayList<String>();
        getClusterSpecificInfo(infoTitle, infoValue);

        // StringBuilder instead of the legacy synchronized StringBuffer.
        StringBuilder sb = new StringBuilder();

        //Cluster properties
        sb.append("<html>");
        sb.append("<table>");
        int i = 0;
        while(i < infoTitle.size() && i < infoValue.size()){
            sb.append("<tr><td>").append(infoTitle.get(i))
              .append("</td><td>").append(infoValue.get(i)).append("</td></tr>");
            i++;
        }
        sb.append("</table>");

        //Evaluation info
        sb.append("<br>");
        sb.append("<b>Evaluation</b><br>");
        sb.append("<table>");
        // Typed iteration instead of a raw Iterator with casts.
        for (Map.Entry<String, String> e : measure_values.entrySet()) {
            sb.append("<tr><td>").append(e.getKey())
              .append("</td><td>").append(e.getValue()).append("</td></tr>");
        }
        sb.append("</table>");
        sb.append("</html>");
        return sb.toString();
    }
}
| Java |
/*
* MOAObject.java
* Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
* @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package moa;
import java.io.Serializable;
/**
* Interface implemented by classes in MOA, so that all are serializable,
* can produce copies of their objects, and can measure its memory size.
* They also give a string description.
*
* @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
* @version $Revision: 7 $
*/
/**
 * Interface implemented by classes in MOA, so that all are serializable,
 * can produce copies of their objects, and can measure its memory size.
 * They also give a string description.
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @version $Revision: 7 $
 */
public interface MOAObject extends Serializable {

    /**
     * Gets the memory size of this object.
     *
     * @return the memory size of this object, in bytes
     */
    public int measureByteSize();

    /**
     * This method produces a copy of this object.
     *
     * @return a copy of this object
     */
    public MOAObject copy();

    /**
     * Returns a string representation of this object.
     * Used in <code>AbstractMOAObject.toString</code>
     * to give a string representation of the object.
     *
     * @param sb the stringbuilder to add the description
     * @param indent the number of characters to indent
     */
    public void getDescription(StringBuilder sb, int indent);
}
| Java |
/*
* WekaClusteringAlgorithm.java
* Copyright (C) 2010 RWTH Aachen University, Germany
* @author Jansen (moa@cs.rwth-aachen.de)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package moa.clusterers;
import moa.cluster.Clustering;
import moa.core.AutoClassDiscovery;
import moa.core.AutoExpandVector;
import moa.core.Measurement;
import moa.options.ClassOption;
import moa.options.IntOption;
import moa.options.MultiChoiceOption;
import moa.options.StringOption;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.FastVector;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Utils;
/**
 * Adapter that lets Weka batch clusterers run inside MOA: instances are
 * buffered in {@link #trainOnInstanceImpl}, and the selected Weka algorithm
 * is (re)built over the buffer whenever {@link #getClusteringResult} is called.
 */
public class WekaClusteringAlgorithm extends AbstractClusterer{

    private static final long serialVersionUID = 1L;

    public IntOption horizonOption = new IntOption("horizon",
            'h', "Range of the window.", 1000);

    public MultiChoiceOption wekaAlgorithmOption;

    public StringOption parameterOption = new StringOption("parameter", 'p',
            "Parameters that will be passed to the weka algorithm. (e.g. '-N 5' for using SimpleKmeans with 5 clusters)", "-N 5 -S 8");

    // Weka clusterer classes discovered on the classpath (empty when weka.jar
    // is missing).
    private Class<?>[] clustererClasses;

    // Buffer of instances seen since the last clustering result.
    private Instances instances;

    // The currently configured Weka clusterer.
    private weka.clusterers.AbstractClusterer clusterer;

    /**
     * Discovers the available Weka clusterers and builds the corresponding
     * multi-choice option; when none are found the options are disabled so
     * that {@link #getPurposeString()} can report the missing dependency.
     */
    public WekaClusteringAlgorithm() {
        clustererClasses = findWekaClustererClasses();
        String[] optionLabels = new String[clustererClasses.length];
        String[] optionDescriptions = new String[clustererClasses.length];

        for (int i = 0; i < clustererClasses.length; i++) {
            optionLabels[i] = clustererClasses[i].getSimpleName();
            optionDescriptions[i] = clustererClasses[i].getName();
            // Note: each clusterer's own parameter list could be queried via
            // weka.core.OptionHandler.listOptions(), but there is currently
            // no place in the GUI to display it.
        }

        if(clustererClasses!=null && clustererClasses.length > 0){
            wekaAlgorithmOption = new MultiChoiceOption("clusterer", 'w',
                    "Weka cluster algorithm to use.",
                    optionLabels, optionDescriptions, 6);
        }
        else{
            // No weka.jar on the classpath; disable the dependent options.
            horizonOption = null;
            parameterOption = null;
        }
    }

    @Override
    public void resetLearningImpl() {
        try {
            instances = null;
            String clistring = clustererClasses[wekaAlgorithmOption.getChosenIndex()].getName();
            clusterer = (weka.clusterers.AbstractClusterer) ClassOption.cliStringToObject(clistring, weka.clusterers.Clusterer.class, null);

            // Robustness fix: split on runs of whitespace and tolerate a blank
            // parameter string. The previous split(" ") produced empty tokens
            // for doubled/leading spaces, which setOptions/
            // checkForRemainingOptions could then reject.
            String rawOptions = parameterOption.getValue().trim();
            String[] options = rawOptions.isEmpty() ? new String[0] : rawOptions.split("\\s+");
            if(clusterer instanceof weka.core.OptionHandler){
                ((weka.core.OptionHandler)clusterer).setOptions(options);
                Utils.checkForRemainingOptions(options);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    @Override
    public void trainOnInstanceImpl(Instance inst) {
        if(instances == null){
            instances = getDataset(inst.numAttributes(), 0);
        }
        instances.add(inst);
    }

    /**
     * Runs the configured Weka clusterer over the buffered instances and
     * converts its assignment into a MOA {@link Clustering}. The buffer is
     * cleared afterwards.
     */
    public Clustering getClusteringResult() {
        Clustering clustering = null;
        try {
            clusterer.buildClusterer(instances);
            int numClusters = clusterer.numberOfClusters();
            Instances dataset = getDataset(instances.numAttributes(), numClusters);
            Instances newInstances = new Instances(dataset);
            // Re-label every instance with the cluster the Weka algorithm
            // assigned to it, stored as an extra class attribute.
            for (int i = 0; i < instances.numInstances(); i++) {
                Instance inst = instances.get(i);
                int cnum = clusterer.clusterInstance(inst);

                Instance newInst = new DenseInstance(inst);
                newInst.insertAttributeAt(inst.numAttributes());
                newInst.setDataset(dataset);
                newInst.setClassValue(cnum);
                newInstances.add(newInst);
            }
            clustering = new Clustering(newInstances);
        } catch (Exception e) {
            e.printStackTrace();
        }
        instances = null;
        return clustering;
    }

    /**
     * Builds an empty dataset with {@code numdim} numeric attributes and,
     * when {@code numclass > 0}, a nominal class attribute with that many
     * values (set as the class index).
     */
    public Instances getDataset(int numdim, int numclass) {
        FastVector attributes = new FastVector();
        for (int i = 0; i < numdim; i++) {
            attributes.addElement(new Attribute("att" + (i + 1)));
        }
        if(numclass > 0){
            FastVector classLabels = new FastVector();
            for (int i = 0; i < numclass; i++) {
                classLabels.addElement("class" + (i + 1));
            }
            attributes.addElement(new Attribute("class", classLabels));
        }
        Instances myDataset = new Instances("horizion", attributes, 0);
        if(numclass > 0){
            myDataset.setClassIndex(myDataset.numAttributes() - 1);
        }
        return myDataset;
    }

    // Scans the weka.clusterers package for concrete clusterer classes.
    private Class<?>[] findWekaClustererClasses() {
        AutoExpandVector<Class<?>> finalClasses = new AutoExpandVector<Class<?>>();
        Class<?>[] classesFound = AutoClassDiscovery.findClassesOfType("weka.clusterers",
                weka.clusterers.AbstractClusterer.class);
        for (Class<?> foundClass : classesFound) {
            finalClasses.add(foundClass);
        }
        return finalClasses.toArray(new Class<?>[finalClasses.size()]);
    }

    @Override
    protected Measurement[] getModelMeasurementsImpl() {
        throw new UnsupportedOperationException("Not supported yet.");
    }

    @Override
    public void getModelDescription(StringBuilder out, int indent) {
    }

    public boolean isRandomizable() {
        return false;
    }

    public double[] getVotesForInstance(Instance inst) {
        return null;
    }

    @Override
    public boolean keepClassLabel(){
        return false;
    }

    @Override
    public String getPurposeString() {
        String purpose = "MOA Clusterer: " + getClass().getCanonicalName();
        if(clustererClasses==null || clustererClasses.length == 0)
            purpose+="\nPlease add weka.jar to the classpath to use Weka clustering algorithms.";
        return purpose;
    }
}
| Java |
/*
* Clusterer.java
* Copyright (C) 2009 University of Waikato, Hamilton, New Zealand
* @author Albert Bifet (abifet@cs.waikato.ac.nz)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package moa.clusterers;
import moa.MOAObject;
import moa.cluster.Clustering;
import moa.core.InstancesHeader;
import moa.core.Measurement;
import moa.gui.AWTRenderable;
import moa.options.OptionHandler;
import weka.core.Instance;
/**
 * Contract for all MOA clustering algorithms: incremental training on
 * instances, model/context management, and retrieval of (micro) clustering
 * results.
 */
public interface Clusterer extends MOAObject, OptionHandler, AWTRenderable {

    /** Sets the header (attribute structure) of the instances to be clustered. */
    public void setModelContext(InstancesHeader ih);

    /** Returns the header describing the instances this clusterer expects. */
    public InstancesHeader getModelContext();

    /** Returns true if the algorithm's behavior depends on a random seed. */
    public boolean isRandomizable();

    /** Sets the random seed (only meaningful when isRandomizable() is true). */
    public void setRandomSeed(int s);

    /** Returns true once at least one instance has been used for training. */
    public boolean trainingHasStarted();

    /** Returns the total weight of the instances trained on so far. */
    public double trainingWeightSeenByModel();

    /** Resets the clusterer to its initial, untrained state. */
    public void resetLearning();

    /** Incrementally trains the clusterer on a single instance. */
    public void trainOnInstance(Instance inst);

    /** Returns per-cluster membership votes for the given instance. */
    public double[] getVotesForInstance(Instance inst);

    /** Returns the current evaluation measurements of the model. */
    public Measurement[] getModelMeasurements();

    /** Returns sub-clusterers for composite algorithms, if any. */
    public Clusterer[] getSubClusterers();

    /** Returns a copy of this clusterer. */
    public Clusterer copy();

    /** Returns the current (macro) clustering result. */
    public Clustering getClusteringResult();

    /** Returns true if this algorithm maintains micro clusters. */
    public boolean implementsMicroClusterer();

    /** Returns the current micro clustering (only if implementsMicroClusterer()). */
    public Clustering getMicroClusteringResult();

    /** Returns true if the class label should be kept on training instances. */
    public boolean keepClassLabel();
}
| Java |
/*
* Node.java
* Copyright (C) 2010 RWTH Aachen University, Germany
* @author Sanchez Villaamil (moa@cs.rwth-aachen.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.clustree;
public class Node {
final int NUMBER_ENTRIES = 3;
static int INSERTIONS_BETWEEN_CLEANUPS = 10000;
/**
* The children of this node.
*/
private Entry[] entries;
// Information about the state in the tree.
/**
* The depth at which this <code>Node</code> is in the tree.
*/
private int level;
/**
* Initialze a normal node, which is not fake.
* @param numberDimensions The dimensionality of the data points it
* manipulates.
* @param level The INVERSE level at which this node hangs.
*/
/**
 * Initializes a normal (non-fake) node at the given inverse level, with all
 * entry slots pre-populated so callers never have to deal with nulls.
 *
 * @param numberDimensions dimensionality of the points this node handles
 * @param level the INVERSE level at which this node hangs in the tree
 */
public Node(int numberDimensions, int level) {
    this.level = level;
    this.entries = new Entry[NUMBER_ENTRIES];
    // Eagerly create every entry and link it back to this node; having no
    // null slots keeps the rest of the tree code simple.
    for (int slot = 0; slot < NUMBER_ENTRIES; slot++) {
        Entry fresh = new Entry(numberDimensions);
        fresh.setNode(this);
        entries[slot] = fresh;
    }
}
/**
* Initialiazes a node which is a fake root depending on the given
* <code>boolean</code>.
* @param numberDimensions The dimensionality of the data points it
* manipulates.
* @param level The level at which this node hangs.
* @param fakeRoot A parameter the says if the node is to be fake or not.
*/
/**
 * Initializes a node intended as a fake root.
 *
 * @param numberDimensions dimensionality of the points this node handles
 * @param numberClasses currently unused by this constructor
 * @param level the level at which this node hangs
 * @param fakeRoot distinguishes this overload; the flag itself is not stored
 *
 * NOTE(review): unlike the public constructor, the entries created here are
 * not linked back via entry.setNode(this) — confirm whether that is
 * intentional for fake roots.
 */
protected Node(int numberDimensions, int numberClasses, int level,
        boolean fakeRoot) {
    this.level = level;
    this.entries = new Entry[NUMBER_ENTRIES];
    // Generate all entries when we generate a Node.
    // That no entry can be null makes it much easier to handle.
    for (int i = 0; i < NUMBER_ENTRIES; i++) {
        entries[i] = new Entry(numberDimensions);
    }
}
/**
* USED FOR EM_TOP_DOWN BULK LOADING
* @param numberDimensions
* @param level
* @param argEntries
*/
/**
 * USED FOR EM_TOP_DOWN BULK LOADING: builds a node whose entries are taken
 * from the supplied array (the first NUMBER_ENTRIES references are copied
 * into a fresh internal array).
 *
 * @param numberDimensions dimensionality of the points (kept for API symmetry)
 * @param level the level at which this node hangs
 * @param argEntries source of the entries; must hold at least NUMBER_ENTRIES
 */
public Node(int numberDimensions, int level, Entry[] argEntries) {
    this.level = level;
    this.entries = new Entry[NUMBER_ENTRIES];
    // Copy the references so later mutation of the caller's array cannot
    // swap out this node's entry slots.
    System.arraycopy(argEntries, 0, this.entries, 0, NUMBER_ENTRIES);
}
/**
* Checks if this node is a leaf. A node is a leaf when none of the entries
* in the node have children.
* @return <code>true</code> if the node is leaf, <code>false</code>
* otherwise.
*/
protected boolean isLeaf() {
for (int i = 0; i < entries.length; i++) {
Entry entry = entries[i];
if (entry.getChild() != null) {
return false;
}
}
return true;
}
/**
* Returns the neareast <code>Entry</code> to the given <code>Cluster</code>.
* The distance is minimized over <code>Entry.calcDistance(Cluster)</code>.
* @param buffer The cluster to which the distance has to be compared.
* @return The <code>Entry</code> with minimal distance to the given
* cluster.
* @throws EmptyNodeException This Exception is thrown when this function is
* called on an empty node.
* @see Kernel
* @see Entry#calcDistance(moa.clusterers.tree.Kernel)
*/
public Entry nearestEntry(ClusKernel buffer) {
// TODO: (Fernando) Adapt the nearestEntry(...) function to the new algorithmn.
Entry res = entries[0];
double min = res.calcDistance(buffer);
for (int i = 1; i < entries.length; i++) {
Entry entry = entries[i];
if (entry.isEmpty()) {
break;
}
double distance = entry.calcDistance(buffer);
if (distance < min) {
min = distance;
res = entry;
}
}
return res;
}
/**
* Return the nearest entry to the given one. The
* <code>calcDistance(Entry)</code> function is find the one with the
* shortest distance in this node to the given one.
* @param newEntry The entry to which the entry with the minimal distance
* to it is calculated.
* @return The entry with the minimal distance to the given one.
*/
protected Entry nearestEntry(Entry newEntry) {
assert (!this.entries[0].isEmpty());
Entry res = entries[0];
double min = res.calcDistance(newEntry);
for (int i = 1; i < entries.length; i++) {
if (this.entries[i].isEmpty()) {
break;
}
Entry entry = entries[i];
double distance = entry.calcDistance(newEntry);
if (distance < min) {
min = distance;
res = entry;
}
}
return res;
}
/**
* Return the number of free <code>Entry</code>s in this node.
* @return The number of free <code>Entry</code>s in this node.
* @see Entry
*/
protected int numFreeEntries() {
int res = 0;
for (int i = 0; i < entries.length; i++) {
Entry entry = entries[i];
if (entry.isEmpty()) {
res++;
}
}
assert (NUMBER_ENTRIES == entries.length);
return res;
}
/**
* Add a new <code>Entry</code> to this node. If there is no space left a
* <code>NoFreeEntryException</code> is thrown.
* @param newEntry The <code>Entry</code> to be added.
* @throws NoFreeEntryException Is thrown when there is no space left in
* the node for the new entry.
*/
public void addEntry(Entry newEntry, long currentTime){
newEntry.setNode(this);
int freePosition = getNextEmptyPosition();
entries[freePosition].initializeEntry(newEntry, currentTime);
}
/**
* Returns the position of the next free Entry.
* @return The position of the next free Entry.
* @throws NoFreeEntryException Is thrown when there is no free entry left in
* the node.
*/
private int getNextEmptyPosition(){
int counter;
for (counter = 0; counter < entries.length; counter++) {
Entry e = entries[counter];
if (e.isEmpty()) {
break;
}
}
if (counter == entries.length) {
throw new RuntimeException("Entry added to a node which is already full.");
}
return counter;
}
/**
* If there exists an entry, whose relevance is under the threshold given
* as a parameter to the tree, this entry is returned. Otherwise
* <code>null</code> is returned.
* @return An irrelevant <code>Entry</code> if there exists one,
* <code>null</code> otherwise.
* @see Entry
* @see Entry#isIrrelevant(double)
*/
protected Entry getIrrelevantEntry(double threshold) {
for (int i = 0; i < this.entries.length; i++) {
Entry entry = this.entries[i];
if (entry.isIrrelevant(threshold)) {
return entry;
}
}
return null;
}
/**
* Return an array with references to the children of this node. These are
* not copies, that means side effects are possible!
* @return An array with references to the children of this node.
* @see Entry
*/
public Entry[] getEntries() {
return entries;
}
/**
* Return the level number in the node. This is not the real level. For the
* real level one has to call <code>getLevel(Tree tree)</code>.
* @return The raw level of the node.
* @see #getLevel(moa.clusterers.LiarTree.Tree)
*/
protected int getRawLevel() {
return level;
}
/**
* Returns the level at which this node is in the tree. If a tree is passed
* to which the node does not belonged a value is returned, but it is
* gibberish.
* @param tree The tree to which this node belongs.
* @return The level at which this node hangs.
*/
protected int getLevel(ClusTree tree) {
int numRootSplits = tree.getNumRootSplits();
return numRootSplits - this.getRawLevel();
}
/**
* Clear this Node, which means that the noiseBuffer is cleared, that
* <code>shallowClear</code> is called upon all the entries of the node,
* that the split counter is set to zero and the node is set to not be a
* fake root. Notice that the level stays the same after calling this
* function.
* @see Kernel#clear()
* @see Entry#shallowClear()
*/
// Level stays the same.
protected void clear() {
for (int i = 0; i < NUMBER_ENTRIES; i++) {
entries[i].shallowClear();
}
}
/**
* Merge the two entries at the given position. The entries are reordered in
* the <code>entries</code> array so that the non-empty entries are still
* at the beginning.
* @param pos1 The position of the first entry to be merged. This position
* has to be smaller than the the second position.
* @param pos2 The position of the second entry to be merged. This position
* has to be greater than the the first position.
*/
protected void mergeEntries(int pos1, int pos2) {
assert (this.numFreeEntries() == 0);
assert (pos1 < pos2);
this.entries[pos1].mergeWith(this.entries[pos2]);
for (int i = pos2; i < entries.length - 1; i++) {
entries[i] = entries[i + 1];
}
entries[entries.length - 1].clear();
}
protected void makeOlder(long currentTime, double negLambda) {
for (int i = 0; i < this.entries.length; i++) {
Entry entry = this.entries[i];
if (entry.isEmpty()) {
break;
}
entry.makeOlder(currentTime, negLambda);
}
}
}
| Java |
/*
* ClusTree.java
* Copyright (C) 2010 RWTH Aachen University, Germany
* @author Sanchez Villaamil (moa@cs.rwth-aachen.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.clustree;
import java.util.ArrayList;
import java.util.LinkedList;
import moa.clusterers.clustree.util.*;
import moa.cluster.Clustering;
import moa.clusterers.AbstractClusterer;
import moa.core.Measurement;
import moa.options.IntOption;
import weka.core.Instance;
/**
* Citation: ClusTree: Philipp Kranen, Ira Assent, Corinna Baldauf, Thomas Seidl:
* The ClusTree: indexing micro-clusters for anytime stream mining.
* Knowl. Inf. Syst. 29(2): 249-272 (2011)
*/
public class ClusTree extends AbstractClusterer{
    private static final long serialVersionUID = 1L;

    public IntOption horizonOption = new IntOption("horizon",
            'h', "Range of the window.", 1000);

    public IntOption maxHeightOption = new IntOption(
            "maxHeight", 'H',
            "The maximal height of the tree", getDefaultHeight());

    /** Default value for the maximal tree height option. */
    protected int getDefaultHeight() {
        return 8;
    }

    /** How many insertions happen between two cleanup passes over the tree. */
    private static int INSERTIONS_BETWEEN_CLEANUPS = 10000;

    /**
     * The root node of the tree.
     */
    protected Node root;
    // Information about the data represented in this tree.
    /**
     * Dimensionality of the data points managed by this tree.
     */
    private int numberDimensions;
    /**
     * Parameter for the weighting function used to weight the entries.
     */
    protected double negLambda;
    /**
     * The current height of the tree. Should always be smaller than maxHeight.
     */
    private int height;
    /**
     * The maximal height of the tree.
     */
    protected int maxHeight;
    /**
     * This variable is used to keep the inverse height that is stored in every
     * node correct.
     */
    private int numRootSplits;
    /**
     * The threshold for the weighting of an Entry. An Entry is irrelevant, if
     * it is in a leaf and the weightedN of the data Cluster is smaller than
     * this threshold.
     * @see Entry#data
     */
    private double weightThreshold = 0.05;
    /**
     * Number of points inserted into the tree.
     */
    private int numberInsertions;
    /** Logical time, incremented once per processed instance. */
    private long timestamp;
    /**
     * Parameter to determine which strategy to use
     */
    //TODO: Add Option for that
    protected boolean breadthFirstStrat = false;
    //TODO: cleanup
    private Entry alsoUpdate;

    /** Resets the learner to its initial, empty state. */
    @Override
    public void resetLearningImpl() {
        // Decay rate derived from the horizon so that a point's weight halves
        // weightThreshold-wise over the window.
        negLambda = (1.0 / (double) horizonOption.getValue())
                * (Math.log(weightThreshold) / Math.log(2));
        maxHeight = maxHeightOption.getValue();
        numberDimensions = -1;
        root = null;
        timestamp = 0;
        height = 0;
        numRootSplits = 0;
        numberInsertions = 0;
    }

    @Override
    protected Measurement[] getModelMeasurementsImpl() {
        return null;
    }

    public boolean isRandomizable() {
        return false;
    }

    @Override
    public void getModelDescription(StringBuilder out, int indent) {
    }

    public double[] getVotesForInstance(Instance inst) {
        return null;
    }

    @Override
    public boolean implementsMicroClusterer() {
        return true;
    }

    /**
     * Wraps the instance in a single-point kernel and inserts it into the
     * tree, lazily initializing the root on the first instance.
     */
    @Override
    public void trainOnInstanceImpl(Instance instance) {
        timestamp++;
        //TODO check if instance contains label
        if(root == null){
            numberDimensions = instance.numAttributes();
            root = new Node(numberDimensions, 0);
        }
        else{
            if(numberDimensions!=instance.numAttributes())
                System.out.println("Wrong dimensionality, expected:"+numberDimensions+ " found:"+instance.numAttributes());
        }
        ClusKernel newPointAsKernel = new ClusKernel(instance.toDoubleArray(), numberDimensions);
        insert(newPointAsKernel, new SimpleBudget(1000),timestamp);
    }

    /**
     * Insert a new point in the <code>Tree</code>. The point should be
     * represented as a cluster with a single data point(i.e. N = 1). A
     * <code>Budget</code> class is also given, which is informed of the number
     * of operation the tree does, and informs the tree when it does not have
     * time left and should stop the insertion.
     * @param newPoint The point to be inserted.
     * @param budget The budget and statistics recollector for the insertion.
     * @param timestamp The moment at which this point is inserted.
     * @see Budget
     */
    public void insert(ClusKernel newPoint, Budget budget, long timestamp) {
        if (breadthFirstStrat){
            insertBreadthFirst(newPoint, budget, timestamp);
        }
        else{
            Entry rootEntry = new Entry(this.numberDimensions,
                    root, timestamp, null, null);
            ClusKernel carriedBuffer = new ClusKernel(this.numberDimensions);
            Entry toInsertHere = insert(newPoint, carriedBuffer, root, rootEntry,
                    budget, timestamp);
            // A non-null return means a split propagated all the way up: grow
            // the tree by one level with a new root holding both halves.
            if (toInsertHere != null) {
                this.numRootSplits++;
                this.height += this.height < this.maxHeight ? 1 : 0;
                Node newRoot = new Node(this.numberDimensions,
                        toInsertHere.getChild().getRawLevel() + 1);
                newRoot.addEntry(rootEntry, timestamp);
                newRoot.addEntry(toInsertHere, timestamp);
                rootEntry.setNode(newRoot);
                toInsertHere.setNode(newRoot);
                this.root = newRoot;
            }
        }
        this.numberInsertions++;
        if (this.numberInsertions % INSERTIONS_BETWEEN_CLEANUPS == 0) {
            cleanUp(this.root, 0);
        }
    }

    /**
     * insert newPoint into the tree using the BreadthFirst strategy, i.e.: insert into
     * the closest entry in a leaf node.
     * @param newPoint The point to be inserted.
     * @param budget The budget for the insertion (currently unused here).
     * @param timestamp The moment at which this point is inserted.
     * @return Always <code>null</code>; splits are handled internally.
     */
    private Entry insertBreadthFirst(ClusKernel newPoint, Budget budget, long timestamp) {
        //check all leaf nodes and get the one with the closest entry to newPoint
        Node bestFit = findBestLeafNode(newPoint);
        bestFit.makeOlder(timestamp, negLambda);
        Entry parent = bestFit.getEntries()[0].getParentEntry();
        // Search for an Entry with a weight under the threshold.
        Entry irrelevantEntry = bestFit.getIrrelevantEntry(this.weightThreshold);
        int numFreeEntries = bestFit.numFreeEntries();
        Entry newEntry = new Entry(newPoint.getCenter().length,
                newPoint, timestamp, parent, bestFit);
        //if there is space, add it to the node ( doesn't ever occur, since nodes are created with 3 entries)
        if (numFreeEntries>0){
            bestFit.addEntry(newEntry, timestamp);
        }
        //if outdated cluster in this best fitting node, replace it
        else if (irrelevantEntry != null) {
            irrelevantEntry.overwriteOldEntry(newEntry);
        }
        //if there is space/outdated cluster on path to top, split. Else merge without split
        else {
            if (existsOutdatedEntryOnPath(bestFit)||!this.hasMaximalSize()){
                // We have to split.
                insertHereWithSplit(newEntry, bestFit, timestamp);
            }
            else {
                mergeEntryWithoutSplit(bestFit, newEntry,
                        timestamp);
            }
        }
        //update all nodes on path to top.
        if (bestFit.getEntries()[0].getParentEntry()!=null)
            updateToTop(bestFit.getEntries()[0].getParentEntry().getNode());
        return null;
    }

    /**
     * This method checks if there is an outdated (or empty) entry on the path from node to root.
     * It updates the weights of nodes on path and then checks if it is outdated.
     * @param node The node from which the path to the root is checked.
     * @return true if an outdated/empty entry exists on the path
     */
    private boolean existsOutdatedEntryOnPath(Node node) {
        if (node == root){
            node.makeOlder(timestamp, negLambda);
            return node.getIrrelevantEntry(this.weightThreshold)!=null;
        }
        do{
            node = node.getEntries()[0].getParentEntry().getNode();
            node.makeOlder(timestamp, negLambda);
            for (Entry e : node.getEntries()){
                e.recalculateData();
            }
            if (node.numFreeEntries()>0)
                return true;
            if (node.getIrrelevantEntry(this.weightThreshold)!=null)
                return true;
        }while(node.getEntries()[0].getParentEntry()!=null);
        return false;
    }

    /**
     * recalculates data for all entries, that lie on the path from the root to the
     * Entry toUpdate.
     */
    private void updateToTop(Node toUpdate) {
        while(toUpdate!=null){
            for (Entry e: toUpdate.getEntries())
                e.recalculateData();
            if (toUpdate.getEntries()[0].getParentEntry()==null)
                break;
            toUpdate=toUpdate.getEntries()[0].getParentEntry().getNode();
        }
    }

    /**
     * Method called by insertBreadthFirst. Inserts the entry into the given
     * node, reusing irrelevant entries or free slots when possible, and
     * splitting (recursively up to the root) otherwise.
     * @param toInsert The entry to be inserted.
     * @param insertNode The node into which the entry is inserted.
     * @param timestamp The moment at which this insertion occurs.
     * @return Always <code>null</code>; recursion handles residual entries.
     */
    private Entry insertHereWithSplit(Entry toInsert, Node insertNode,
            long timestamp) {
        //Handle root split
        if (insertNode.getEntries()[0].getParentEntry()==null){
            root.makeOlder(timestamp, negLambda);
            Entry irrelevantEntry = insertNode.getIrrelevantEntry(this.weightThreshold);
            int numFreeEntries = insertNode.numFreeEntries();
            if (irrelevantEntry != null) {
                irrelevantEntry.overwriteOldEntry(toInsert);
            }
            else if (numFreeEntries>0){
                insertNode.addEntry(toInsert, timestamp);
            }
            else{
                this.numRootSplits++;
                this.height += this.height < this.maxHeight ? 1 : 0;
                Entry oldRootEntry = new Entry(this.numberDimensions,
                        root, timestamp, null, null);
                Node newRoot = new Node(this.numberDimensions,
                        this.height);
                Entry newRootEntry = split(toInsert, root, oldRootEntry, timestamp);
                newRoot.addEntry(oldRootEntry, timestamp);
                newRoot.addEntry(newRootEntry, timestamp);
                this.root = newRoot;
                for (Entry c : oldRootEntry.getChild().getEntries())
                    c.setParentEntry(root.getEntries()[0]);
                for (Entry c : newRootEntry.getChild().getEntries())
                    c.setParentEntry(root.getEntries()[1]);
            }
            return null;
        }
        insertNode.makeOlder(timestamp, negLambda);
        Entry irrelevantEntry = insertNode.getIrrelevantEntry(this.weightThreshold);
        int numFreeEntries = insertNode.numFreeEntries();
        if (irrelevantEntry != null) {
            irrelevantEntry.overwriteOldEntry(toInsert);
        }
        else if (numFreeEntries>0){
            insertNode.addEntry(toInsert, timestamp);
        }
        else {
            // We have to split.
            Entry parentEntry = insertNode.getEntries()[0].getParentEntry();
            Entry residualEntry = split(toInsert, insertNode, parentEntry, timestamp);
            // NOTE(review): this assignment only happens when alsoUpdate is
            // already non-null, which looks suspicious -- confirm intent.
            if (alsoUpdate!=null){
                alsoUpdate = residualEntry;
            }
            Node nodeForResidualEntry = insertNode.getEntries()[0].getParentEntry().getNode();
            //recursive call
            return insertHereWithSplit(residualEntry, nodeForResidualEntry, timestamp);
        }
        //no Split
        return null;
    }

    // XXX: Document the insertion when the final implementation is done.
    private Entry insertHere(Entry newEntry, Node currentNode,
            Entry parentEntry, ClusKernel carriedBuffer, Budget budget,
            long timestamp) {
        int numFreeEntries = currentNode.numFreeEntries();
        // Insert the buffer that we carry.
        if (!carriedBuffer.isEmpty()) {
            Entry bufferEntry = new Entry(this.numberDimensions,
                    carriedBuffer, timestamp, parentEntry, currentNode);
            if (numFreeEntries <= 1) {
                // Distance from buffer to entries.
                Entry nearestEntryToCarriedBuffer =
                        currentNode.nearestEntry(newEntry);
                double distanceNearestEntryToBuffer =
                        nearestEntryToCarriedBuffer.calcDistance(newEntry);
                // Distance between buffer and point to insert.
                double distanceBufferNewEntry =
                        newEntry.calcDistance(carriedBuffer);
                // Best distance between Entrys in the Node.
                BestMergeInNode bestMergeInNode =
                        calculateBestMergeInNode(currentNode);
                // See what the minimal distance is and do the corresponding
                // action.
                if (distanceNearestEntryToBuffer <= distanceBufferNewEntry
                        && distanceNearestEntryToBuffer <= bestMergeInNode.distance) {
                    // Aggregate buffer entry to nearest entry in node.
                    nearestEntryToCarriedBuffer.aggregateEntry(bufferEntry,
                            timestamp, this.negLambda);
                } else if (distanceBufferNewEntry <= distanceNearestEntryToBuffer
                        && distanceBufferNewEntry <= bestMergeInNode.distance) {
                    newEntry.mergeWith(bufferEntry);
                } else {
                    currentNode.mergeEntries(bestMergeInNode.entryPos1,
                            bestMergeInNode.entryPos2);
                    currentNode.addEntry(bufferEntry, timestamp);
                }
            } else {
                assert (currentNode.isLeaf());
                currentNode.addEntry(bufferEntry, timestamp);
            }
        }
        // Normally the insertion of the carried buffer does not change the
        // number of free entries, but in case of future changes we calculate
        // the number again.
        numFreeEntries = currentNode.numFreeEntries();
        // Search for an Entry with a weight under the threshold.
        Entry irrelevantEntry = currentNode.getIrrelevantEntry(this.weightThreshold);
        if (currentNode.isLeaf() && irrelevantEntry != null) {
            irrelevantEntry.overwriteOldEntry(newEntry);
        } else if (numFreeEntries >= 1) {
            currentNode.addEntry(newEntry, timestamp);
        } else {
            if (currentNode.isLeaf() && (this.hasMaximalSize()
                    || !budget.hasMoreTime())) {
                mergeEntryWithoutSplit(currentNode, newEntry,
                        timestamp);
            } else {
                // We have to split.
                return split(newEntry, currentNode, parentEntry, timestamp);
            }
        }
        return null;
    }

    /**
     * This method calculates the distances between the new point and each Entry in a leaf node.
     * It returns the node that contains the entry with the smallest distance
     * to the new point.
     * @param newPoint The point to be inserted.
     * @return best fitting node
     */
    private Node findBestLeafNode(ClusKernel newPoint) {
        double minDist = Double.MAX_VALUE;
        Node bestFit = null;
        for (Node e: collectLeafNodes(root)){
            if (newPoint.calcDistance(e.nearestEntry(newPoint).getData())<minDist){
                bestFit = e;
                minDist = newPoint.calcDistance(e.nearestEntry(newPoint).getData());
            }
        }
        if (bestFit!=null)
            return bestFit;
        else
            return root;
    }

    /** Recursively collects all leaf nodes of the subtree rooted at curr. */
    private ArrayList<Node> collectLeafNodes(Node curr){
        ArrayList<Node> toReturn = new ArrayList<Node>();
        if (curr==null)
            return toReturn;
        if (curr.isLeaf()){
            toReturn.add(curr);
            return toReturn;
        }
        else{
            for (Entry e : curr.getEntries())
                toReturn.addAll(collectLeafNodes(e.getChild()));
            return toReturn;
        }
    }

    // TODO: Expand all function that work on entries to work with the Budget.
    private Entry insert(ClusKernel pointToInsert, ClusKernel carriedBuffer,
            Node currentNode, Entry parentEntry, Budget budget, long timestamp) {
        assert (currentNode != null);
        assert (currentNode.isLeaf()
                || currentNode.getEntries()[0].getChild() != null);
        currentNode.makeOlder(timestamp, this.negLambda);
        // This variable will be changed from to null to an actual reference
        // in the following if-else block if we have to insert something here,
        // either because this is a leaf, or because of split propagation.
        Entry toInsertHere = null;
        if (currentNode.isLeaf()) {
            // At the end of the function the entry will be inserted.
            toInsertHere = new Entry(this.numberDimensions,
                    pointToInsert, timestamp, parentEntry, currentNode);
        } else {
            Entry bestEntry = currentNode.nearestEntry(pointToInsert);
            bestEntry.aggregateCluster(pointToInsert, timestamp,
                    this.negLambda);
            boolean isCarriedBufferEmpty = carriedBuffer.isEmpty();
            Entry bestBufferEntry = null;
            if (!isCarriedBufferEmpty) {
                bestBufferEntry = currentNode.nearestEntry(carriedBuffer);
                bestBufferEntry.aggregateCluster(carriedBuffer, timestamp,
                        this.negLambda);
            }
            if (!budget.hasMoreTime()) {
                bestEntry.aggregateToBuffer(pointToInsert, timestamp,
                        this.negLambda);
                if (!isCarriedBufferEmpty) {
                    bestBufferEntry.aggregateToBuffer(carriedBuffer,
                            timestamp, this.negLambda);
                }
                return null;
            }
            // If the way of the buffer differs from the way of the point to
            // be inserted, leave the buffer here.
            if (!isCarriedBufferEmpty && (bestEntry != bestBufferEntry)) {
                bestBufferEntry.aggregateToBuffer(carriedBuffer, timestamp,
                        this.negLambda);
                carriedBuffer.clear();
            }
            // Take the buffer of the best entry for the point to be inserted
            // along.
            ClusKernel takeAlongBuffer = bestEntry.emptyBuffer(timestamp,
                    this.negLambda);
            carriedBuffer.add(takeAlongBuffer);
            // Recursive call.
            toInsertHere = insert(pointToInsert, carriedBuffer,
                    bestEntry.getChild(), bestEntry, budget, timestamp);
        }
        // If the above block has a new Entry for this place insert it.
        if (toInsertHere != null) {
            return this.insertHere(toInsertHere, currentNode, parentEntry,
                    carriedBuffer, budget, timestamp);
        }
        // If nothing else needs to be done in all the above levels
        // return null to signalize it.
        return null;
    }

    /**
     * Inserts an <code>Entry</code> into a <code>Node</code> without inducing
     * a split.
     * @param node The node at which the entry is to be inserted.
     * @param newEntry The entry to be inserted.
     * @param timestamp The moment at which this occurs.
     */
    private void mergeEntryWithoutSplit(Node node,
            Entry newEntry, long timestamp) {
        Entry nearestEntryToCarriedBuffer =
                node.nearestEntry(newEntry);
        double distanceNearestEntryToBuffer =
                nearestEntryToCarriedBuffer.calcDistance(newEntry);
        BestMergeInNode bestMergeInNode =
                calculateBestMergeInNode(node);
        if (distanceNearestEntryToBuffer < bestMergeInNode.distance) {
            nearestEntryToCarriedBuffer.aggregateEntry(newEntry, timestamp,
                    this.negLambda);
        } else {
            node.mergeEntries(bestMergeInNode.entryPos1,
                    bestMergeInNode.entryPos2);
            node.addEntry(newEntry, timestamp);
        }
    }

    /**
     * Calculates the best merge possible between two nodes in a node. This
     * means that the pair with the smallest distance is found.
     * @param node The node in which these two entries have to be found.
     * @return An object which encodes the two position of the entries with the
     * smallest distance in the node and the distance between them.
     * @see BestMergeInNode
     */
    private BestMergeInNode calculateBestMergeInNode(Node node) {
        assert (node.numFreeEntries() == 0);
        Entry[] entries = node.getEntries();
        int toMerge1 = -1;
        int toMerge2 = -1;
        double distanceBetweenMergeEntries = Double.NaN;
        double minDistance = Double.MAX_VALUE;
        for (int i = 0; i < entries.length; i++) {
            Entry e1 = entries[i];
            for (int j = i + 1; j < entries.length; j++) {
                Entry e2 = entries[j];
                double distance = e1.calcDistance(e2);
                if (distance < minDistance) {
                    // BUGFIX: minDistance was never updated, so the
                    // comparison was always true and the LAST pair was
                    // returned instead of the pair with minimal distance.
                    minDistance = distance;
                    toMerge1 = i;
                    toMerge2 = j;
                    distanceBetweenMergeEntries = distance;
                }
            }
        }
        assert (toMerge1 != -1 && toMerge2 != -1);
        if (Double.isNaN(distanceBetweenMergeEntries)) {
            throw new RuntimeException("The minimal distance between two "
                    + "Entrys in a Node was Double.MAX_VALUE. That can hardly "
                    + "be right.");
        }
        return new BestMergeInNode(toMerge1, toMerge2,
                distanceBetweenMergeEntries);
    }

    private boolean hasMaximalSize() {
        // TODO: Improve hasMaximalSize(). For now it just works somehow for testing.
        return this.height == this.maxHeight;
    }

    /**
     * Performs a (2,2) split on the given node with the given entry. This
     * implementation only works if the nodes have three entries each. The split
     * will generate two new nodes. One of them will be put where the old node
     * was, and for the other a new <code>Entry</code> will be generated and
     * returned.
     * @param newEntry The entry to be added to the node.
     * @param node The node that is going to be splitted.
     * @param parentEntry The entry in the tree that points at the node that
     * is going to be splitted.
     * @param timestamp The moment at which this split occurs.
     * @return An entry which points at the second node created in the split.
     * This entry has to be introduced later in the tree.
     */
    private Entry split(Entry newEntry, Node node, Entry parentEntry,
            long timestamp) {
        // The implemented split function only works in trees where node
        // have three entries.
        // Splitting only makes sense on full nodes.
        assert (node.numFreeEntries() == 0);
        assert (parentEntry.getChild() == node);
        // All the entries we have to separate in two nodes.
        Entry[] allEntries = new Entry[4];
        Entry[] nodeEntries = node.getEntries();
        for (int i = 0; i < nodeEntries.length; i++) {
            allEntries[i] = new Entry(nodeEntries[i]);
        }
        allEntries[3] = newEntry;
        // Replace the given node by a fresh one, since we are going to refill
        // it later (only the local reference changes; the tree is rewired via
        // parentEntry.setChild below).
        node = new Node(this.numberDimensions, node.getRawLevel());
        // Calculate the distance of all the possible pairings, since we want
        // to do a (2,2) split.
        double select01 = allEntries[0].calcDistance(allEntries[1])
                + allEntries[2].calcDistance(allEntries[3]);
        double select02 = allEntries[0].calcDistance(allEntries[2])
                + allEntries[1].calcDistance(allEntries[3]);
        double select03 = allEntries[0].calcDistance(allEntries[3])
                + allEntries[1].calcDistance(allEntries[2]);
        // See which of the pairings is minimal and distribute the entries
        // accordingly.
        Node residualNode = new Node(this.numberDimensions,
                node.getRawLevel());
        if (select01 < select02) {
            if (select01 < select03) {//select01 smallest
                node.addEntry(allEntries[0], timestamp);
                node.addEntry(allEntries[1], timestamp);
                residualNode.addEntry(allEntries[2], timestamp);
                residualNode.addEntry(allEntries[3], timestamp);
            } else {//select03 smallest
                node.addEntry(allEntries[0], timestamp);
                node.addEntry(allEntries[3], timestamp);
                residualNode.addEntry(allEntries[1], timestamp);
                residualNode.addEntry(allEntries[2], timestamp);
            }
        } else {
            if (select02 < select03) {//select02 smallest
                node.addEntry(allEntries[0], timestamp);
                node.addEntry(allEntries[2], timestamp);
                residualNode.addEntry(allEntries[1], timestamp);
                residualNode.addEntry(allEntries[3], timestamp);
            } else {//select03 smallest
                node.addEntry(allEntries[0], timestamp);
                node.addEntry(allEntries[3], timestamp);
                residualNode.addEntry(allEntries[1], timestamp);
                residualNode.addEntry(allEntries[2], timestamp);
            }
        }
        // Set the other node into the tree.
        parentEntry.setChild(node);
        parentEntry.recalculateData();
        int count = 0;
        for (Entry e : node.getEntries()){
            e.setParentEntry(parentEntry);
            if (e.getData().getN() != 0)
                count++;
        }
        //System.out.println(count);
        // Generate a new entry for the residual node.
        Entry residualEntry = new Entry(this.numberDimensions,
                residualNode, timestamp, parentEntry, node);
        count=0;
        for (Entry e: residualNode.getEntries()){
            e.setParentEntry(residualEntry);
            if (e.getData().getN() != 0)
                count++;
        }
        //System.out.println(count);
        return residualEntry;
    }

    /**
     * Return the number of time the tree has grown in size. If the tree grows
     * and is then cutted from a certain depth, it also counts.
     * @return The number of times the root node was splitted.
     */
    public int getNumRootSplits() {
        return numRootSplits;
    }

    /**
     * Return the current height of the tree. This should never be greater than
     * <code>maxHeight</code>.
     * @return The height of the tree.
     * @see #maxHeight
     */
    public int getHeight() {
        assert (height <= maxHeight);
        return height;
    }

    /**
     * Detaches all children hanging below the maximal height, bounding the
     * tree's depth. Called periodically from {@link #insert}.
     */
    private void cleanUp(Node currentNode, int level) {
        if (currentNode == null) {
            return;
        }
        Entry[] entries = currentNode.getEntries();
        if (level == this.maxHeight) {
            for (int i = 0; i < entries.length; i++) {
                Entry e = entries[i];
                e.setChild(null);
            }
        } else {
            for (int i = 0; i < entries.length; i++) {
                Entry e = entries[i];
                cleanUp(e.getChild(), level + 1);
            }
        }
    }

    /**
     * @return The kernels at the leaf level as a clustering
     */
    //TODO: do not return microclusters below the threshold (as with outdated entries)
    @Override
    public Clustering getMicroClusteringResult() {
        return getClustering(timestamp, -1);
    }

    @Override
    public Clustering getClusteringResult() {
        return null;
    }

    /**
     * @param currentTime The current time
     * @param targetLevel The level whose kernels to return, or -1 for leaves.
     * @return The kernels at the given level as a clustering.
     */
    public Clustering getClustering(long currentTime, int targetLevel) {
        if (root == null) {
            return null;
        }
        Clustering clusters = new Clustering();
        LinkedList<Node> queue = new LinkedList<Node>();
        queue.add(root);
        while (!queue.isEmpty()) {
            Node current = queue.remove();
//            if (current == null)
//                continue;
            int currentLevel = current.getLevel(this);
            boolean isLeaf = (current.isLeaf() && currentLevel <= maxHeight)
                    || currentLevel == maxHeight;
            if (currentLevel == targetLevel
                    || (targetLevel == - 1 && isLeaf)) {
                assert (currentLevel <= maxHeight);
                Entry[] entries = current.getEntries();
                for (int i = 0; i < entries.length; i++) {
                    Entry entry = entries[i];
                    if (entry == null || entry.isEmpty()) {
                        continue;
                    }
                    // XXX
                    entry.makeOlder(currentTime, this.negLambda);
                    if (entry.isIrrelevant(this.weightThreshold))
                        continue;
                    ClusKernel gaussKernel = new ClusKernel(entry.getData());
//                    long diff = currentTime - entry.getTimestamp();
//                    if (diff > 0) {
//                        gaussKernel.makeOlder(diff, negLambda);
//                    }
                    clusters.add(gaussKernel);
                }
            } else if (!current.isLeaf()) {
                Entry[] entries = current.getEntries();
                for (int i = 0; i < entries.length; i++) {
                    Entry entry = entries[i];
                    if (entry.isEmpty()) {
                        continue;
                    }
                    if (entry.isIrrelevant(weightThreshold)) {
                        continue;
                    }
                    queue.add(entry.getChild());
                }
            }
        }
        return clusters;
    }

    /**************************************************************************
     * LOCAL CLASSES
     **************************************************************************/
    /**
     * A class to code the return value of searching the smallest merge in a
     * node.
     */
    class BestMergeInNode {

        /**
         * The position of the first entry in the array of the node.
         */
        public int entryPos1;
        /**
         * The position of the second entry in the array of the node.
         */
        public int entryPos2;
        /**
         * The distance between the two entries.
         */
        public double distance;

        /**
         * The constructor of this return value. It will automatically make
         * sure that the first position is the smaller one of the two.
         * @param pos1 One of the position.
         * @param pos2 One of the position.
         * @param distance The distance between the entries at these positions.
         */
        public BestMergeInNode(int pos1, int pos2,
                double distance) {
            assert (pos1 != pos2);
            this.distance = distance;
            if (pos1 < pos2) {
                this.entryPos1 = pos1;
                this.entryPos2 = pos2;
            } else {
                this.entryPos1 = pos2;
                this.entryPos2 = pos1;
            }
        }
    }
}
| Java |
/*
* Entry.java
* Copyright (C) 2010 RWTH Aachen University, Germany
* @author Sanchez Villaamil (moa@cs.rwth-aachen.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.clustree;
/**
 * An entry of a ClusTree node. Each entry carries a summarized cluster
 * ({@link #data}), a buffer for points that could not yet be pushed further
 * down the tree, an optional reference to a child node, and bookkeeping
 * references (parent entry, containing node, last-update timestamp) used to
 * decay the cluster weights over time.
 */
public class Entry {
    /**
     * The actual entry data: a cluster feature summarizing everything that
     * was inserted into this entry (and, transitively, its subtree).
     */
    public ClusKernel data;
    /**
     * The buffer of this entry. It can also be seen as the buffer of the child
     * node; it is kept here just to simplify the insertion recursion.
     */
    private ClusKernel buffer;
    /**
     * A reference to the next node in the tree. <code>null</code> if we are
     * at a leaf, or this entry is part of a lying <code>Node</code>.
     */
    private Node child;
    /**
     * A reference to this Entry's parent Entry, i.e. the entry one level up
     * whose child node contains this entry. May be <code>null</code> at the
     * root level.
     */
    private Entry parentEntry;
    /**
     * A reference to the Node that contains this Entry.
     */
    private Node node;
    /**
     * Last time this entry was changed. Used to compute the elapsed time
     * (and hence the decay factor) on the next update.
     */
    private long timestamp;
    /**
     * The timestamp to be used when no operation has yet been done on this
     * entry.
     * @see #timestamp
     */
    private static final long defaultTimestamp = 0;
    /**
     * Constructor for the entry. To be used when we want to create an empty
     * entry. Notice that the timestamp will be set to zero, since there is no
     * reason to know when an empty entry was generated.
     * @param numberDimensions The dimensionality of the data points in the
     * tree where this entry is used.
     */
    public Entry(int numberDimensions) {
        this.data = new ClusKernel(numberDimensions);
        this.buffer = new ClusKernel(numberDimensions);
        this.child = null;
        this.timestamp = Entry.defaultTimestamp;
    }
    /**
     * Constructor that creates an <code>Entry</code> that points to the given
     * node. The values of <code>data</code> are computed as the sum of the
     * child node's non-empty entries.
     * @param numberDimensions The dimensionality of the node.
     * @param node The node to which the new <code>Entry</code> should point.
     * @param currentTime The timestamp for the moment when this Entry was
     * generated.
     * @param parentEntry The parent entry of the new entry.
     * @param containerNode The node that will contain the new entry.
     * @see Node
     * @see #data
     */
    protected Entry(int numberDimensions,
            Node node, long currentTime, Entry parentEntry, Node containerNode) {
        this(numberDimensions);
        this.child = node;
        this.parentEntry = parentEntry;
        this.node = containerNode;
        Entry[] entries = node.getEntries();
        for (int i = 0; i < entries.length; i++) {
            Entry entry = entries[i];
            // NOTE(review): the parent pointer is set before the isEmpty()
            // check, so the first empty entry also gets its parent updated
            // before the loop breaks — confirm this is intentional.
            entry.setParentEntry(this);
            if (entry.isEmpty()) {
                // Entries are filled left to right; the first empty slot
                // marks the end of the used portion of the array.
                break;
            }
            this.add(entry);
        }
        this.timestamp = currentTime;
    }
    /**
     * Constructor that creates an <code>Entry</code> with an empty buffer
     * and the <code>data</code> given by the <code>ClusKernel</code>.
     * @param numberDimensions The dimensionality of the information in the
     * cluster.
     * @param cluster The cluster from which the information is to be extracted.
     * @param currentTime The timestamp for the moment when this Entry was
     * generated.
     * @see #data
     */
    public Entry(int numberDimensions, ClusKernel cluster, long currentTime) {
        this(numberDimensions);
        this.data.add(cluster);
        this.timestamp = currentTime;
    }
    /**
     * Extended constructor that additionally wires up the containing node and
     * the parent entry.
     * @param numberDimensions The dimensionality of the cluster.
     * @param cluster The cluster from which the information is extracted.
     * @param currentTime The timestamp for the moment when this Entry was
     * generated.
     * @param parentEntry The parent entry of the new entry.
     * @param containerNode The node that contains the new entry.
     */
    protected Entry(int numberDimensions, ClusKernel cluster, long currentTime, Entry parentEntry, Node containerNode) {
        this(numberDimensions);
        this.parentEntry = parentEntry;
        this.data.add(cluster);
        this.node = containerNode;
        this.timestamp = currentTime;
    }
    /**
     * Copy constructor. Everything is copied; the buffer and data clusters
     * are deep-copied, while the child reference is shared. The copied
     * child's entries are re-parented to the new copy.
     * @param other The entry to copy.
     */
    protected Entry(Entry other) {
        this.parentEntry = other.parentEntry;
        this.node = other.node;
        this.buffer = new ClusKernel(other.buffer);
        this.data = new ClusKernel(other.data);
        this.timestamp = other.timestamp;
        this.child = other.child;
        if (other.getChild()!=null)
            for (Entry e : other.getChild().getEntries()){
                e.setParentEntry(this);
            }
    }
    /**
     * Returns the node that contains this entry.
     * @return The containing node (may be <code>null</code>).
     */
    public Node getNode() {
        return node;
    }
    /**
     * Sets the node that contains this entry.
     * @param node The containing node.
     */
    public void setNode(Node node) {
        this.node = node;
    }
    /**
     * Clear the Entry. All points in the buffer and in the data cluster are
     * lost, the connection to the child is lost and the timestamp is set to
     * the default value.
     */
    protected void clear() {
        this.data.clear();
        this.buffer.clear();
        this.child = null;
        this.timestamp = Entry.defaultTimestamp;
    }
    /**
     * Clear the <code>data</code> and the <code>buffer</code> cluster in this
     * entry. This function does not clear the child of this <code>Entry</code>.
     * @see #data
     * @see #buffer
     */
    protected void shallowClear() {
        this.buffer.clear();
        this.data.clear();
    }
    /**
     * Calculates the distance from the given cluster to the data in this
     * entry.
     * @param cluster The cluster to which the distance is to be calculated.
     * @return The distance to the data <code>ClusKernel</code> in this
     * <code>Entry</code>.
     * @see #data
     */
    protected double calcDistance(ClusKernel cluster) {
        return data.calcDistance(cluster);
    }
    /**
     * Calculates the distance between the data in this entry and the data in
     * the given entry.
     * @param other The <code>Entry</code> to which the distance is to be
     * calculated.
     * @return The distance between the data <code>ClusKernel</code> in this
     * <code>Entry</code> and the one in the other <code>Entry</code>.
     * @see #data
     */
    public double calcDistance(Entry other) {
        return this.getData().calcDistance(other.getData());
    }
    /**
     * When this entry is empty, give it its first values. It makes sense to
     * have this operation separated from the aggregation, because the
     * aggregation first weights (decays) the values in <code>data</code>,
     * which makes no sense in an empty entry.
     * @param other The entry with the information to be used to initialize
     * this entry.
     * @param currentTime The time at which this is happening.
     */
    protected void initializeEntry(Entry other, long currentTime) {
        assert (this.isEmpty());
        assert (other.getBuffer().isEmpty());
        this.data.add(other.data);
        this.timestamp = currentTime;
        this.child = other.child;
        if (child!=null){
            // Adopt the child's entries: they now belong to this entry.
            for (Entry e : child.getEntries()){
                e.setParentEntry(this);
            }
        }
    }
    /**
     * Add the data cluster of another entry to the data cluster of this entry.
     * By using this function the timestamp does not get updated, nor does this
     * entry get older (no decay is applied).
     * @param other The entry whose data cluster should be added to
     * the local data cluster.
     * @see #data
     */
    public void add(Entry other) {
        this.data.add(other.data);
    }
    /**
     * Aggregate the <code>data</code> cluster of the other <code>Entry</code>
     * into this one: first decay this entry's data by the elapsed time, then
     * add the other data and update the timestamp.
     * @param other The <code>Entry</code> to be aggregated.
     * @param currentTime The time at which the aggregation occurs.
     * @param negLambda The (negative) decay rate used for weighting.
     * @see #data
     */
    protected void aggregateEntry(Entry other, long currentTime,
            double negLambda) {
        this.data.aggregate(other.data, currentTime - this.timestamp,
                negLambda);
        this.timestamp = currentTime;
    }
    /**
     * Aggregate the given <code>ClusKernel</code> into the <code>data</code>
     * cluster of this entry (decay first, then add) and update the timestamp.
     * @param otherData The cluster to be aggregated.
     * @param currentTime The time at which the aggregation occurs.
     * @param negLambda The (negative) decay rate used for weighting.
     * @see #data
     */
    protected void aggregateCluster(ClusKernel otherData, long currentTime,
            double negLambda) {
        this.getData().aggregate(otherData, currentTime - this.timestamp,
                negLambda);
        this.timestamp = currentTime;
    }
    /**
     * Aggregate the given <code>ClusKernel</code> into the <code>buffer</code>
     * cluster of this entry (decay first, then add) and update the timestamp.
     * @param pointToInsert The cluster to aggregate into the buffer.
     * @param currentTime The time at which the aggregation occurs.
     * @param negLambda A parameter needed to weight the current state of the
     * buffer.
     */
    protected void aggregateToBuffer(ClusKernel pointToInsert, long currentTime,
            double negLambda) {
        ClusKernel currentBuffer = this.getBuffer();
        currentBuffer.aggregate(pointToInsert, currentTime - this.timestamp,
                negLambda);
        this.timestamp = currentTime;
    }
    /**
     * Merge this entry with the given <code>Entry</code>. This adds the data
     * cluster of the given Entry to the data cluster of this entry and sets
     * the timestamp to the newest one of the two entries. No decay is applied.
     * @param other The entry from which the data cluster is added.
     */
    protected void mergeWith(Entry other) {
        // We should only merge entries in leafs, and leafs should have empty
        // buffers.
        assert (this.child == null);
        assert (other.child == null);
        assert (other.buffer.isEmpty());
        this.data.add(other.data);
        if (this.timestamp < other.timestamp) {
            this.timestamp = other.timestamp;
        }
    }
    /**
     * Getter for the buffer. It is the real object, that means side effects
     * are possible!
     * @return A reference to the buffer in this entry.
     */
    protected ClusKernel getBuffer() {
        return buffer;
    }
    /**
     * Return the reference to the child of this <code>Entry</code> to navigate
     * in the tree.
     * @return A reference to the child of this <code>Entry</code>, or
     * <code>null</code> for a leaf entry.
     */
    public Node getChild() {
        return child;
    }
    /**
     * Getter for the data. It is the real object, that means side effects are
     * possible!
     * @return A reference to the data <code>ClusKernel</code> in this entry.
     */
    protected ClusKernel getData() {
        return data;
    }
    /**
     * Returns this entry's parent entry (may be <code>null</code>).
     * @return The parent entry.
     */
    public Entry getParentEntry() {
        return parentEntry;
    }
    /**
     * Sets this entry's parent entry.
     * @param parent The new parent entry.
     */
    public void setParentEntry(Entry parent) {
        this.parentEntry = parent;
    }
    /**
     * Setter for the child in this entry. Used to build the tree.
     * @param child The <code>Node</code> that should be a child of this
     * <code>Entry</code>.
     * @see Node
     */
    public void setChild(Node child) {
        this.child = child;
    }
    /**
     * Return the current timestamp.
     * @return The current timestamp.
     */
    public long getTimestamp() {
        return timestamp;
    }
    /**
     * Decay the buffer up to <code>currentTime</code>, then clear it and
     * return a copy of its decayed state. No side effects are possible (given
     * that the copy constructor of <code>ClusKernel</code> makes a deep copy).
     * Note that this entry's timestamp is NOT updated here.
     * @param currentTime The time at which the buffer is emptied.
     * @param negLambda The (negative) decay rate used for weighting.
     * @return A copy of the decayed buffer.
     */
    protected ClusKernel emptyBuffer(long currentTime, double negLambda) {
        this.buffer.makeOlder(currentTime - this.timestamp, negLambda);
        ClusKernel bufferCopy = new ClusKernel(this.buffer);
        this.buffer.clear();
        return bufferCopy;
    }
    /**
     * Check if this <code>Entry</code> is empty or not. An <code>Entry</code>
     * is empty if the <code>data</code> cluster is empty, since then the
     * buffer has to be empty too.
     * @return <code>true</code> if the data cluster has no data points,
     * <code>false</code> otherwise.
     */
    protected boolean isEmpty() {
        // Assert that if the data cluster is empty, the buffer cluster is
        // empty too.
        assert ((this.data.isEmpty() && this.buffer.isEmpty())
                || !this.data.isEmpty());
        return this.data.isEmpty();
    }
    /**
     * Overwrites the LS, SS and weightedN in the data cluster of this
     * <code>Entry</code> with the values of the data cluster in the given
     * <code>Entry</code>, but adds N and classCount of the cluster in the
     * given Entry to the data cluster in this one. This function is useful
     * when the weight of an entry becomes too small, and we want to forget the
     * information of the old points. The child pointer is taken over from the
     * new entry and the child's entries are re-parented to this entry.
     * @param newEntry The entry whose cluster should overwrite the
     * information.
     */
    protected void overwriteOldEntry(Entry newEntry) {
        assert (this.getBuffer().isEmpty());
        assert (newEntry.getBuffer().isEmpty());
        this.data.overwriteOldCluster(newEntry.data);
        newEntry.setParentEntry(this.parentEntry);
        if (newEntry.getChild()!=null)
            for (Entry e : newEntry.getChild().getEntries())
                e.setParentEntry(this);
        //this.setParentEntry(newEntry.getParentEntry());
        this.child=newEntry.child;
    }
    /**
     * This function reads every entry in the child node and recalculates the
     * corresponding <code>data</code> cluster as their sum. Timestamps are not
     * changed. If this entry has no child, the entry is cleared entirely.
     * @see #data
     */
    protected void recalculateData() {
        Node currentChild = this.getChild();
        if (currentChild != null) {
            ClusKernel currentData = this.getData();
            currentData.clear();
            Entry[] entries = currentChild.getEntries();
            for (int i = 0; i < entries.length; i++) {
                currentData.add(entries[i].getData());
            }
        } else {
            this.clear();
        }
    }
    /**
     * Returns true if this entry is irrelevant with respect to the given
     * threshold. This is done by comparing the weighted N of this Entry to
     * the threshold; if it is smaller, then the entry is deemed to be
     * irrelevant.
     * @param threshold The threshold under which entries at leafs can be
     * erased.
     * @return True if this entry is deemed irrelevant, false otherwise.
     */
    protected boolean isIrrelevant(double threshold) {
        return this.getData().getWeight() < threshold;
    }
    /**
     * Ages this entry's data AND buffer according to the given
     * time and aging constant, and updates the timestamp.
     * @param currentTime the current time
     * @param negLambda the aging constant
     */
    protected void makeOlder(long currentTime, double negLambda) {
        // assert (currentTime > this.timestamp) : "currentTime : "
        //        + currentTime + ", this.timestamp: " + this.timestamp;
        long diff = currentTime - this.timestamp;
        this.buffer.makeOlder(diff, negLambda);
        this.data.makeOlder(diff, negLambda);
        this.timestamp = currentTime;
    }
}
| Java |
/*
* ClusKernel.java
* Copyright (C) 2010 RWTH Aachen University, Germany
* @author Sanchez Villaamil (moa@cs.rwth-aachen.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.clustree;
import moa.clusterers.clustree.util.*;
import java.util.Arrays;
import moa.cluster.CFCluster;
import moa.cluster.Cluster;
import weka.core.Instance;
/**
 * Representation of an Entry's cluster in the ClusTree: a cluster feature
 * (CF) vector with linear sum {@code LS}, squared sum {@code SS} and weighted
 * count {@code N} inherited from {@link CFCluster}, plus an unweighted total
 * count used to detect empty clusters.
 */
public class ClusKernel extends CFCluster{
    /**
     * Numeric epsilon used to absorb small rounding errors (e.g. when
     * testing whether a point coincides with a trivial cluster).
     */
    public static final double EPSILON = 0.00000001;
    /**
     * Lower bound substituted for non-positive per-dimension variances so
     * that square roots of the variance vector stay well-defined.
     */
    public static final double MIN_VARIANCE = 1e-50; // 1e-100; // 0.0000001;
    /**
     * Unweighted count of points ever added to this cluster, in contrast to
     * the time-decayed weighted count {@code N}. If the weighted N falls
     * under a threshold, the cluster may be considered irrelevant and can be
     * deleted; totalN == 0 identifies a cluster that never received a point.
     */
    private double totalN;
    /**
     * A constructor that makes a kernel which represents just the given point.
     * @param point The point to be converted into a corresponding kernel.
     * @param dim The dimensionality of the point.
     */
    public ClusKernel(double[] point, int dim) {
        super(point, dim);
        this.totalN = 1;
    }
    /**
     * Constructor of an empty cluster.
     * @param numberDimensions Dimensionality of the points that can be
     * added to this cluster.
     */
    protected ClusKernel(int numberDimensions) {
        super(numberDimensions);
        this.totalN = 0;
    }
    /**
     * Instantiates a copy of the given cluster.
     * @param other The cluster of which we make a copy.
     */
    protected ClusKernel(ClusKernel other) {
        super(other);
        this.totalN = other.getTotalN();
    }
    /**
     * Adds the given cluster to this cluster, without making this cluster
     * older (no time decay is applied).
     * @param other The cluster to add.
     */
    public void add(ClusKernel other) {
        super.add(other);
        this.totalN += other.totalN;
    }
    /**
     * Make this cluster older by weighting it, then add the given cluster to
     * it. If we want to add something to the cluster without weighting it,
     * we should use {@link #add(ClusKernel)} instead.
     * @param other The other cluster to be added to this one.
     * @param timeDifference The time elapsed between the last update of the
     * <code>Entry</code> to which this cluster belongs and the update that
     * caused the call to this function.
     * @param negLambda A parameter needed to weight the cluster.
     * @see #add(ClusKernel)
     */
    protected void aggregate(ClusKernel other, long timeDifference, double negLambda) {
        makeOlder(timeDifference, negLambda);
        add(other);
    }
    /**
     * Make this cluster older. This means multiplying weighted N, LS and SS
     * with a decay factor 2^(negLambda * timeDifference) determined by the
     * time difference and the parameter negLambda. The unweighted totalN is
     * deliberately left untouched.
     * @param timeDifference The time elapsed between this current update and
     * the last one; must be positive.
     * @param negLambda The (negative) decay rate.
     */
    protected void makeOlder(long timeDifference, double negLambda) {
        if (timeDifference == 0) {
            return;
        }
        assert (negLambda < 0);
        assert (timeDifference > 0);
        double weightFactor = Math.pow(2.0, negLambda * timeDifference);
        this.N *= weightFactor;
        for (int i = 0; i < LS.length; i++) {
            LS[i] *= weightFactor;
            SS[i] *= weightFactor;
        }
    }
    /**
     * Calculate the Euclidean distance between the centers of this cluster
     * and the other cluster. The other cluster is normally just a single data
     * point (i.e. N = 1).
     * @param other The other cluster to which the distance is calculated.
     * @return The distance between this cluster's center and the other's.
     */
    public double calcDistance(ClusKernel other) {
        // TODO: (Fernando, Felix) Adapt the distance function to the new algorithmn.
        double N1 = this.getWeight();
        double N2 = other.getWeight();
        double[] thisLS = this.LS;
        double[] otherLS = other.LS;
        double res = 0.0;
        for (int i = 0; i < thisLS.length; i++) {
            double substracted = (thisLS[i] / N1) - (otherLS[i] / N2);
            res += substracted * substracted;
        }
        // TODO INFO: added sqrt to the computation [PK 10.09.10]
        return Math.sqrt(res);
    }
    /**
     * Returns the unweighted total number of points ever added to the
     * cluster.
     * @return The unweighted total number of points in the cluster.
     */
    private double getTotalN() {
        return totalN;
    }
    /**
     * Check if this cluster is empty or not.
     * @return <code>true</code> if the cluster has no data points,
     * <code>false</code> otherwise.
     */
    protected boolean isEmpty() {
        return this.totalN == 0;
    }
    /**
     * Remove all points from this cluster.
     */
    protected void clear() {
        this.totalN = 0;
        this.N = 0.0;
        Arrays.fill(this.LS, 0.0);
        Arrays.fill(this.SS, 0.0);
    }
    /**
     * Overwrites the LS, SS and weighted N in this cluster with the values of
     * the given cluster, but takes over totalN of the given cluster as well.
     * This function is useful when the weight of an entry becomes too small,
     * and we want to forget the information of the old points.
     * @param other The cluster that should overwrite the information.
     */
    protected void overwriteOldCluster(ClusKernel other) {
        this.totalN = other.totalN;
        this.N = other.N;
        assert (LS.length == other.LS.length);
        System.arraycopy(other.LS, 0, LS, 0, LS.length);
        assert (SS.length == other.SS.length);
        System.arraycopy(other.SS, 0, SS, 0, SS.length);
    }
    /**
     * Returns the time-decayed (weighted) number of points in the cluster.
     * @return The weighted number of points in the cluster.
     */
    @Override
    public double getWeight() {
        return this.N;
    }
    /**
     * Returns this cluster's CF representation, which is the object itself.
     * @return this
     */
    @Override
    public CFCluster getCF(){
        return this;
    }
    /**
     * Returns this kernel's center: LS divided by the weighted count.
     * Must not be called on an empty cluster.
     * @return this kernel's center
     */
    public double[] getCenter() {
        assert (!this.isEmpty());
        double res[] = new double[this.LS.length];
        double weightedSize = this.getWeight();
        for (int i = 0; i < res.length; i++) {
            res[i] = this.LS[i] / weightedSize;
        }
        return res;
    }
    /**
     * Hard inclusion test: for a trivial (single-point) cluster the instance
     * is included iff it coincides with the point up to EPSILON; otherwise it
     * is included iff its distance to the center is within the radius.
     * @param instance The instance to test.
     * @return 1.0 if the instance is considered inside the cluster, 0.0
     * otherwise.
     */
    @Override
    public double getInclusionProbability(Instance instance) {
        //trivial cluster
        if(N == 1){
            double distance = 0.0;
            for (int i = 0; i < LS.length; i++) {
                double d = LS[i] - instance.value(i);
                distance += d * d;
            }
            distance = Math.sqrt(distance);
            if( distance < EPSILON )
                return 1.0;
            return 0.0;
        }
        else{
            double dist = calcNormalizedDistance(instance.toDoubleArray());
            if(dist <= getRadius()){
                return 1;
            }
            else{
                return 0;
            }
        }
    }
    /**
     * See interface <code>Cluster</code>.
     * @return The radius of the cluster: the average per-dimension deviation
     * scaled by the inherited radiusFactor; 0 for a trivial cluster.
     * @see Cluster#getRadius()
     */
    @Override
    public double getRadius() {
        //trivial cluster
        if(N == 1) return 0;
        return getDeviation()*radiusFactor;
    }
    /**
     * Average standard deviation over all dimensions.
     * @return The mean of the per-dimension standard deviations.
     */
    private double getDeviation(){
        double[] variance = getVarianceVector();
        double sumOfDeviation = 0.0;
        for (int i = 0; i < variance.length; i++) {
            double d = Math.sqrt(variance[i]);
            sumOfDeviation += d;
        }
        return sumOfDeviation / variance.length;
    }
    /**
     * Per-dimension variance vector: SS/N - (LS/N)^2 for each dimension.
     * Mathematically this quantity is non-negative (LS and SS decay by the
     * same factor), so any non-positive result stems from numerical
     * cancellation and is clamped to MIN_VARIANCE.
     * @return The per-dimension variances, each at least MIN_VARIANCE.
     */
    public double[] getVarianceVector() {
        double[] res = new double[this.LS.length];
        for (int i = 0; i < this.LS.length; i++) {
            double ls = this.LS[i];
            double ss = this.SS[i];
            double lsDivN = ls / this.getWeight();
            double lsDivNSquared = lsDivN * lsDivN;
            double ssDivN = ss / this.getWeight();
            res[i] = ssDivN - lsDivNSquared;
            // Due to numerical errors, negative values can occur. The
            // previous code only corrected values in (-EPSILON, 0], letting
            // larger negative errors through and producing NaN via
            // Math.sqrt in getDeviation()/getRadius(). Clamp every
            // non-positive variance instead.
            if (res[i] <= 0.0) {
                res[i] = MIN_VARIANCE;
            }
        }
        return res;
    }
    /**
     * Calculate the distance of a point to the cluster center. Despite the
     * name, the per-dimension normalization by the variance is currently
     * disabled (the division was commented out upstream), so this is the
     * plain Euclidean distance to the center.
     * @param point The point to which the distance is calculated.
     * @return The Euclidean distance to the cluster center.
     *
     * TODO: check whether WEIGHTING is correctly applied to variances
     */
    private double calcNormalizedDistance(double[] point) {
        double[] center = getCenter();
        double res = 0.0;
        for (int i = 0; i < center.length; i++) {
            double diff = center[i] - point[i];
            res += (diff * diff);// variance[i];
        }
        return Math.sqrt(res);
    }
}
| Java |
/*
* Budget.java
* Copyright (C) 2010 RWTH Aachen University, Germany
* @author Sanchez Villaamil (moa@cs.rwth-aachen.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.clustree.util;
/**
 * This is an interface for classes that are to be given along with every data
 * point inserted in the tree. The tree has to inform the implementation of
 * this <code>Budget</code> interface of every operation it does, and ask at
 * the places where it can stop prematurely if it should. This models the
 * arrival of a new data point in the stream, before there was enough time to
 * insert the current one.
 */
public interface Budget {
    /**
     * A function for the tree to ask if there is budget (time) left.
     * @return A <code>boolean</code> that is <code>true</code> if the tree
     * should go on, <code>false</code> otherwise.
     */
    public boolean hasMoreTime();
    /**
     * Inform the <code>Budget</code> class that an integer addition has been
     * performed by the tree.
     */
    public void integerAddition();
    /**
     * Inform the <code>Budget</code> that a certain number of integer
     * additions have been performed.
     * @param number the number of additions done.
     */
    public void integerAddition(int number);
    /**
     * Inform the <code>Budget</code> class that a double addition has been
     * performed by the tree.
     */
    public void doubleAddition();
    /**
     * Inform the <code>Budget</code> that a certain number of double
     * additions have been performed.
     * @param number the number of additions done.
     */
    public void doubleAddition(int number);
    /**
     * Inform the <code>Budget</code> class that an integer multiplication has
     * been performed by the tree.
     */
    public void integerMultiplication();
    /**
     * Inform the <code>Budget</code> that a certain number of integer
     * multiplications have been performed.
     * @param number the number of multiplications done.
     */
    public void integerMultiplication(int number);
    /**
     * Inform the <code>Budget</code> class that a double multiplication has
     * been performed by the tree.
     */
    public void doubleMultiplication();
    /**
     * Inform the <code>Budget</code> that a certain number of double
     * multiplications have been performed.
     * @param number the number of multiplications done.
     */
    public void doubleMultiplication(int number);
    /**
     * Inform the <code>Budget</code> class that an integer division has
     * been performed by the tree.
     */
    public void integerDivision();
    /**
     * Inform the <code>Budget</code> that a certain number of integer
     * divisions have been performed.
     * @param number the number of divisions done.
     */
    public void integerDivision(int number);
    /**
     * Inform the <code>Budget</code> class that a double division has
     * been performed by the tree.
     */
    public void doubleDivision();
    /**
     * Inform the <code>Budget</code> that a certain number of double
     * divisions have been performed.
     * @param number the number of divisions done.
     */
    public void doubleDivision(int number);
}
| Java |
/*
* SimpleBudget.java
* Copyright (C) 2010 RWTH Aachen University, Germany
* @author Reidl (moa@cs.rwth-aachen.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.clustree.util;
/**
 * A straightforward {@link Budget} implementation: it starts with a fixed
 * amount of abstract time units and subtracts a per-operation cost each time
 * the tree reports an arithmetic operation. The budget is exhausted once the
 * remaining time reaches zero (or below).
 */
public class SimpleBudget implements Budget {

    /** Cost charged for one integer addition. */
    public static final int INT_ADD = 1;
    /** Cost charged for one integer multiplication. */
    public static final int INT_MULT = 1;
    /** Cost charged for one integer division. */
    public static final int INT_DIV = 1;
    /** Cost charged for one double addition. */
    public static final int DOUBLE_ADD = 1;
    /** Cost charged for one double multiplication. */
    public static final int DOUBLE_MULT = 1;
    /** Cost charged for one double division (deliberately more expensive). */
    public static final int DOUBLE_DIV = 10;

    /** Remaining budget in abstract time units. */
    private int time;

    /**
     * Creates a budget with the given number of time units.
     * @param time the initial budget; must be non-negative.
     */
    public SimpleBudget(int time) {
        assert (time >= 0);
        this.time = time;
    }

    /** Charges the given cost against the remaining budget. */
    private void charge(int cost) {
        time -= cost;
    }

    @Override
    public boolean hasMoreTime() {
        return time > 0;
    }

    @Override
    public void integerAddition() {
        charge(INT_ADD);
    }

    @Override
    public void integerAddition(int number) {
        charge(INT_ADD * number);
    }

    @Override
    public void doubleAddition() {
        charge(DOUBLE_ADD);
    }

    @Override
    public void doubleAddition(int number) {
        charge(DOUBLE_ADD * number);
    }

    @Override
    public void integerMultiplication() {
        charge(INT_MULT);
    }

    @Override
    public void integerMultiplication(int number) {
        charge(INT_MULT * number);
    }

    @Override
    public void doubleMultiplication() {
        charge(DOUBLE_MULT);
    }

    @Override
    public void doubleMultiplication(int number) {
        charge(DOUBLE_MULT * number);
    }

    @Override
    public void integerDivision() {
        charge(INT_DIV);
    }

    @Override
    public void integerDivision(int number) {
        charge(INT_DIV * number);
    }

    @Override
    public void doubleDivision() {
        charge(DOUBLE_DIV);
    }

    @Override
    public void doubleDivision(int number) {
        charge(DOUBLE_DIV * number);
    }
}
| Java |
/*
* ExactSTORM.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.Angiulli;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.Vector;
import moa.clusterers.outliers.Angiulli.ISBIndex.ISBNode;
import moa.clusterers.outliers.Angiulli.ISBIndex.ISBSearchResult;
import moa.options.FloatOption;
import moa.options.IntOption;
import weka.core.Instance;
// The algorithm is presented in "Distance-based outlier queries in data streams: the novel task and algorithms.
//Data Mining and Knowledge Discovery, 20(2):290–324,2010.
public class ExactSTORM extends STORMBase {
public class ISBNodeExact extends ISBNode {
public int count_after;
// nn_before:
// A list that needs O(logn) time for ordered insertion and search.
// It must be able to perform a search in the list using e.g. <=.
private ArrayList<Long> nn_before;
public ISBNodeExact(Instance inst, StreamObj obj, Long id, int k) {
super(inst, obj, id);
m_k = k;
count_after = 0;
nn_before = new ArrayList<Long>();
}
public void AddPrecNeigh(Long id) {
int pos = Collections.binarySearch(nn_before, id);
if (pos < 0) {
// item does not exist, so add it to the right position
nn_before.add(-(pos + 1), id);
}
}
public int CountPrecNeighs(Long sinceId) {
if (nn_before.size() > 0) {
// get number of neighs with id >= sinceId
int startPos;
int pos = Collections.binarySearch(nn_before, sinceId);
if (pos < 0) {
// item does not exist, should insert at position startPos
startPos = -(pos + 1);
} else {
// item exists at startPos
startPos = pos;
}
if (startPos < nn_before.size()) {
return nn_before.size() - startPos;
}
}
return 0;
}
public void PrintPrecNeighs() {
Print(" nn_before: ");
Iterator it = nn_before.iterator();
while (it.hasNext()) {
Print((Long)it.next() + " ");
}
Println(" ");
}
}
public FloatOption radiusOption = new FloatOption("radius", 'r', "Search radius.", 0.1);
public IntOption kOption = new IntOption("k", 't', "Parameter k.", 50);
public IntOption queryFreqOption = new IntOption("queryFreq", 'q', "Query frequency.", 1);
public ExactSTORM()
{
// System.out.println("DistanceOutliersExact: created");
}
@Override
public String getObjectInfo(Object obj) {
if (obj == null) return null;
ISBNodeExact node = (ISBNodeExact) obj;
ArrayList<String> infoTitle = new ArrayList<String>();
ArrayList<String> infoValue = new ArrayList<String>();
StringBuilder sb = new StringBuilder();
// show node position
for (int i = 0; i < node.obj.dimensions(); i++) {
infoTitle.add("Dim" + (i+1));
infoValue.add(String.format("%.3f", node.obj.get(i)));
}
// show node properties
infoTitle.add("id");
infoValue.add(String.format("%d", node.id));
infoTitle.add("count_after");
infoValue.add(String.format("%d", node.count_after));
infoTitle.add("|nn_before|");
infoValue.add(String.format("%d", node.CountPrecNeighs(GetWindowStart())));
sb.append("<html>");
sb.append("<table>");
int i = 0;
while(i < infoTitle.size() && i < infoValue.size()){
sb.append("<tr><td><b>"+infoTitle.get(i)+":</b></td><td>"+infoValue.get(i)+"</td></tr>");
i++;
}
sb.append("</table>");
sb.append("</html>");
return sb.toString();
}
@Override
public void Init() {
super.Init();
m_WindowSize = windowSizeOption.getValue();
m_radius = radiusOption.getValue();
m_k = kOption.getValue();
m_QueryFreq = queryFreqOption.getValue();
Println("Init DistanceOutliersExact:");
Println(" window_size: " + m_WindowSize);
Println(" radius: " + m_radius);
Println(" k: " + m_k);
Println(" query_freq: " + m_QueryFreq);
objId = FIRST_OBJ_ID; // init object identifier
// create fifo
windowNodes = new Vector<ISBNode>();
// create ISB
ISB = new ISBIndex(m_radius, m_k);
// init statistics
m_nBothInlierOutlier = 0;
m_nOnlyInlier = 0;
m_nOnlyOutlier = 0;
}
void RemoveNode(ISBNode node) {
// remove node from ISB
ISB.Remove(node);
// remove from fifo
windowNodes.remove(node);
// remove from outliers
RemoveExpiredOutlier(new Outlier(node.inst, node.id, node));
// update statistics
UpdateStatistics(node);
}
void DeleteExpiredNode() {
if (windowNodes.size() <= 0)
return;
// get oldest node
ISBNode node = windowNodes.get(0);
// check if node has expired
if (node.id < GetWindowStart()) {
if (bTrace) {
Print("Delete expired node: ");
PrintNode(node);
}
// remove node
RemoveNode(node);
}
}
/**
 * Handles the arrival of one stream object: builds its ISB node, expires
 * the oldest node if it left the window, links the new node with its
 * range neighbors, inserts it, and either re-runs the outlier query or
 * just updates the new node's outlierness statistics.
 */
@Override
protected void ProcessNewStreamObj(Instance inst)
{
    if (bShowProgress) ShowProgress("Processed " + objId + " stream objects.");
    // PrintInstance(inst);
    double[] values = getInstanceValues(inst);
    StreamObj obj = new StreamObj(values);
    if (bTrace) Println("\n- - - - - - - - - - - -\n");
    // create new ISB node (it gets the current objId as its identifier)
    ISBNodeExact nodeNew = new ISBNodeExact(inst, obj, objId, m_k);
    if (bTrace) {
        Print("New obj: ");
        PrintNode(nodeNew);
    }
    // update object identifier
    objId++;
    // delete a node if it has expired (window has slid past it)
    DeleteExpiredNode();
    // init nodeNew; count_after starts at 1 — apparently the node counts
    // itself as a succeeding neighbor (see UpdateNodeType) — TODO confirm
    nodeNew.count_after = 1;
    // perform range query search
    if (bTrace) Println("Perform range query seach:");
    nRangeQueriesExecuted++;
    Vector<ISBIndex.ISBSearchResult> nodes = ISB.RangeSearch(nodeNew, m_radius);
    // process each returned node: the new node becomes a succeeding
    // neighbor of every hit, and every hit a preceding neighbor of it
    for (ISBSearchResult res : nodes) {
        ISBNodeExact n = (ISBNodeExact) res.node;
        if (bTrace) {
            Printf("  Found at d=%.2f: ", res.distance);
            PrintNode(res.node);
        }
        n.count_after++;
        nodeNew.AddPrecNeigh(res.node.id);
    }
    if (bTrace) Println("Insert new node to ISB.");
    ISB.Insert(nodeNew);
    // insert node at window
    windowNodes.add(nodeNew);
    if (bTrace) PrintWindow();
    if (CanSearch()) {
        // invoke query function to detect outliers
        SearchOutliers();
    } else {
        // between query rounds, only track the new node's outlierness
        UpdateNodeStatistics(nodeNew);
    }
}
/**
 * Query function: re-classifies every node currently in the window as
 * inlier or outlier.
 */
void SearchOutliers() {
    if (bTrace) Println("Invoke query: ");
    // Index-based walk over the FIFO of window nodes.
    for (int idx = 0; idx < windowNodes.size(); idx++) {
        ISBNodeExact current = (ISBNodeExact) windowNodes.get(idx);
        if (bTrace) {
            Print("  Process node: ");
            PrintNode(current);
        }
        UpdateNodeType(current);
    }
}
/**
 * Classifies a node: it is an outlier iff its total neighbor count
 * (succeeding + still-valid preceding) is below k.
 */
void UpdateNodeType(ISBNodeExact node) {
    // Succeeding neighbors are counted incrementally on arrival.
    final int succ_neighs = node.count_after;
    if (bTrace) Println("  succ_neighs: " + succ_neighs);
    // Preceding neighbors: only those whose objects have not yet expired.
    final int prec_neighs = node.CountPrecNeighs(GetWindowStart());
    if (bTrace) {
        Println("  GetWindowStart(): " + GetWindowStart());
        node.PrintPrecNeighs();
        Println("  prec_neighs: " + prec_neighs);
    }
    final boolean isOutlier = (succ_neighs + prec_neighs < m_k);
    if (!isOutlier) {
        RemoveOutlier(node);
        return;
    }
    SaveOutlier(node);
    if (bTrace) {
        Print("*** Outlier: ");
        PrintNode(node);
    }
}
/**
 * Bumps the node's per-lifetime counters according to its current
 * classification, without touching the reported-outlier set.
 */
void UpdateNodeStatistics(ISBNodeExact node) {
    // Total neighbors = succeeding + still-valid preceding neighbors.
    final int neighbours = node.count_after + node.CountPrecNeighs(GetWindowStart());
    if (neighbours < m_k)
        node.nOutlier++; // too few neighbors: counts as an outlier tick
    else
        node.nInlier++;  // enough neighbors: counts as an inlier tick
}
}
| Java |
/*
* ApproxSTORM.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.Angiulli;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Random;
import java.util.Set;
import java.util.Vector;
import moa.clusterers.outliers.Angiulli.ISBIndex.ISBNode;
import moa.clusterers.outliers.Angiulli.ISBIndex.ISBSearchResult;
import moa.options.FloatOption;
import moa.options.IntOption;
import weka.core.Instance;
/**
 * Approximate distance-based outlier detection over a sliding window
 * (approx-STORM). Instead of remembering each node's preceding neighbors
 * exactly, the detector keeps a bounded sample of "safe inliers" and stores
 * per node only the fraction of safe inliers among its preceding neighbors
 * at arrival time; the preceding-neighbor count is later estimated from
 * that fraction.
 *
 * Fix vs. original: the raw {@code Iterator} types (and the resulting
 * unchecked/unnecessary casts) in {@link #GetSafeInlier(int)} and
 * {@link #PrintSafeInliers()} are replaced with {@code Iterator<ISBNode>}.
 */
public class ApproxSTORM extends STORMBase {
    /**
     * ISB node extended with the approximate-STORM per-node state.
     */
    public class ISBNodeAppr extends ISBNode {
        // neighbors seen after / before this node's arrival
        public Long count_after, count_before;
        // fraction of safe inliers among preceding neighbors at arrival time
        public double fract_before;
        public ISBNodeAppr(Instance inst, StreamObj obj, Long id, int k) {
            super(inst, obj, id);
            // NOTE: this writes the *enclosing detector's* m_k field
            // (ISBNodeAppr is a non-static inner class; the node itself
            // stores no k) — kept as in the original.
            m_k = k;
            count_after = 0L;
            count_before = 0L;
            fract_before = 0;
        }
    }
    public FloatOption radiusOption = new FloatOption("radius", 'r', "Search radius.", 0.1);
    public IntOption kOption = new IntOption("k", 't', "Parameter k.", 50);
    public IntOption queryFreqOption = new IntOption("queryFreq", 'q', "Query frequency.", 1);
    public FloatOption pOption = new FloatOption("p", 'p', "Parameter p.", 0.1);
    Set<ISBNode> safe_inliers; // sampled set of safe inliers
    int m_FractWindowSize;     // cap on retained safe inliers: p * window size
    Random m_Random;           // picks a random safe inlier to evict
    /**
     * Builds an HTML tooltip describing a node: its coordinates, id and
     * neighbor counters.
     */
    @Override
    public String getObjectInfo(Object obj) {
        if (obj == null) return null;
        ISBNodeAppr node = (ISBNodeAppr) obj;
        ArrayList<String> infoTitle = new ArrayList<String>();
        ArrayList<String> infoValue = new ArrayList<String>();
        StringBuilder sb = new StringBuilder();
        // show node position
        for (int i = 0; i < node.obj.dimensions(); i++) {
            infoTitle.add("Dim" + (i+1));
            infoValue.add(String.format("%.3f", node.obj.get(i)));
        }
        // show node properties
        infoTitle.add("id");
        infoValue.add(String.format("%d", node.id));
        infoTitle.add("count_after");
        infoValue.add(String.format("%d", node.count_after));
        infoTitle.add("count_before");
        infoValue.add(String.format("%d", node.count_before));
        sb.append("<html>");
        sb.append("<table>");
        int i = 0;
        while(i < infoTitle.size() && i < infoValue.size()){
            sb.append("<tr><td><b>"+infoTitle.get(i)+":</b></td><td>"+infoValue.get(i)+"</td></tr>");
            i++;
        }
        sb.append("</table>");
        sb.append("</html>");
        return sb.toString();
    }
    public ApproxSTORM()
    {
        // System.out.println("DistanceOutliersAppr: created");
    }
    /**
     * Reads the option values and resets all runtime state: object-id
     * counter, window FIFO, ISB index, safe-inlier sample and statistics.
     */
    @Override
    public void Init() {
        super.Init();
        m_WindowSize = windowSizeOption.getValue();
        m_radius = radiusOption.getValue();
        m_k = kOption.getValue();
        m_QueryFreq = queryFreqOption.getValue();
        m_FractWindowSize = (int) (pOption.getValue() * m_WindowSize);
        Println("Init DistanceOutliersAppr:");
        Println("   window_size: " + m_WindowSize);
        Println("   radius: " + m_radius);
        Println("   k: " + m_k);
        Println("   query_freq: " + m_QueryFreq);
        m_Random = new Random();
        objId = FIRST_OBJ_ID; // init object identifier
        // create fifo
        windowNodes = new Vector<ISBNode>();
        // create ISB
        ISB = new ISBIndex(m_radius, m_k);
        // create safe_inliers list
        safe_inliers = new HashSet<ISBNode>();
        // init statistics
        m_nBothInlierOutlier = 0;
        m_nOnlyInlier = 0;
        m_nOnlyOutlier = 0;
    }
    void AddSafeInlier(ISBNode node) {
        safe_inliers.add(node);
    }
    /**
     * Returns the idx-th safe inlier in the set's iteration order,
     * 0 <= idx < safe_inliers.size().
     * (Fixed: typed iterator instead of the raw type and the pointless
     * ISBNodeAppr cast of the original.)
     */
    ISBNode GetSafeInlier(int idx) {
        ISBNode node = null;
        Iterator<ISBNode> it = safe_inliers.iterator();
        while (idx >= 0) {
            node = it.next();
            idx--;
        }
        return node;
    }
    // A node becomes a "safe inlier" once it has at least k succeeding
    // neighbors: those neighbors can only expire after it does.
    boolean IsSafeInlier(ISBNodeAppr node) {
        return node.count_after >= m_k;
    }
    void PrintSafeInliers() {
        Print("Safe inliers: ");
        // typed iterator instead of the raw type of the original
        Iterator<ISBNode> it = safe_inliers.iterator();
        while (it.hasNext()) {
            ISBNode node = it.next();
            Print(node.id + " ");
        }
        Println(" ");
    }
    /**
     * Evicts an expired node from every structure that tracks it and folds
     * its lifetime counters into the detector-wide statistics.
     */
    void RemoveNode(ISBNode node) {
        // remove node from ISB
        ISB.Remove(node);
        // remove from fifo
        windowNodes.remove(node);
        // remove node from safe_inliers
        safe_inliers.remove(node);
        // remove from outliers
        RemoveExpiredOutlier(new Outlier(node.inst, node.id, node));
        // update statistics
        UpdateStatistics(node);
    }
    /**
     * Drops a sampled-out safe inlier from the ISB and the sample.
     * Note: it remains in the FIFO window list until it expires.
     */
    void RemoveSafeInlier(ISBNode node) {
        // remove node from ISB
        ISB.Remove(node);
        // remove node from safe_inliers
        safe_inliers.remove(node);
    }
    /**
     * Expires at most one node per call: the FIFO head, if it has fallen
     * behind the window start.
     */
    void DeleteExpiredNode() {
        if (windowNodes.size() <= 0)
            return;
        // get oldest node
        ISBNode node = windowNodes.get(0);
        // check if node has expired
        if (node.id < GetWindowStart()) {
            if (bTrace) {
                Print("Delete expired node: ");
                PrintNode(node);
            }
            // remove node
            RemoveNode(node);
        }
    }
    /**
     * Handles one new stream object: expires the FIFO head if needed,
     * links the new node with its range neighbors (promoting neighbors to
     * safe inliers and down-sampling the safe-inlier set when it exceeds
     * p * windowSize), computes fract_before, inserts the node, and either
     * re-runs the outlier query or updates the node's statistics.
     */
    @Override
    protected void ProcessNewStreamObj(Instance inst)
    {
        if (bShowProgress) ShowProgress("Processed " + objId + " stream objects.");
        // PrintInstance(inst);
        double[] values = getInstanceValues(inst);
        StreamObj obj = new StreamObj(values);
        if (bTrace) Println("\n- - - - - - - - - - - -\n");
        // delete a node if it has expired
        DeleteExpiredNode();
        // create new ISB node
        ISBNodeAppr nodeNew = new ISBNodeAppr(inst, obj, objId, m_k);
        if (bTrace) {
            Print("New obj: ");
            PrintNode(nodeNew);
        }
        // update object identifier
        objId++;
        // init nodeNew
        nodeNew.count_after = 1L;
        nodeNew.count_before = 0L;
        // perform range query search
        if (bTrace) Println("Perform range query seach:");
        nRangeQueriesExecuted++;
        Vector<ISBIndex.ISBSearchResult> nodes = ISB.RangeSearch(nodeNew, m_radius);
        // process each returned node
        int nSafeInliers;
        Long count_si_before = 0L;
        for (ISBSearchResult res : nodes) {
            ISBNodeAppr n = (ISBNodeAppr) res.node;
            if (bTrace) {
                Printf("  Found at d=%.2f: ", res.distance);
                PrintNode(res.node);
            }
            n.count_after++;
            if (IsSafeInlier(n)) {
                if (bTrace) Println("  Safe inlier: id=" + n.id);
                // the set ignores duplicates, so re-adding is harmless
                AddSafeInlier(n);
                count_si_before++;
            }
            nSafeInliers = safe_inliers.size();
            if (nSafeInliers > m_FractWindowSize) {
                // get a random safe inlier: 0 <= idx < nSafeInliers
                int idx = m_Random.nextInt(nSafeInliers);
                ISBNode si = GetSafeInlier(idx);
                if (bTrace) Println("  Remove random safe inlier: id=" + si.id);
                // remove node from ISB and safe-inliers-list
                RemoveSafeInlier(si);
            }
            nodeNew.count_before++;
        }
        // Set fract_before of curr_node which is determined as the ratio
        // between the number of preceding neighbors of curr_node in ISB
        // which are safe inliers and the total number of safe inliers in
        // ISB, at the arrival time of curr_node.
        nSafeInliers = safe_inliers.size();
        if (nSafeInliers > 0) {
            nodeNew.fract_before = (double)count_si_before / (double)nSafeInliers;
        }
        else {
            if (bTrace) Println("Set fract before: no safe inliers yet, set 0.");
            nodeNew.fract_before = 0;
        }
        if (bTrace) {
            Println("Node: ");
            Println("  count_after=" + nodeNew.count_after);
            Println("  count_before=" + nodeNew.count_before);
            Printf("  fract_before=%.3f\n", nodeNew.fract_before);
            Println("Insert new node to ISB.");
        }
        // insert node to ISB
        ISB.Insert(nodeNew);
        // insert node at window
        windowNodes.add(nodeNew);
        if (bTrace) {
            PrintWindow();
            PrintSafeInliers();
        }
        if (CanSearch()) {
            // invoke query function to detect outliers
            SearchOutliers();
        } else {
            // update statistics outlierness of new node
            UpdateNodeStatistics(nodeNew);
        }
    }
    /** Query function: re-classifies every node still in the window. */
    void SearchOutliers() {
        if (bTrace) Println("Invoke query: ");
        ISBNodeAppr node;
        // process each node in the ISB (also in window)
        for (int i = 0; i < windowNodes.size(); i++) {
            node = (ISBNodeAppr) windowNodes.get(i);
            if (bTrace) {
                Print("  Process node: ");
                PrintNode(node);
            }
            UpdateNodeType(node);
        }
    }
    /**
     * Classifies a node using the approximate neighbor count: the
     * preceding neighbors are *estimated* as fract_before times the number
     * of the node's preceding window slots that are still alive.
     */
    void UpdateNodeType(ISBNodeAppr node) {
        double succ_neighs, prec_neighs;
        // get number of succeeding neighbors
        succ_neighs = node.count_after;
        if (bTrace) Println("  succ_neighs: " + succ_neighs);
        // estimate number of preceding neighbors
        prec_neighs = node.fract_before * (double)Math.abs((node.id + m_WindowSize) - GetWindowEnd());
        if (bTrace) Println("  prec_neighs: " + prec_neighs);
        // check if node is an outlier
        if (succ_neighs + prec_neighs < m_k) {
            SaveOutlier(node);
            if (bTrace) {
                Print("*** Outlier: ");
                PrintNode(node);
            }
        } else {
            RemoveOutlier(node);
        }
    }
    /** Same estimate as UpdateNodeType, but only bumps the node's counters. */
    void UpdateNodeStatistics(ISBNodeAppr node) {
        double succ_neighs = node.count_after;
        double prec_neighs = node.fract_before * (double)Math.abs((node.id + m_WindowSize) - GetWindowEnd());
        if (succ_neighs + prec_neighs < m_k) {
            node.nOutlier++; // update statistics
        } else {
            node.nInlier++; // update statistics
        }
    }
}
| Java |
/*
* MyMTree.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.Angiulli;
import java.util.Set;
import moa.clusterers.outliers.utils.mtree.ComposedSplitFunction;
import moa.clusterers.outliers.utils.mtree.DistanceFunction;
import moa.clusterers.outliers.utils.mtree.DistanceFunctions;
import moa.clusterers.outliers.utils.mtree.MTree;
import moa.clusterers.outliers.utils.mtree.PartitionFunctions;
import moa.clusterers.outliers.utils.mtree.PromotionFunction;
import moa.clusterers.outliers.utils.mtree.utils.Pair;
import moa.clusterers.outliers.utils.mtree.utils.Utils;
/**
 * M-tree specialisation over {@link StreamObj} points with Euclidean
 * distance, deterministic promotion (the two objects farthest apart) and
 * balanced partitioning. Runs the inherited {@code _check()} after every
 * update — presumably an internal consistency check; confirm its cost
 * before using in tight loops.
 *
 * Fixes vs. original: {@code @Override} on the overriding methods and
 * removal of the stray semicolon after the class body.
 */
class MyMTree extends MTree<StreamObj> {
    // Deterministic promotion: promote the min/max pair of the data set
    // instead of a random choice.
    private static final PromotionFunction<StreamObj> nonRandomPromotion = new PromotionFunction<StreamObj>() {
        @Override
        public Pair<StreamObj> process(Set<StreamObj> dataSet, DistanceFunction<? super StreamObj> distanceFunction) {
            return Utils.minMax(dataSet);
        }
    };
    MyMTree() {
        // min node capacity 2, Euclidean metric, deterministic split
        super(2, DistanceFunctions.EUCLIDEAN,
            new ComposedSplitFunction<StreamObj>(
                nonRandomPromotion,
                new PartitionFunctions.BalancedPartition<StreamObj>()));
    }
    /** Adds a point, then verifies the tree. */
    @Override
    public void add(StreamObj data) {
        super.add(data);
        _check();
    }
    /** Removes a point, then verifies the tree; returns whether it was present. */
    @Override
    public boolean remove(StreamObj data) {
        boolean result = super.remove(data);
        _check();
        return result;
    }
    /** Exposes the metric in use to package collaborators. */
    DistanceFunction<? super StreamObj> getDistanceFunction() {
        return distanceFunction;
    }
}
| Java |
/*
* StreamObj.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.Angiulli;
import moa.clusterers.outliers.utils.mtree.DistanceFunctions.EuclideanCoordinate;
public class StreamObj implements EuclideanCoordinate, Comparable<StreamObj> {
private final double[] values;
private final int hashCode;
public StreamObj(double... values) {
this.values = values;
int h = 1;
for (double value : values) {
h = 31 * (int) h + (int) value;
}
this.hashCode = h;
}
@Override
public int dimensions() {
return values.length;
}
@Override
public double get(int index) {
return values[index];
}
@Override
public int hashCode() {
return hashCode;
}
@Override
public boolean equals(Object obj) {
if (obj instanceof StreamObj) {
StreamObj that = (StreamObj) obj;
if (this.dimensions() != that.dimensions()) {
return false;
}
for (int i = 0; i < this.dimensions(); i++) {
if (this.values[i] != that.values[i]) {
return false;
}
}
return true;
} else {
return false;
}
}
@Override
public int compareTo(StreamObj that) {
int dimensions = Math.min(this.dimensions(), that.dimensions());
for (int i = 0; i < dimensions; i++) {
double v1 = this.values[i];
double v2 = that.values[i];
if (v1 > v2) {
return +1;
}
if (v1 < v2) {
return -1;
}
}
if (this.dimensions() > dimensions) {
return +1;
}
if (that.dimensions() > dimensions) {
return -1;
}
return 0;
}
} | Java |
/*
* ISBIndex.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.Angiulli;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.Vector;
import weka.core.Instance;
/**
 * Indexed Stream Buffer (ISB): pairs an M-tree over the raw stream points
 * with a map from each point's hash code to the set of nodes referencing an
 * equal point. Several nodes may share one (equal) point; the point is kept
 * in the M-tree until its last referencing node is removed.
 */
public class ISBIndex {
    /**
     * A stream object together with its weka instance, stream identifier
     * and inlier/outlier lifetime counters.
     */
    public abstract static class ISBNode {
        public Instance inst;
        public StreamObj obj;
        public Long id;
        // statistics: how many classification rounds judged the node each way
        public int nOutlier;
        public int nInlier;
        public ISBNode(Instance inst, StreamObj obj, Long id) {
            this.inst = inst;
            this.obj = obj;
            this.id = id;
            // init statistics
            nOutlier = 0;
            nInlier = 0;
        }
    }
    MyMTree mtree;                       // metric index over the stream points
    Map<Integer, Set<ISBNode>> mapNodes; // point hashCode -> referencing nodes
    double m_radius;
    int m_k; // k nearest neighbors
    public ISBIndex(double radius, int k) {
        mtree = new MyMTree();
        mapNodes = new HashMap<Integer, Set<ISBNode>>();
        m_radius = radius;
        m_k = k;
    }
    /** A node hit by a range query, with its distance from the query point. */
    public static class ISBSearchResult {
        public ISBNode node;
        public double distance;
        public ISBSearchResult(ISBNode n, double distance) {
            this.node = n;
            this.distance = distance;
        }
    }
    /**
     * Returns every node whose point lies within {@code radius} of
     * {@code node}'s point, together with the respective distance.
     */
    public Vector<ISBSearchResult> RangeSearch(ISBNode node, double radius) {
        Vector<ISBSearchResult> results = new Vector<ISBSearchResult>();
        // execute range search at mtree
        MyMTree.Query query = mtree.getNearestByRange(node.obj, radius);
        for (MyMTree.ResultItem q : query) {
            // q.data is a point within range, q.distance its distance from
            // the query point; fan out to all nodes referencing that point
            for (ISBNode hit : MapGetNodes(q.data))
                results.add(new ISBSearchResult(hit, q.distance));
        }
        return results;
    }
    public void Insert(ISBNode node) {
        // insert object of node at mtree
        mtree.add(node.obj);
        // insert node at map
        MapInsert(node);
    }
    public void Remove(ISBNode node) {
        // remove from map
        MapDelete(node);
        // check if stream object at mtree is still being referenced
        if (MapCountObjRefs(node.obj) <= 0) {
            // delete stream object from mtree
            mtree.remove(node.obj);
        }
    }
    /** All nodes whose stream object equals {@code obj}. */
    Vector<ISBNode> MapGetNodes(StreamObj obj) {
        int h = obj.hashCode();
        Vector<ISBNode> v = new Vector<ISBNode>();
        if (mapNodes.containsKey(h)) {
            // enhanced-for replaces the explicit Iterator of the original;
            // the equals() check filters out hash collisions
            for (ISBNode node : mapNodes.get(h)) {
                if (node.obj.equals(obj))
                    v.add(node);
            }
        }
        return v;
    }
    /** Number of nodes still referencing a stream object equal to {@code obj}. */
    int MapCountObjRefs(StreamObj obj) {
        int h = obj.hashCode();
        int iCount = 0;
        if (mapNodes.containsKey(h)) {
            for (ISBNode n : mapNodes.get(h)) {
                if (n.obj.equals(obj))
                    iCount++;
            }
        }
        return iCount;
    }
    void MapInsert(ISBNode node) {
        int h = node.obj.hashCode();
        // single lookup instead of containsKey + get
        Set<ISBNode> s = mapNodes.get(h);
        if (s == null) {
            s = new HashSet<ISBNode>();
            mapNodes.put(h, s);
        }
        s.add(node);
    }
    void MapDelete(ISBNode node) {
        int h = node.obj.hashCode();
        Set<ISBNode> s = mapNodes.get(h);
        if (s != null) {
            s.remove(node);
            if (s.isEmpty()) { // drop empty buckets so the map cannot grow forever
                mapNodes.remove(h);
            }
        }
    }
}
| Java |
/*
* Test.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.Angiulli;
import moa.streams.ArffFileStream;
import moa.streams.clustering.RandomRBFGeneratorEvents;
import weka.core.Instance;
/**
 * Command-line smoke test: streams synthetic RBF data through an
 * {@link ExactSTORM} detector and reports the total processing time.
 */
public class Test {
    public static void main(String[] args) throws Exception
    {
        //if (true) return;
        int numInstances = 10000;
        RandomRBFGeneratorEvents stream = new RandomRBFGeneratorEvents();
        stream.prepareForUse();
        //DistanceOutliersAppr myOutlierDetector= new DistanceOutliersAppr();
        ExactSTORM myOutlierDetector= new ExactSTORM();
        myOutlierDetector.queryFreqOption.setValue(1);
        myOutlierDetector.setModelContext(stream.getHeader());
        myOutlierDetector.prepareForUse();
        // primitive long for the timestamp: the boxed Long of the original
        // forced needless (un)boxing on every arithmetic use
        long tmStart = System.currentTimeMillis();
        int numberSamples = 0;
        int w = myOutlierDetector.windowSizeOption.getValue();
        while (stream.hasMoreInstances() && (numberSamples < numInstances)) {
            Instance newInst = stream.nextInstance();
            myOutlierDetector.processNewInstanceImpl(newInst);
            numberSamples++;
            if (numberSamples % 100 == 0) {
                // hook for progress output, intentionally disabled
                //System.out.println("Processed " + numberSamples + " stream objects.");
            }
            if ((numberSamples % (w / 2)) == 0) {
                // hook for dumping outliers every half window, disabled
                //myOutlierDetector.PrintOutliers();
            }
        }
        // myOutlierDetector.PrintOutliers();
        System.out.println("Total time = " + (System.currentTimeMillis() - tmStart) + " ms");
    }
}
| Java |
/*
* STORMBase.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.Angiulli;
import java.util.Vector;
import moa.clusterers.outliers.Angiulli.ISBIndex.ISBNode;
import moa.clusterers.outliers.MyBaseOutlierDetector;
import moa.options.FlagOption;
/**
 * Base class shared by the STORM detectors (ExactSTORM / ApproxSTORM):
 * owns the sliding-window bookkeeping, the ISB reference, query scheduling
 * and the inlier/outlier statistics.
 */
public abstract class STORMBase extends MyBaseOutlierDetector {
    public FlagOption waitWinFullOption = new FlagOption("waitWinFull", 'a', "Output outliers when windows is full.");
    protected static final Long FIRST_OBJ_ID = 1L;
    // object identifier increments with each new data stream object
    protected Long objId;
    // list used to find expired nodes (FIFO: index 0 is the oldest)
    protected Vector<ISBNode> windowNodes;
    protected ISBIndex ISB;
    protected int m_WindowSize;
    protected double m_radius;
    protected int m_k;
    // perform a query every m_QueryFreq objects
    protected int m_QueryFreq;
    // statistics accumulated over already-expired nodes
    public int m_nBothInlierOutlier;
    public int m_nOnlyInlier;
    public int m_nOnlyOutlier;
    /**
     * Renders the classification counters: expired-node totals plus the
     * contribution of the nodes still inside the window.
     */
    @Override
    public String getStatistics() {
        StringBuilder sb = new StringBuilder();
        sb.append("Statistics:\n\n");
        // get counters of expired nodes
        int nBothInlierOutlier = m_nBothInlierOutlier;
        int nOnlyInlier = m_nOnlyInlier;
        int nOnlyOutlier = m_nOnlyOutlier;
        // add counters of non expired nodes
        for (ISBNode node : windowNodes) {
            if ((node.nInlier > 0) && (node.nOutlier > 0))
                nBothInlierOutlier++;
            else if (node.nInlier > 0)
                nOnlyInlier++;
            else
                nOnlyOutlier++;
        }
        int sum = nBothInlierOutlier + nOnlyInlier + nOnlyOutlier;
        if (sum > 0) {
            sb.append(String.format("  Nodes always inlier: %d (%.1f%%)\n", nOnlyInlier, (100 * nOnlyInlier) / (double)sum));
            sb.append(String.format("  Nodes always outlier: %d (%.1f%%)\n", nOnlyOutlier, (100 * nOnlyOutlier) / (double)sum));
            sb.append(String.format("  Nodes both inlier and outlier: %d (%.1f%%)\n", nBothInlierOutlier, (100 * nBothInlierOutlier) / (double)sum));
            sb.append("  (Sum: " + sum + ")\n");
        }
        sb.append("\n  Total range queries: " + nRangeQueriesExecuted + "\n");
        sb.append("  Max memory usage: " + iMaxMemUsage + " MB\n");
        sb.append("  Total process time: " + String.format("%.2f ms", nTotalRunTime / 1000.0) + "\n");
        return sb.toString();
    }
    /** Id of the newest object in the window (the last one processed). */
    Long GetWindowEnd() {
        return objId - 1;
    }
    /** Id of the oldest object that is still inside the window. */
    Long GetWindowStart() {
        Long x = GetWindowEnd() - m_WindowSize + 1;
        if (x < FIRST_OBJ_ID)
            x = FIRST_OBJ_ID;
        return x;
    }
    boolean IsWinFull() {
        return (GetWindowEnd() >= FIRST_OBJ_ID + m_WindowSize - 1);
    }
    /**
     * True when an outlier query should run now: every m_QueryFreq objects,
     * optionally only once the window has filled up.
     */
    boolean CanSearch() {
        if (IsWinFull() || !waitWinFullOption.isSet()) {
            if ((GetWindowEnd() - FIRST_OBJ_ID + 1) % m_QueryFreq == 0) {
                // perform query every m_QueryFreq objects
                return true;
            }
        }
        return false;
    }
    /** Reports the node as an outlier and bumps its outlier counter. */
    void SaveOutlier(ISBNode node) {
        AddOutlier(new Outlier(node.inst, node.id, node));
        node.nOutlier++; // update statistics
    }
    /** Retracts the node from the outlier set and bumps its inlier counter. */
    void RemoveOutlier(ISBNode node) {
        RemoveOutlier(new Outlier(node.inst, node.id, node));
        node.nInlier++; // update statistics
    }
    /** Folds an expiring node's lifetime counters into the detector totals. */
    protected void UpdateStatistics(ISBNode node) {
        if ((node.nInlier > 0) && (node.nOutlier > 0))
            m_nBothInlierOutlier++;
        else if (node.nInlier > 0)
            m_nOnlyInlier++;
        else
            m_nOnlyOutlier++;
    }
    @Override
    protected boolean IsNodeIdInWin(long id) {
        // return the condition directly; the original's if/else returning
        // true/false was redundant
        return (GetWindowStart() <= id) && (id <= GetWindowEnd());
    }
    void PrintWindow() {
        Println("Window [" + GetWindowStart() + "-" + GetWindowEnd() + "]: ");
        // enhanced-for replaces the original index loop
        for (ISBNode node : windowNodes) {
            Print("  Node: ");
            PrintNode(node);
        }
    }
    /** Prints a node as "id=N (x1, x2, ...)". */
    void PrintNode(ISBNode n) {
        Print("id=" + n.id + " (");
        int dim = n.obj.dimensions();
        for (int d = 0; d < dim; d++) {
            Print(Double.toString(n.obj.get(d)));
            if (d < dim - 1)
                Print(", ");
        }
        Println(")");
    }
}
| Java |
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package moa.clusterers.outliers.utils.mtree;
import java.util.Set;
import moa.clusterers.outliers.utils.mtree.utils.Pair;
/**
* A {@linkplain SplitFunction split function} that is defined by composing
* a {@linkplain PromotionFunction promotion function} and a
* {@linkplain PartitionFunction partition function}.
*
* @param <DATA> The type of the data objects.
*/
/**
 * A {@linkplain SplitFunction split function} that is defined by composing
 * a {@linkplain PromotionFunction promotion function} and a
 * {@linkplain PartitionFunction partition function}.
 *
 * @param <DATA> The type of the data objects.
 */
public class ComposedSplitFunction<DATA> implements SplitFunction<DATA> {
    // Both collaborators are fixed at construction time; 'final' documents
    // and enforces the immutability of the composition.
    private final PromotionFunction<DATA> promotionFunction;
    private final PartitionFunction<DATA> partitionFunction;
    /**
     * The constructor of a {@link SplitFunction} composed by a
     * {@link PromotionFunction} and a {@link PartitionFunction}.
     *
     * @param promotionFunction chooses the two guiding data objects
     * @param partitionFunction splits the data set around those objects
     */
    public ComposedSplitFunction(
            PromotionFunction<DATA> promotionFunction,
            PartitionFunction<DATA> partitionFunction
        )
    {
        this.promotionFunction = promotionFunction;
        this.partitionFunction = partitionFunction;
    }
    /**
     * Promotes a pair of objects, then partitions the set around them.
     */
    @Override
    public SplitResult<DATA> process(Set<DATA> dataSet, DistanceFunction<? super DATA> distanceFunction) {
        Pair<DATA> promoted = promotionFunction.process(dataSet, distanceFunction);
        Pair<Set<DATA>> partitions = partitionFunction.process(promoted, dataSet, distanceFunction);
        return new SplitResult<DATA>(promoted, partitions);
    }
}
| Java |
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package moa.clusterers.outliers.utils.mtree;
import java.util.Set;
import moa.clusterers.outliers.utils.mtree.utils.Pair;
/**
* An object with partitions a set of data into two sub-sets.
*
* @param <DATA> The type of the data on the sets.
*/
/**
 * An object which partitions a set of data into two sub-sets.
 *
 * @param <DATA> The type of the data on the sets.
 */
public interface PartitionFunction<DATA> {
    /**
     * Executes the partitioning.
     *
     * @param promoted The pair of data objects that will guide the partition
     *        process.
     * @param dataSet The original set of data objects to be partitioned.
     * @param distanceFunction A {@linkplain DistanceFunction distance function}
     *        to be used on the partitioning.
     * @return A pair of partition sub-sets. Each sub-set must correspond to
     *         one of the {@code promoted} data objects.
     */
    Pair<Set<DATA>> process(Pair<DATA> promoted, Set<DATA> dataSet, DistanceFunction<? super DATA> distanceFunction);
}
| Java |
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package moa.clusterers.outliers.utils.mtree;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import moa.clusterers.outliers.utils.mtree.utils.Pair;
/**
* Some pre-defined implementations of {@linkplain PartitionFunction partition
* functions}.
*/
/**
 * Some pre-defined implementations of {@linkplain PartitionFunction partition
 * functions}.
 */
public final class PartitionFunctions {
    /**
     * Don't let anyone instantiate this class.
     */
    private PartitionFunctions() {}
    /**
     * A {@linkplain PartitionFunction partition function} that tries to
     * distribute the data objects equally between the promoted data objects,
     * associating to each promoted data object the nearest data objects.
     *
     * @param <DATA> The type of the data objects.
     */
    public static class BalancedPartition<DATA> implements PartitionFunction<DATA> {
        /**
         * Processes the balanced partition. Roughly:
         * <pre>
         * While dataSet is not empty:
         *     move the unassigned object nearest to promoted.first into result.first
         *     move the unassigned object nearest to promoted.second into result.second
         * </pre>
         *
         * @see mtree.PartitionFunction#process(mtree.utils.Pair, java.util.Set, mtree.DistanceFunction)
         */
        @Override
        public Pair<Set<DATA>> process(
                final Pair<DATA> promoted,
                Set<DATA> dataSet,
                final DistanceFunction<? super DATA> distanceFunction
            )
        {
            // Two copies of the data, each ordered by increasing distance to
            // one of the promoted objects. The duplicated anonymous
            // comparators of the original are factored into distanceOrder().
            List<DATA> queue1 = new ArrayList<DATA>(dataSet);
            Collections.sort(queue1, distanceOrder(promoted.first, distanceFunction));
            List<DATA> queue2 = new ArrayList<DATA>(dataSet);
            Collections.sort(queue2, distanceOrder(promoted.second, distanceFunction));
            Pair<Set<DATA>> partitions = new Pair<Set<DATA>>(new HashSet<DATA>(), new HashSet<DATA>());
            int index1 = 0;
            int index2 = 0;
            // Alternately assign each partition the nearest object not yet
            // claimed by the other one. (Both bounds now use '<'; the
            // original mixed '<' and '!=' with identical effect.)
            while (index1 < queue1.size() || index2 < queue2.size()) {
                while (index1 < queue1.size()) {
                    DATA data = queue1.get(index1++);
                    if (!partitions.second.contains(data)) {
                        partitions.first.add(data);
                        break;
                    }
                }
                while (index2 < queue2.size()) {
                    DATA data = queue2.get(index2++);
                    if (!partitions.first.contains(data)) {
                        partitions.second.add(data);
                        break;
                    }
                }
            }
            return partitions;
        }
        /**
         * Builds a comparator ordering data objects by increasing distance
         * from {@code anchor}.
         */
        private Comparator<DATA> distanceOrder(
                final DATA anchor,
                final DistanceFunction<? super DATA> distanceFunction) {
            return new Comparator<DATA>() {
                @Override
                public int compare(DATA data1, DATA data2) {
                    double distance1 = distanceFunction.calculate(data1, anchor);
                    double distance2 = distanceFunction.calculate(data2, anchor);
                    return Double.compare(distance1, distance2);
                }
            };
        }
    }
}
| Java |
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package moa.clusterers.outliers.utils.mtree;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.PriorityQueue;
import java.util.Set;
import moa.clusterers.outliers.utils.mtree.SplitFunction.SplitResult;
/**
* The main class that implements the M-Tree.
*
* @param <DATA> The type of data that will be indexed by the M-Tree. Objects of
* this type are stored in HashMaps and HashSets, so their
* {@code hashCode()} and {@code equals()} methods must be consistent.
*/
public class MTree<DATA> {
/**
* The type of the results for nearest-neighbor queries.
*/
public class ResultItem {
private ResultItem(DATA data, double distance) {
this.data = data;
this.distance = distance;
}
/** A nearest-neighbor. */
public DATA data;
/**
* The distance from the nearest-neighbor to the query data object
* parameter.
*/
public double distance;
}
// Exception classes
private static class SplitNodeReplacement extends Exception {
// A subclass of Throwable cannot be generic. :-(
// So, we have newNodes declared as Object[] instead of Node[].
private Object newNodes[];
private SplitNodeReplacement(Object... newNodes) {
this.newNodes = newNodes;
}
}
private static class RootNodeReplacement extends Exception {
// A subclass of Throwable cannot be generic. :-(
// So, we have newRoot declared as Object instead of Node.
private Object newRoot;
private RootNodeReplacement(Object newRoot) {
this.newRoot = newRoot;
}
}
private static class NodeUnderCapacity extends Exception { }
private static class DataNotFound extends Exception { }
/**
* An {@link Iterable} class which can be iterated to fetch the results of a
* nearest-neighbors query.
*
* <p>The neighbors are presented in non-decreasing order from the {@code
* queryData} argument to the {@link MTree#getNearest(Object, double, int)
* getNearest*()}
* call.
*
* <p>The query on the M-Tree is executed during the iteration, as the
* results are fetched. It means that, by the time when the <i>n</i>-th
* result is fetched, the next result may still not be known, and the
* resources allocated were only the necessary to identify the <i>n</i>
* first results.
*/
public class Query implements Iterable<ResultItem> {
private class ResultsIterator implements Iterator<ResultItem> {
private class ItemWithDistances <U> implements Comparable<ItemWithDistances<U>> {
private U item;
private double distance;
private double minDistance;
public ItemWithDistances(U item, double distance, double minDistance) {
this.item = item;
this.distance = distance;
this.minDistance = minDistance;
}
@Override
public int compareTo(ItemWithDistances<U> that) {
if(this.minDistance < that.minDistance) {
return -1;
} else if(this.minDistance > that.minDistance) {
return +1;
} else {
return 0;
}
}
}
private ResultItem nextResultItem = null;
private boolean finished = false;
private PriorityQueue<ItemWithDistances<Node>> pendingQueue = new PriorityQueue<ItemWithDistances<Node>>();
private double nextPendingMinDistance;
private PriorityQueue<ItemWithDistances<Entry>> nearestQueue = new PriorityQueue<ItemWithDistances<Entry>>();
private int yieldedCount;
private ResultsIterator() {
if(MTree.this.root == null) {
finished = true;
return;
}
double distance = MTree.this.distanceFunction.calculate(Query.this.data, MTree.this.root.data);
double minDistance = Math.max(distance - MTree.this.root.radius, 0.0);
pendingQueue.add(new ItemWithDistances<Node>(MTree.this.root, distance, minDistance));
nextPendingMinDistance = minDistance;
}
@Override
public boolean hasNext() {
if(finished) {
return false;
}
if(nextResultItem == null) {
fetchNext();
}
if(nextResultItem == null) {
finished = true;
return false;
} else {
return true;
}
}
@Override
public ResultItem next() {
if(hasNext()) {
ResultItem next = nextResultItem;
nextResultItem = null;
return next;
} else {
throw new NoSuchElementException();
}
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
private void fetchNext() {
assert !finished;
if(finished || yieldedCount >= Query.this.limit) {
finished = true;
return;
}
while(!pendingQueue.isEmpty() || !nearestQueue.isEmpty()) {
if(prepareNextNearest()) {
return;
}
assert !pendingQueue.isEmpty();
ItemWithDistances<Node> pending = pendingQueue.poll();
Node node = pending.item;
for(IndexItem child : node.children.values()) {
if(Math.abs(pending.distance - child.distanceToParent) - child.radius <= Query.this.range) {
double childDistance = MTree.this.distanceFunction.calculate(Query.this.data, child.data);
double childMinDistance = Math.max(childDistance - child.radius, 0.0);
if(childMinDistance <= Query.this.range) {
if(child instanceof MTree.Entry) {
@SuppressWarnings("unchecked")
Entry entry = (Entry)child;
nearestQueue.add(new ItemWithDistances<Entry>(entry, childDistance, childMinDistance));
} else {
@SuppressWarnings("unchecked")
Node childNode = (Node)child;
pendingQueue.add(new ItemWithDistances<Node>(childNode, childDistance, childMinDistance));
}
}
}
}
if(pendingQueue.isEmpty()) {
nextPendingMinDistance = Double.POSITIVE_INFINITY;
} else {
nextPendingMinDistance = pendingQueue.peek().minDistance;
}
}
finished = true;
}
private boolean prepareNextNearest() {
if(!nearestQueue.isEmpty()) {
ItemWithDistances<Entry> nextNearest = nearestQueue.peek();
if(nextNearest.distance <= nextPendingMinDistance) {
nearestQueue.poll();
nextResultItem = new ResultItem(nextNearest.item.data, nextNearest.distance);
++yieldedCount;
return true;
}
}
return false;
}
}
private Query(DATA data, double range, int limit) {
this.data = data;
this.range = range;
this.limit = limit;
}
@Override
public Iterator<ResultItem> iterator() {
return new ResultsIterator();
}
private DATA data;
private double range;
private int limit;
}
/**
* The default minimum capacity of nodes in an M-Tree, when not specified in
* the constructor call.
*/
public static final int DEFAULT_MIN_NODE_CAPACITY = 50;
protected int minNodeCapacity;
protected int maxNodeCapacity;
protected DistanceFunction<? super DATA> distanceFunction;
protected SplitFunction<DATA> splitFunction;
protected Node root;
/**
* Constructs an M-Tree with the specified distance function.
* @param distanceFunction The object used to calculate the distance between
* two data objects.
*/
public MTree(DistanceFunction<? super DATA> distanceFunction,
SplitFunction<DATA> splitFunction) {
this(DEFAULT_MIN_NODE_CAPACITY, distanceFunction, splitFunction);
}
/**
* Constructs an M-Tree with the specified minimum node capacity and
* distance function.
* @param minNodeCapacity The minimum capacity for the nodes of the tree.
* @param distanceFunction The object used to calculate the distance between
* two data objects.
* @param splitFunction The object used to process the split of nodes if
* they are full when a new child must be added.
*/
public MTree(int minNodeCapacity,
DistanceFunction<? super DATA> distanceFunction,
SplitFunction<DATA> splitFunction) {
this(minNodeCapacity, 2 * minNodeCapacity - 1, distanceFunction, splitFunction);
}
/**
* Constructs an M-Tree with the specified minimum and maximum node
* capacities and distance function.
* @param minNodeCapacity The minimum capacity for the nodes of the tree.
* @param maxNodeCapacity The maximum capacity for the nodes of the tree.
* @param distanceFunction The object used to calculate the distance between
* two data objects.
* @param splitFunction The object used to process the split of nodes if
* they are full when a new child must be added.
*/
public MTree(int minNodeCapacity, int maxNodeCapacity,
DistanceFunction<? super DATA> distanceFunction,
SplitFunction<DATA> splitFunction)
{
if(minNodeCapacity < 2 || maxNodeCapacity <= minNodeCapacity ||
distanceFunction == null) {
throw new IllegalArgumentException();
}
if(splitFunction == null) {
splitFunction = new ComposedSplitFunction<DATA>(
new PromotionFunctions.RandomPromotion<DATA>(),
new PartitionFunctions.BalancedPartition<DATA>()
);
}
this.minNodeCapacity = minNodeCapacity;
this.maxNodeCapacity = maxNodeCapacity;
this.distanceFunction = distanceFunction;
this.splitFunction = splitFunction;
this.root = null;
}
/**
* Adds and indexes a data object.
*
* <p>An object that is already indexed should not be added. There is no
* validation regarding this, and the behavior is undefined if done.
*
* @param data The data object to index.
*/
public void add(DATA data) {
if(root == null) {
root = new RootLeafNode(data);
try {
root.addData(data, 0);
} catch (SplitNodeReplacement e) {
throw new RuntimeException("Should never happen!");
}
} else {
double distance = distanceFunction.calculate(data, root.data);
try {
root.addData(data, distance);
} catch(SplitNodeReplacement e) {
Node newRoot = new RootNode(data);
root = newRoot;
for(int i = 0; i < e.newNodes.length; i++) {
@SuppressWarnings("unchecked")
Node newNode = (Node) e.newNodes[i];
distance = distanceFunction.calculate(root.data, newNode.data);
root.addChild(newNode, distance);
}
}
}
}
/**
* Removes a data object from the M-Tree.
* @param data The data object to be removed.
* @return {@code true} if and only if the object was found.
*/
public boolean remove(DATA data) {
if(root == null) {
return false;
}
double distanceToRoot = distanceFunction.calculate(data, root.data);
try {
root.removeData(data, distanceToRoot);
} catch(RootNodeReplacement e) {
@SuppressWarnings("unchecked")
Node newRoot = (Node) e.newRoot;
root = newRoot;
} catch(DataNotFound e) {
return false;
} catch (NodeUnderCapacity e) {
throw new RuntimeException("Should have never happened", e);
}
return true;
}
/**
* Performs a nearest-neighbors query on the M-Tree, constrained by distance.
* @param queryData The query data object.
* @param range The maximum distance from {@code queryData} to fetched
* neighbors.
* @return A {@link Query} object used to iterate on the results.
*/
public Query getNearestByRange(DATA queryData, double range) {
return getNearest(queryData, range, Integer.MAX_VALUE);
}
/**
* Performs a nearest-neighbors query on the M-Tree, constrained by the
* number of neighbors.
* @param queryData The query data object.
* @param limit The maximum number of neighbors to fetch.
* @return A {@link Query} object used to iterate on the results.
*/
public Query getNearestByLimit(DATA queryData, int limit) {
return getNearest(queryData, Double.POSITIVE_INFINITY, limit);
}
/**
* Performs a nearest-neighbor query on the M-Tree, constrained by distance
* and/or the number of neighbors.
* @param queryData The query data object.
* @param range The maximum distance from {@code queryData} to fetched
* neighbors.
* @param limit The maximum number of neighbors to fetch.
* @return A {@link Query} object used to iterate on the results.
*/
public Query getNearest(DATA queryData, double range, int limit) {
return new Query(queryData, range, limit);
}
/**
* Performs a nearest-neighbor query on the M-Tree, without constraints.
* @param queryData The query data object.
* @return A {@link Query} object used to iterate on the results.
*/
public Query getNearest(DATA queryData) {
return new Query(queryData, Double.POSITIVE_INFINITY, Integer.MAX_VALUE);
}
protected void _check() {
if(root != null) {
root._check();
}
}
private class IndexItem {
DATA data;
protected double radius;
double distanceToParent;
private IndexItem(DATA data) {
this.data = data;
this.radius = 0;
this.distanceToParent = -1;
}
int _check() {
_checkRadius();
_checkDistanceToParent();
return 1;
}
private void _checkRadius() {
assert radius >= 0;
}
protected void _checkDistanceToParent() {
assert !(this instanceof MTree.RootLeafNode);
assert !(this instanceof MTree.RootNode);
assert distanceToParent >= 0;
}
}
private abstract class Node extends IndexItem {
protected Map<DATA, IndexItem> children = new HashMap<DATA, IndexItem>();
protected Rootness rootness;
protected Leafness<DATA> leafness;
private
<R extends NodeTrait & Rootness, L extends NodeTrait & Leafness<DATA>>
Node(DATA data, R rootness, L leafness) {
super(data);
rootness.thisNode = this;
this.rootness = rootness;
leafness.thisNode = this;
this.leafness = leafness;
}
private final void addData(DATA data, double distance) throws SplitNodeReplacement {
doAddData(data, distance);
checkMaxCapacity();
}
int _check() {
super._check();
_checkMinCapacity();
_checkMaxCapacity();
int childHeight = -1;
for(Map.Entry<DATA, IndexItem> e : children.entrySet()) {
DATA data = e.getKey();
IndexItem child = e.getValue();
assert child.data.equals(data);
_checkChildClass(child);
_checkChildMetrics(child);
int height = child._check();
if(childHeight < 0) {
childHeight = height;
} else {
assert childHeight == height;
}
}
return childHeight + 1;
}
protected void doAddData(DATA data, double distance) {
leafness.doAddData(data, distance);
}
protected void doRemoveData(DATA data, double distance) throws DataNotFound {
leafness.doRemoveData(data, distance);
}
private final void checkMaxCapacity() throws SplitNodeReplacement {
if(children.size() > MTree.this.maxNodeCapacity) {
DistanceFunction<? super DATA> cachedDistanceFunction = DistanceFunctions.cached(MTree.this.distanceFunction);
SplitResult<DATA> splitResult = MTree.this.splitFunction.process(children.keySet(), cachedDistanceFunction);
Node newNode0 = null;
Node newNode1 = null;
for(int i = 0; i < 2; ++i) {
DATA promotedData = splitResult.promoted.get(i);
Set<DATA> partition = splitResult.partitions.get(i);
Node newNode = newSplitNodeReplacement(promotedData);
for(DATA data : partition) {
IndexItem child = children.get(data);
children.remove(data);
double distance = cachedDistanceFunction.calculate(promotedData, data);
newNode.addChild(child, distance);
}
if(i == 0) {
newNode0 = newNode;
} else {
newNode1 = newNode;
}
}
assert children.isEmpty();
throw new SplitNodeReplacement(newNode0, newNode1);
}
}
protected Node newSplitNodeReplacement(DATA data) {
return leafness.newSplitNodeReplacement(data);
}
protected void addChild(IndexItem child, double distance) {
leafness.addChild(child, distance);
}
void removeData(DATA data, double distance) throws RootNodeReplacement, NodeUnderCapacity, DataNotFound {
doRemoveData(data, distance);
if(children.size() < getMinCapacity()) {
throw new NodeUnderCapacity();
}
}
protected int getMinCapacity() {
return rootness.getMinCapacity();
}
private void updateMetrics(IndexItem child, double distance) {
child.distanceToParent = distance;
updateRadius(child);
}
private void updateRadius(IndexItem child) {
if (child != null) // ### added by mits
this.radius = Math.max(this.radius, child.distanceToParent + child.radius);
}
void _checkMinCapacity() {
rootness._checkMinCapacity();
}
private void _checkMaxCapacity() {
assert children.size() <= MTree.this.maxNodeCapacity;
}
private void _checkChildClass(IndexItem child) {
leafness._checkChildClass(child);
}
private void _checkChildMetrics(IndexItem child) {
double dist = MTree.this.distanceFunction.calculate(child.data, this.data);
assert child.distanceToParent == dist;
double sum = child.distanceToParent + child.radius;
assert sum <= this.radius;
}
protected void _checkDistanceToParent() {
rootness._checkDistanceToParent();
}
private MTree<DATA> mtree() {
return MTree.this;
}
}
private abstract class NodeTrait {
protected Node thisNode;
}
private interface Leafness<DATA> {
void doAddData(DATA data, double distance);
void addChild(MTree<DATA>.IndexItem child, double distance);
void doRemoveData(DATA data, double distance) throws DataNotFound;
MTree<DATA>.Node newSplitNodeReplacement(DATA data);
void _checkChildClass(MTree<DATA>.IndexItem child);
}
private interface Rootness {
int getMinCapacity();
void _checkDistanceToParent();
void _checkMinCapacity();
}
private class RootNodeTrait extends NodeTrait implements Rootness {
@Override
public int getMinCapacity() {
throw new RuntimeException("Should not be called!");
}
@Override
public void _checkDistanceToParent() {
assert thisNode.distanceToParent == -1;
}
@Override
public void _checkMinCapacity() {
thisNode._checkMinCapacity();
}
};
private class NonRootNodeTrait extends NodeTrait implements Rootness {
@Override
public int getMinCapacity() {
return MTree.this.minNodeCapacity;
}
@Override
public void _checkMinCapacity() {
assert thisNode.children.size() >= thisNode.mtree().minNodeCapacity;
}
@Override
public void _checkDistanceToParent() {
assert thisNode.distanceToParent >= 0;
}
};
private class LeafNodeTrait extends NodeTrait implements Leafness<DATA> {
public void doAddData(DATA data, double distance) {
Entry entry = thisNode.mtree().new Entry(data);
assert !thisNode.children.containsKey(data);
thisNode.children.put(data, entry);
assert thisNode.children.containsKey(data);
thisNode.updateMetrics(entry, distance);
}
public void addChild(IndexItem child, double distance) {
assert !thisNode.children.containsKey(child.data);
thisNode.children.put(child.data, child);
assert thisNode.children.containsKey(child.data);
thisNode.updateMetrics(child, distance);
}
public Node newSplitNodeReplacement(DATA data) {
return thisNode.mtree().new LeafNode(data);
}
@Override
public void doRemoveData(DATA data, double distance) throws DataNotFound {
if(thisNode.children.remove(data) == null) {
throw new DataNotFound();
}
}
public void _checkChildClass(IndexItem child) {
assert child instanceof MTree.Entry;
}
}
class NonLeafNodeTrait extends NodeTrait implements Leafness<DATA> {
public void doAddData(DATA data, double distance) {
class CandidateChild {
Node node;
double distance;
double metric;
private CandidateChild(Node node, double distance, double metric) {
this.node = node;
this.distance = distance;
this.metric = metric;
}
}
CandidateChild minRadiusIncreaseNeeded = new CandidateChild(null, -1.0, Double.POSITIVE_INFINITY);
CandidateChild nearestDistance = new CandidateChild(null, -1.0, Double.POSITIVE_INFINITY);
for(IndexItem item : thisNode.children.values()) {
@SuppressWarnings("unchecked")
Node child = (Node)item;
double childDistance = thisNode.mtree().distanceFunction.calculate(child.data, data);
if(childDistance > child.radius) {
double radiusIncrease = childDistance - child.radius;
if(radiusIncrease < minRadiusIncreaseNeeded.metric) {
minRadiusIncreaseNeeded = new CandidateChild(child, childDistance, radiusIncrease);
}
} else {
if(childDistance < nearestDistance.metric) {
nearestDistance = new CandidateChild(child, childDistance, childDistance);
}
}
}
CandidateChild chosen = (nearestDistance.node != null)
? nearestDistance
: minRadiusIncreaseNeeded;
Node child = chosen.node;
try {
child.addData(data, chosen.distance);
thisNode.updateRadius(child);
} catch(SplitNodeReplacement e) {
// Replace current child with new nodes
IndexItem _ = thisNode.children.remove(child.data);
assert _ != null;
for(int i = 0; i < e.newNodes.length; ++i) {
@SuppressWarnings("unchecked")
Node newChild = (Node) e.newNodes[i];
distance = thisNode.mtree().distanceFunction.calculate(thisNode.data, newChild.data);
thisNode.addChild(newChild, distance);
}
}
}
public void addChild(IndexItem newChild_, double distance) {
@SuppressWarnings("unchecked")
Node newChild = (Node) newChild_;
class ChildWithDistance {
Node child;
double distance;
private ChildWithDistance(Node child, double distance) {
this.child = child;
this.distance = distance;
}
}
Deque<ChildWithDistance> newChildren = new ArrayDeque<ChildWithDistance>();
newChildren.addFirst(new ChildWithDistance(newChild, distance));
while(!newChildren.isEmpty()) {
ChildWithDistance cwd = newChildren.removeFirst();
newChild = cwd.child;
distance = cwd.distance;
if(thisNode.children.containsKey(newChild.data)) {
@SuppressWarnings("unchecked")
Node existingChild = (Node) thisNode.children.get(newChild.data);
assert existingChild.data.equals(newChild.data);
// Transfer the _children_ of the newChild to the existingChild
for(IndexItem grandchild : newChild.children.values()) {
existingChild.addChild(grandchild, grandchild.distanceToParent);
}
newChild.children.clear();
try {
existingChild.checkMaxCapacity();
} catch(SplitNodeReplacement e) {
IndexItem _ = thisNode.children.remove(existingChild.data);
assert _ != null;
for(int i = 0; i < e.newNodes.length; ++i) {
@SuppressWarnings("unchecked")
Node newNode = (Node) e.newNodes[i];
distance = thisNode.mtree().distanceFunction.calculate(thisNode.data, newNode.data);
newChildren.addFirst(new ChildWithDistance(newNode, distance));
}
}
} else {
thisNode.children.put(newChild.data, newChild);
thisNode.updateMetrics(newChild, distance);
}
}
}
public Node newSplitNodeReplacement(DATA data) {
return new InternalNode(data);
}
public void doRemoveData(DATA data, double distance) throws DataNotFound {
for(IndexItem childItem : thisNode.children.values()) {
@SuppressWarnings("unchecked")
Node child = (Node)childItem;
if(Math.abs(distance - child.distanceToParent) <= child.radius) {
double distanceToChild = thisNode.mtree().distanceFunction.calculate(data, child.data);
if(distanceToChild <= child.radius) {
try {
child.removeData(data, distanceToChild);
thisNode.updateRadius(child);
return;
} catch(DataNotFound e) {
// If DataNotFound was thrown, then the data was not found in the child
} catch(NodeUnderCapacity e) {
Node expandedChild = balanceChildren(child);
thisNode.updateRadius(expandedChild);
return;
} catch (RootNodeReplacement e) {
throw new RuntimeException("Should never happen!");
}
}
}
}
throw new DataNotFound();
}
private Node balanceChildren(Node theChild) {
// Tries to find anotherChild which can donate a grand-child to theChild.
Node nearestDonor = null;
double distanceNearestDonor = Double.POSITIVE_INFINITY;
Node nearestMergeCandidate = null;
double distanceNearestMergeCandidate = Double.POSITIVE_INFINITY;
for(IndexItem child : thisNode.children.values()) {
@SuppressWarnings("unchecked")
Node anotherChild = (Node)child;
if(anotherChild == theChild) continue;
double distance = thisNode.mtree().distanceFunction.calculate(theChild.data, anotherChild.data);
if(anotherChild.children.size() > anotherChild.getMinCapacity()) {
if(distance < distanceNearestDonor) {
distanceNearestDonor = distance;
nearestDonor = anotherChild;
}
} else {
if(distance < distanceNearestMergeCandidate) {
distanceNearestMergeCandidate = distance;
nearestMergeCandidate = anotherChild;
}
}
}
if(nearestDonor == null) {
// Merge
for(IndexItem grandchild : theChild.children.values()) {
//if (thisNode == null) System.out.println("thisNode is null");
//if (thisNode.mtree() == null) System.out.println("thisNode.mtree() is null");
//if (grandchild == null) System.out.println("grandchild is null");
//if (grandchild.data == null) System.out.println("grandchild.data is null");
//if (nearestMergeCandidate == null) System.out.println("nearestMergeCandidate is null");
//if (nearestMergeCandidate.data == null) System.out.println("nearestMergeCandidate.data is null");
if (nearestMergeCandidate != null) { // ### added by mits
double distance = thisNode.mtree().distanceFunction.calculate(grandchild.data, nearestMergeCandidate.data);
nearestMergeCandidate.addChild(grandchild, distance);
}
}
IndexItem removed = thisNode.children.remove(theChild.data);
assert removed != null;
return nearestMergeCandidate;
} else {
// Donate
// Look for the nearest grandchild
IndexItem nearestGrandchild = null;
double nearestGrandchildDistance = Double.POSITIVE_INFINITY;
for(IndexItem grandchild : nearestDonor.children.values()) {
double distance = thisNode.mtree().distanceFunction.calculate(grandchild.data, theChild.data);
if(distance < nearestGrandchildDistance) {
nearestGrandchildDistance = distance;
nearestGrandchild = grandchild;
}
}
IndexItem _ = nearestDonor.children.remove(nearestGrandchild.data);
assert _ != null;
theChild.addChild(nearestGrandchild, nearestGrandchildDistance);
return theChild;
}
}
public void _checkChildClass(IndexItem child) {
assert child instanceof MTree.InternalNode
|| child instanceof MTree.LeafNode;
}
}
private class RootLeafNode extends Node {
private RootLeafNode(DATA data) {
super(data, new RootNodeTrait(), new LeafNodeTrait());
}
void removeData(DATA data, double distance) throws RootNodeReplacement, DataNotFound {
try {
super.removeData(data, distance);
} catch (NodeUnderCapacity e) {
assert children.isEmpty();
throw new RootNodeReplacement(null);
}
}
protected int getMinCapacity() {
return 1;
}
void _checkMinCapacity() {
assert children.size() >= 1;
}
}
private class RootNode extends Node {
private RootNode(DATA data) {
super(data, new RootNodeTrait(), new NonLeafNodeTrait());
}
void removeData(DATA data, double distance) throws RootNodeReplacement, NodeUnderCapacity, DataNotFound {
try {
super.removeData(data, distance);
} catch(NodeUnderCapacity e) {
// Promote the only child to root
@SuppressWarnings("unchecked")
Node theChild = (Node)(children.values().iterator().next());
Node newRoot;
if(theChild instanceof MTree.InternalNode) {
newRoot = new RootNode(theChild.data);
} else {
assert theChild instanceof MTree.LeafNode;
newRoot = new RootLeafNode(theChild.data);
}
for(IndexItem grandchild : theChild.children.values()) {
distance = MTree.this.distanceFunction.calculate(newRoot.data, grandchild.data);
newRoot.addChild(grandchild, distance);
}
theChild.children.clear();
throw new RootNodeReplacement(newRoot);
}
}
@Override
protected int getMinCapacity() {
return 2;
}
@Override
void _checkMinCapacity() {
assert children.size() >= 2;
}
}
private class InternalNode extends Node {
private InternalNode(DATA data) {
super(data, new NonRootNodeTrait(), new NonLeafNodeTrait());
}
};
private class LeafNode extends Node {
public LeafNode(DATA data) {
super(data, new NonRootNodeTrait(), new LeafNodeTrait());
}
}
private class Entry extends IndexItem {
private Entry(DATA data) {
super(data);
}
}
}
| Java |
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package moa.clusterers.outliers.utils.mtree.utils;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
/**
 * Some utilities.
 */
public final class Utils {

    /** Pure namespace of static helpers; never instantiated. */
    private Utils() {}

    /**
     * Identifies the minimum and maximum elements from an iterable, according
     * to the natural ordering of the elements.
     * @param items An {@link Iterable} object with the elements
     * @param <T> The type of the elements.
     * @return A pair with the minimum and maximum elements, or {@code null}
     *         if the iterable has no elements.
     */
    public static <T extends Comparable<T>> Pair<T> minMax(Iterable<T> items) {
        T smallest = null;
        T largest = null;
        boolean seenAny = false;
        for(T candidate : items) {
            if(!seenAny) {
                // First element initializes both extremes.
                smallest = candidate;
                largest = candidate;
                seenAny = true;
                continue;
            }
            if(candidate.compareTo(smallest) < 0) {
                smallest = candidate;
            }
            if(candidate.compareTo(largest) > 0) {
                largest = candidate;
            }
        }
        return seenAny ? new Pair<T>(smallest, largest) : null;
    }

    /**
     * Randomly chooses elements from the collection, without repetition of
     * positions.
     * @param collection The collection.
     * @param n The number of elements to choose.
     * @param <T> The type of the elements.
     * @return A list with the chosen elements (fewer than {@code n} if the
     *         collection is smaller than {@code n}).
     */
    public static <T> List<T> randomSample(Collection<T> collection, int n) {
        List<T> pool = new ArrayList<T>(collection);
        List<T> chosen = new ArrayList<T>(n);
        Random rnd = new Random();
        for(int remaining = n; remaining > 0 && !pool.isEmpty(); remaining--) {
            int pick = rnd.nextInt(pool.size());
            chosen.add(pool.get(pick));
            // O(1) deletion: overwrite the picked slot with the last element.
            T tail = pool.remove(pool.size() - 1);
            if(pick < pool.size()) {
                pool.set(pick, tail);
            }
        }
        return chosen;
    }
}
| Java |
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package moa.clusterers.outliers.utils.mtree.utils;
/**
 * A pair of objects of the same type.
 *
 * <p>Both components are public and mutable; the class is a plain value
 * holder with no equality semantics of its own.
 *
 * @param <T> The type of the objects.
 */
public class Pair<T> {

    /**
     * The first object.
     */
    public T first;

    /**
     * The second object.
     */
    public T second;

    /**
     * Creates a pair of {@code null} objects.
     */
    public Pair() {}

    /**
     * Creates a pair with the objects specified in the arguments.
     * @param first The first object.
     * @param second The second object.
     */
    public Pair(T first, T second) {
        this.first = first;
        this.second = second;
    }

    /**
     * Accesses an object by its index. The {@link #first} object has index
     * {@code 0} and the {@link #second} object has index {@code 1}.
     * @param index The index of the object to be accessed.
     * @return The {@link #first} object if {@code index} is {@code 0}; the
     *         {@link #second} object if {@code index} is {@code 1}.
     * @throws IllegalArgumentException If {@code index} is neither {@code 0}
     *         nor {@code 1}.
     */
    public T get(int index) throws IllegalArgumentException {
        if(index == 0) {
            return first;
        }
        if(index == 1) {
            return second;
        }
        throw new IllegalArgumentException();
    }
}
| Java |
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package moa.clusterers.outliers.utils.mtree;
import java.util.Set;
import moa.clusterers.outliers.utils.mtree.utils.Pair;
/**
 * Defines an object to be used to split a node in an M-Tree. A node must be
 * split when it has reached its maximum capacity and a new child node would be
 * added to it.
 *
 * <p>The splitting consists in choosing a pair of "promoted" data objects from
 * the children and then partition the set of children in two partitions
 * corresponding to the two promoted data objects.
 *
 * @param <DATA> The type of the data objects.
 */
public interface SplitFunction<DATA> {

    /**
     * Processes the splitting of a node.
     *
     * @param dataSet A set of data that are keys to the children of the node
     *        to be split.
     * @param distanceFunction A {@linkplain DistanceFunction distance function}
     *        that can be used to help splitting the node.
     * @return A {@link SplitResult} object with a pair of promoted data objects
     *        and a pair of corresponding partitions of the data objects.
     */
    SplitResult<DATA> process(Set<DATA> dataSet, DistanceFunction<? super DATA> distanceFunction);

    /**
     * An object used as the result for the
     * {@link SplitFunction#process(Set, DistanceFunction)} method.
     *
     * <p>Index correspondence: {@code partitions.get(i)} is the partition
     * associated with the promoted object {@code promoted.get(i)}, for
     * {@code i} in {0, 1}.
     *
     * @param <DATA> The type of the data objects.
     */
    public static class SplitResult<DATA> {

        /**
         * A pair of promoted data objects.
         */
        public Pair<DATA> promoted;

        /**
         * A pair of partitions corresponding to the {@code promoted} data
         * objects.
         */
        public Pair<Set<DATA>> partitions;

        /**
         * The constructor for a {@link SplitResult} object.
         * @param promoted the two promoted data objects.
         * @param partitions the partition associated with each promoted object,
         *        in the same order.
         */
        public SplitResult(Pair<DATA> promoted, Pair<Set<DATA>> partitions) {
            this.promoted = promoted;
            this.partitions = partitions;
        }
    }
}
| Java |
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package moa.clusterers.outliers.utils.mtree;
import java.util.Set;
import moa.clusterers.outliers.utils.mtree.utils.Pair;
/**
 * An object that chooses a pair from a set of data objects.
 *
 * @param <DATA> The type of the data objects.
 */
public interface PromotionFunction<DATA> {
    /**
     * Chooses ("promotes") a pair of objects according to some criteria that
     * are suitable for the application using the M-Tree.
     *
     * @param dataSet The set of objects to choose a pair from.
     * @param distanceFunction A function that can be used for choosing the
     *                         promoted objects.
     * @return A pair of chosen objects.
     */
    Pair<DATA> process(Set<DATA> dataSet, DistanceFunction<? super DATA> distanceFunction);
}
| Java |
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package moa.clusterers.outliers.utils.mtree;
import java.util.List;
import java.util.Set;
import moa.clusterers.outliers.utils.mtree.utils.Pair;
import moa.clusterers.outliers.utils.mtree.utils.Utils;
/**
 * Some pre-defined implementations of {@linkplain PromotionFunction promotion
 * functions}.
 */
public final class PromotionFunctions {
    /**
     * Non-instantiable: this class is a pure holder of implementations.
     */
    private PromotionFunctions() {}
    /**
     * A {@linkplain PromotionFunction promotion function} object that randomly
     * chooses ("promotes") two data objects.
     *
     * @param <DATA> The type of the data objects.
     */
    public static class RandomPromotion<DATA> implements PromotionFunction<DATA> {
        @Override
        public Pair<DATA> process(Set<DATA> dataSet,
                DistanceFunction<? super DATA> distanceFunction)
        {
            // Random promotion ignores the distance function entirely:
            // just sample two objects from the set and pair them up.
            final List<DATA> sample = Utils.randomSample(dataSet, 2);
            return new Pair<DATA>(sample.get(0), sample.get(1));
        }
    }
}
| Java |
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package moa.clusterers.outliers.utils.mtree;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Some pre-defined implementations of {@linkplain DistanceFunction distance
 * functions}.
 */
public final class DistanceFunctions {
    /**
     * Don't let anyone instantiate this class.
     */
    private DistanceFunctions() {}
    /**
     * Creates a cached version of a {@linkplain DistanceFunction distance
     * function}. This method is used internally by {@link MTree} to create
     * a cached distance function to pass to the {@linkplain SplitFunction split
     * function}.
     *
     * <p>The wrapped function is assumed to be symmetric, i.e.
     * {@code d(a, b) == d(b, a)} (the previous implementation made the same
     * assumption by storing the value computed for {@code (a, b)} under both
     * argument orders; this version stores a single entry per unordered pair,
     * halving the cache size).
     *
     * @param distanceFunction The distance function to create a cached version
     *        of.
     * @return The cached distance function.
     */
    public static <Data> DistanceFunction<Data> cached(final DistanceFunction<Data> distanceFunction) {
        return new DistanceFunction<Data>() {
            /**
             * Unordered cache key: equals() and hashCode() are symmetric in
             * (data1, data2), so one map entry serves both argument orders.
             */
            class Pair {
                Data data1;
                Data data2;
                public Pair(Data data1, Data data2) {
                    this.data1 = data1;
                    this.data2 = data2;
                }
                @Override
                public int hashCode() {
                    // XOR is order-independent, matching the symmetric equals().
                    return data1.hashCode() ^ data2.hashCode();
                }
                @Override
                public boolean equals(Object arg0) {
                    if(arg0 instanceof Pair) {
                        Pair that = (Pair) arg0;
                        return (this.data1.equals(that.data1) && this.data2.equals(that.data2))
                            || (this.data1.equals(that.data2) && this.data2.equals(that.data1));
                    } else {
                        return false;
                    }
                }
            }
            // NOTE(review): entries are never evicted; this wrapper is meant
            // for short-lived use (a single split), per the javadoc above.
            private final Map<Pair, Double> cache = new HashMap<Pair, Double>();
            @Override
            public double calculate(Data data1, Data data2) {
                Pair key = new Pair(data1, data2);
                Double distance = cache.get(key);
                if(distance != null) {
                    return distance;
                }
                distance = distanceFunction.calculate(data1, data2);
                cache.put(key, distance);
                return distance;
            }
        };
    }
    /**
     * An interface to represent coordinates in Euclidean spaces.
     * @see <a href="http://en.wikipedia.org/wiki/Euclidean_space">"Euclidean
     * Space" article at Wikipedia</a>
     */
    public interface EuclideanCoordinate {
        /**
         * The number of dimensions.
         */
        int dimensions();
        /**
         * A method to access the {@code index}-th component of the coordinate.
         *
         * @param index The index of the component. Must be less than {@link
         *        #dimensions()}.
         */
        double get(int index);
    }
    /**
     * Calculates the Euclidean distance between two {@linkplain
     * EuclideanCoordinate euclidean coordinates}. If the coordinates have
     * different dimensionality, only the common (smaller) number of
     * dimensions is taken into account.
     */
    public static double euclidean(EuclideanCoordinate coord1, EuclideanCoordinate coord2) {
        int size = Math.min(coord1.dimensions(), coord2.dimensions());
        double distance = 0;
        for(int i = 0; i < size; i++) {
            double diff = coord1.get(i) - coord2.get(i);
            distance += diff * diff;
        }
        distance = Math.sqrt(distance);
        return distance;
    }
    /**
     * A {@linkplain DistanceFunction distance function} object that calculates
     * the distance between two {@linkplain EuclideanCoordinate euclidean
     * coordinates}.
     */
    public static final DistanceFunction<EuclideanCoordinate> EUCLIDEAN = new DistanceFunction<DistanceFunctions.EuclideanCoordinate>() {
        @Override
        public double calculate(EuclideanCoordinate coord1, EuclideanCoordinate coord2) {
            return DistanceFunctions.euclidean(coord1, coord2);
        }
    };
    /**
     * A {@linkplain DistanceFunction distance function} object that calculates
     * the distance between two coordinates represented by {@linkplain
     * java.util.List lists} of {@link java.lang.Integer}s.
     */
    public static final DistanceFunction<List<Integer>> EUCLIDEAN_INTEGER_LIST = new DistanceFunction<List<Integer>>() {
        @Override
        public double calculate(List<Integer> data1, List<Integer> data2) {
            // Adapt the lists to EuclideanCoordinate so euclidean() can be reused.
            class IntegerListEuclideanCoordinate implements EuclideanCoordinate {
                List<Integer> list;
                public IntegerListEuclideanCoordinate(List<Integer> list) { this.list = list; }
                @Override public int dimensions() { return list.size(); }
                @Override public double get(int index) { return list.get(index); }
            }
            IntegerListEuclideanCoordinate coord1 = new IntegerListEuclideanCoordinate(data1);
            IntegerListEuclideanCoordinate coord2 = new IntegerListEuclideanCoordinate(data2);
            return DistanceFunctions.euclidean(coord1, coord2);
        }
    };
    /**
     * A {@linkplain DistanceFunction distance function} object that calculates
     * the distance between two coordinates represented by {@linkplain
     * java.util.List lists} of {@link java.lang.Double}s.
     */
    public static final DistanceFunction<List<Double>> EUCLIDEAN_DOUBLE_LIST = new DistanceFunction<List<Double>>() {
        @Override
        public double calculate(List<Double> data1, List<Double> data2) {
            // Adapt the lists to EuclideanCoordinate so euclidean() can be reused.
            class DoubleListEuclideanCoordinate implements EuclideanCoordinate {
                List<Double> list;
                public DoubleListEuclideanCoordinate(List<Double> list) { this.list = list; }
                @Override public int dimensions() { return list.size(); }
                @Override public double get(int index) { return list.get(index); }
            }
            DoubleListEuclideanCoordinate coord1 = new DoubleListEuclideanCoordinate(data1);
            DoubleListEuclideanCoordinate coord2 = new DoubleListEuclideanCoordinate(data2);
            return DistanceFunctions.euclidean(coord1, coord2);
        }
    };
}
| Java |
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package moa.clusterers.outliers.utils.mtree;
/**
 * An object that can calculate the distance between two data objects.
 *
 * @param <DATA> The type of the data objects.
 */
public interface DistanceFunction<DATA> {
    /**
     * Calculates the distance between {@code data1} and {@code data2}.
     * NOTE(review): callers such as {@code DistanceFunctions.cached} store the
     * result for both argument orders, so implementations are presumably
     * expected to be symmetric — confirm when adding a new implementation.
     *
     * @param data1 The first data object.
     * @param data2 The second data object.
     * @return The distance between the two objects.
     */
    double calculate(DATA data1, DATA data2);
}
| Java |
/*
* MyBaseOutlierDetector.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers;
import java.io.File;
import java.io.FileOutputStream;
import java.io.PrintStream;
import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
import java.util.Vector;
import moa.cluster.Cluster;
import moa.cluster.Clustering;
import moa.cluster.SphereCluster;
import moa.clusterers.AbstractClusterer;
import moa.core.Measurement;
import moa.options.IntOption;
import weka.core.Instance;
public abstract class MyBaseOutlierDetector extends AbstractClusterer {
    /**
     * A stream object flagged as an outlier. Identity is defined solely by
     * {@code id}: {@link #compareTo}, {@link #equals} and {@link #hashCode}
     * all ignore {@code inst} and {@code obj}.
     */
    public static class Outlier implements Comparable<Outlier> {
        public long id;
        public Instance inst;
        public Object obj;
        public Outlier(Instance inst, long id, Object obj) {
            this.id = id;
            this.inst = inst;
            this.obj = obj;
        }
        @Override
        public int compareTo(Outlier o) {
            if (this.id > o.id)
                return 1;
            else if (this.id < o.id)
                return -1;
            else
                return 0;
        }
        @Override
        public boolean equals(Object o) {
            // Fixed: type-safe comparison. The previous version cast blindly,
            // throwing ClassCastException for non-Outlier arguments and
            // NullPointerException for null.
            if (this == o)
                return true;
            if (!(o instanceof Outlier))
                return false;
            return (this.id == ((Outlier) o).id);
        }
        @Override
        public int hashCode() {
            // Added to honor the equals()/hashCode() contract: equal ids must
            // produce equal hash codes (same folding as Long.hashCode).
            return (int) (id ^ (id >>> 32));
        }
    }
    /**
     * Callback interface used to notify a client when an object becomes an
     * outlier or reverts to an inlier. Subclass and override the methods.
     */
    public static abstract class OutlierNotifier {
        public void OnOutlier(Outlier outlier) {
            throw new UnsupportedOperationException("Not yet implemented");
        }
        public void OnInlier(Outlier outlier) {
            throw new UnsupportedOperationException("Not yet implemented");
        }
    }
    public IntOption windowSizeOption = new IntOption("windowSize", 'w', "Size of the window.", 1000);
    public OutlierNotifier outlierNotifier = null;
    protected Random random;
    // peak JVM heap usage observed so far, in MB (see GetMemoryUsage)
    protected int iMaxMemUsage = 0;
    // number of range queries executed; updated by subclasses
    protected int nRangeQueriesExecuted = 0;
    protected Long nTotalRunTime = 0L;
    // average processing time per object over the last interval, in nanoseconds
    protected double nTimePerObj;
    private Clustering myClusters = null;
    // outliers currently known, ordered by id
    private TreeSet<Outlier> outliersFound;
    // accumulated processing time (ns) since the last per-object average
    private Long m_timePreObjSum;
    private int nProcessed;
    private static final int m_timePreObjInterval = 100;
    /** Updates the peak-memory statistic from the current JVM heap usage. */
    protected void UpdateMaxMemUsage() {
        int x = GetMemoryUsage();
        if (iMaxMemUsage < x) iMaxMemUsage = x;
    }
    public double getTimePerObj() {
        return nTimePerObj;
    }
    /** Returns a human-readable description of {@code obj}; subclasses override. */
    public String getObjectInfo(Object obj) {
        throw new UnsupportedOperationException("Not yet implemented");
    }
    /** Returns run statistics as text; subclasses override. */
    public String getStatistics() {
        throw new UnsupportedOperationException("Not yet implemented");
    }
    /** Extracts the attribute values of an instance, excluding the class attribute. */
    public double[] getInstanceValues(Instance inst) {
        double[] values = new double[inst.numValues() - 1]; // last attribute is the class
        for (int i = 0; i < inst.numValues() - 1; i++) {
            values[i] = inst.value(i);
        }
        return values;
    }
    public void PrintInstance(Instance inst) {
        Print("instance: [ ");
        for (int i = 0; i < inst.numValues() - 1; i++) { // last value is the class
            Printf("%.2f ", inst.value(i));
        }
        Print("] ");
        Println("");
    }
    @Override
    public void resetLearningImpl() {
        Init();
    }
    /** Resets all counters and installs the default (display) progress printer. */
    protected void Init() {
        random = new Random(System.currentTimeMillis());
        outliersFound = new TreeSet<Outlier>();
        m_timePreObjSum = 0L;
        nProcessed = 0;
        nTimePerObj = 0L;
        StdPrintMsg printer = new StdPrintMsg();
        printer.RedirectToDisplay();
        SetUserInfo(true, false, printer, 1000);
    }
    @Override
    public void trainOnInstanceImpl(Instance inst) {
        processNewInstanceImpl(inst);
    }
    /**
     * Feeds one stream object to the detector and updates the memory and
     * per-object timing statistics.
     */
    public void processNewInstanceImpl(Instance inst) {
        Long nsNow = System.nanoTime();
        ProcessNewStreamObj(inst);
        UpdateMaxMemUsage();
        // NOTE(review): dividing a nanosecond delta by 1024*1024 only
        // approximates milliseconds (1e6 would be exact) — confirm the
        // intended unit of nTotalRunTime before changing this.
        nTotalRunTime += (System.nanoTime() - nsNow) / (1024 * 1024);
        // update process time per object
        nProcessed++;
        m_timePreObjSum += System.nanoTime() - nsNow;
        if (nProcessed % m_timePreObjInterval == 0) {
            // average (ns) over the last m_timePreObjInterval objects
            nTimePerObj = ((double) m_timePreObjSum) / ((double) m_timePreObjInterval);
            if (bShowProgress) ShowTimePerObj();
            // init
            m_timePreObjSum = 0L;
        }
    }
    private void ShowTimePerObj() {
        double ms = nTimePerObj / (1000.0 * 1000.0);
        Println("Process time per object (ms): " + String.format("%.3f", ms));
    }
    /** Subclasses implement the actual per-object outlier detection here. */
    protected void ProcessNewStreamObj(Instance inst) {
        throw new UnsupportedOperationException("Not yet implemented");
    }
    public void PrintOutliers() {
        Print("Outliers: ");
        for (Outlier o : outliersFound) {
            Printf("[%d] ", o.id);
        }
        Println("");
    }
    public Set<Outlier> GetOutliersFound() {
        return outliersFound;
    }
    /** Tells whether the object with the given id is still inside the window. */
    protected boolean IsNodeIdInWin(long id) {
        throw new UnsupportedOperationException("Not yet implemented");
    }
    /**
     * Exposes each non-expired outlier as a zero-radius sphere cluster so the
     * GUI/evaluation machinery can display them.
     */
    @Override
    public Clustering getClusteringResult(){
        myClusters = new Clustering();
        for (Outlier o : outliersFound) {
            if (IsNodeIdInWin(o.id)) {
                double[] center = new double[o.inst.numValues() - 1];
                for (int i = 0; i < o.inst.numValues() - 1; i++) {
                    center[i] = o.inst.value(i);
                }
                Cluster c = new SphereCluster(center, 0);
                myClusters.add(c);
            }
        }
        return myClusters;
    }
    /** Returns the outliers whose objects are still inside the window. */
    public Vector<Outlier> getOutliersResult(){
        Vector<Outlier> outliers = new Vector<Outlier>();
        for (Outlier o : outliersFound) {
            if (IsNodeIdInWin(o.id)) {
                outliers.add(o);
            }
        }
        return outliers;
    }
    /** Records a new outlier; fires the notifier only on first insertion. */
    protected void AddOutlier(Outlier newOutlier) {
        boolean bNewAdd = outliersFound.add(newOutlier);
        if ((outlierNotifier != null) && bNewAdd) {
            outlierNotifier.OnOutlier(newOutlier);
        }
    }
    /** Removes an expired outlier WITHOUT firing the inlier notification. */
    protected boolean RemoveExpiredOutlier(Outlier outlier) {
        boolean bFound = outliersFound.remove(outlier);
        return bFound;
    }
    /** Removes an outlier and notifies the listener that it became an inlier. */
    protected boolean RemoveOutlier(Outlier outlier) {
        boolean bFound = outliersFound.remove(outlier);
        if ((outlierNotifier != null) && bFound) {
            outlierNotifier.OnInlier(outlier);
        }
        return bFound;
    }
    @Override
    public boolean implementsMicroClusterer() {
        return false;
    }
    @Override
    public Clustering getMicroClusteringResult() {
        return null;
    }
    @Override
    protected Measurement[] getModelMeasurementsImpl() {
        throw new UnsupportedOperationException("Not supported yet.");
    }
    @Override
    public void getModelDescription(StringBuilder out, int indent) {
        throw new UnsupportedOperationException("Not supported yet.");
    }
    @Override
    public boolean isRandomizable() {
        return false;
    }
    @Override
    public boolean keepClassLabel(){
        return true;
    }
    @Override
    public double[] getVotesForInstance(Instance inst) {
        return null;
    }
    // show progress through object
    protected ProgressInfo myProgressInfo;
    protected PrintMsg myOut;
    protected boolean bTrace = false;
    protected boolean bShowProgress = false;
    public boolean bStopAlgorithm = false;
    // time measurement for progress
    private Long _msPrev = 0L, _msNow = 0L;
    public void SetShowProgress(boolean b) {
        bShowProgress = b;
    }
    public void SetTrace(boolean b) {
        bTrace = b;
    }
    public void SetProgressInterval(int iProgressInterval) {
        myProgressInfo = new MyProgressInfo(iProgressInterval);
    }
    public void SetMessagePrinter(PrintMsg logPrinter) {
        myOut = logPrinter;
    }
    /** Configures progress display, tracing, the printer and the progress interval. */
    public void SetUserInfo(
            boolean bShowProgress,
            boolean bTrace,
            PrintMsg logPrinter,
            int iProgressInterval)
    {
        this.bShowProgress = bShowProgress;
        this.bTrace = bTrace;
        myProgressInfo = new MyProgressInfo(iProgressInterval);
        myOut = logPrinter;
    }
    /** Minimal printing abstraction so output can be redirected (display/file). */
    public interface PrintMsg {
        public void print(String s);
        public void println(String s);
        public void printf(String fmt, Object... args);
    }
    /** Abstraction over how and how often progress messages are shown. */
    public interface ProgressInfo {
        public int GetInterval();
        public void ShowProgress(String sMsg);
    }
    /** PrintMsg implementation writing either to System.out or to a file. */
    public class StdPrintMsg implements PrintMsg {
        private PrintStream printStream, fileStream;
        public StdPrintMsg() {
            RedirectToDisplay();
        }
        public StdPrintMsg(String sFilename) {
            RedirectToFile(sFilename);
        }
        public void RedirectToDisplay() {
            printStream = System.out;
        }
        public void RedirectToFile(String sFilename) {
            File file = new File(sFilename);
            try {
                fileStream = new PrintStream(new FileOutputStream(file));
                printStream = fileStream;
            } catch (Exception ex) {
                // best-effort: keep the current printStream if the file
                // cannot be opened (output continues to the previous target)
            }
        }
        public void RedirectToFile() {
            printStream = fileStream;
        }
        @Override
        public void println(String s) {
            printStream.println(s);
        }
        @Override
        public void print(String s) {
            printStream.print(s);
        }
        @Override
        public void printf(String fmt, Object... args) {
            printStream.printf(fmt, args);
        }
    }
    /** Default ProgressInfo: prints every message via the configured printer. */
    class MyProgressInfo implements ProgressInfo {
        int progressInterval;
        public MyProgressInfo(int interval) {
            progressInterval = interval;
        }
        public void setProgressInterval(int progressInterval) {
            this.progressInterval = progressInterval;
        }
        @Override
        public int GetInterval() {
            return progressInterval;
        }
        @Override
        public void ShowProgress(String sMsg) {
            myOut.println(sMsg);
        }
    }
    protected void ShowProgress(String sMsg)
    {
        ShowProgress(sMsg, false);
    }
    /**
     * Shows a progress message, rate-limited to at most one message per
     * progress interval unless {@code bShowAlways} is set.
     */
    protected void ShowProgress(String sMsg, boolean bShowAlways) {
        if (bShowAlways || (_msNow - _msPrev >= myProgressInfo.GetInterval())) {
            // call user progress function
            myProgressInfo.ShowProgress(sMsg);
            _msNow = System.currentTimeMillis();
            _msPrev = _msNow;
        } else {
            _msNow = System.currentTimeMillis();
        }
    }
    protected void Println(String s) {
        if (myOut != null)
            myOut.println(s);
    }
    protected void Print(String s) {
        if (myOut != null)
            myOut.print(s);
    }
    protected void Printf(String fmt, Object... args) {
        if (myOut != null)
            myOut.printf(fmt, args);
    }
    /** Returns the currently used JVM heap, in MB. */
    protected int GetMemoryUsage() {
        int iMemory = (int) ((Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) / (1024 * 1024));
        return iMemory;
    }
}
| Java |
/*
* SimpleCODBase.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.SimpleCOD;
import java.util.*;
import moa.clusterers.outliers.MyBaseOutlierDetector;
import moa.clusterers.outliers.SimpleCOD.ISBIndex.ISBNode;
public abstract class SimpleCODBase extends MyBaseOutlierDetector {
    /**
     * A (node, time stamp) pair stored in the {@link EventQueue}.
     * Ordered by time stamp, with node id as a tie-breaker so that events for
     * different nodes never compare equal inside the backing TreeSet.
     */
    protected static class EventItem implements Comparable<EventItem> {
        public ISBNode node;
        public Long timeStamp;
        public EventItem(ISBNode node, Long timeStamp) {
            this.node = node;
            this.timeStamp = timeStamp;
        }
        @Override
        public int compareTo(EventItem t) {
            // primary key: time stamp; secondary key: node id
            if (this.timeStamp > t.timeStamp) {
                return +1;
            } else if (this.timeStamp < t.timeStamp) {
                return -1;
            } else {
                if (this.node.id > t.node.id)
                    return +1;
                else if (this.node.id < t.node.id)
                    return -1;
            }
            return 0;
        }
    }
    /**
     * A min-priority queue of {@link EventItem}s backed by a TreeSet
     * (smallest time stamp first).
     */
    protected static class EventQueue {
        public TreeSet<EventItem> setEvents;
        public EventQueue() {
            setEvents = new TreeSet<EventItem>();
        }
        public void Insert(ISBNode node, Long expTime) {
            setEvents.add(new EventItem(node, expTime));
        }
        public EventItem FindMin() {
            if (setEvents.size() > 0) {
                // events are sorted ascending by expiration time
                return setEvents.first();
            }
            return null;
        }
        public EventItem ExtractMin() {
            // removes and returns the earliest event, or null if empty
            EventItem e = FindMin();
            if (e != null) {
                setEvents.remove(e);
                return e;
            }
            return null;
        }
    }
    // ids start at 1; GetWindowStart() never returns less than this
    protected static final Long FIRST_OBJ_ID = 1L;
    // object identifier increments with each new data stream object
    protected Long objId;
    // list used to find expired nodes
    protected Vector<ISBNode> windowNodes;
    // queue of scheduled node events, keyed by time stamp (see EventItem)
    protected EventQueue eventQueue;
    // index of objects
    protected ISBIndex ISB;
    // sliding window size, in number of objects
    protected int m_WindowSize;
    // range-query radius (passed to ISBIndex)
    protected double m_radius;
    // neighbor-count parameter k (passed to ISBIndex)
    protected int m_k;
    public boolean bWarning = false;
    // statistics: counters of EXPIRED nodes, classified by how they were
    // labeled during their lifetime (see UpdateStatistics)
    public int m_nBothInlierOutlier;
    public int m_nOnlyInlier;
    public int m_nOnlyOutlier;
    /**
     * Builds an HTML table describing the given ISBNode: its coordinates,
     * id, count_after and number of preceding neighbors still in the window.
     */
    @Override
    public String getObjectInfo(Object obj) {
        if (obj == null) return null;
        ISBNode node = (ISBNode) obj;
        ArrayList<String> infoTitle = new ArrayList<String>();
        ArrayList<String> infoValue = new ArrayList<String>();
        StringBuilder sb = new StringBuilder();
        // show node position
        for (int i = 0; i < node.obj.dimensions(); i++) {
            infoTitle.add("Dim" + (i+1));
            infoValue.add(String.format("%.3f", node.obj.get(i)));
        }
        // show node properties
        infoTitle.add("id");
        infoValue.add(String.format("%d", node.id));
        infoTitle.add("count_after");
        infoValue.add(String.format("%d", node.count_after));
        infoTitle.add("|nn_before|");
        infoValue.add(String.format("%d", node.CountPrecNeighs(GetWindowStart())));
        sb.append("<html>");
        sb.append("<table>");
        int i = 0;
        while(i < infoTitle.size() && i < infoValue.size()){
            sb.append("<tr><td><b>"+infoTitle.get(i)+":</b></td><td>"+infoValue.get(i)+"</td></tr>");
            i++;
        }
        sb.append("</table>");
        sb.append("</html>");
        return sb.toString();
    }
    /**
     * Summarizes how many nodes were always inliers, always outliers, or both
     * at different times, plus the run-time counters of the base class.
     * Combines the expired-node counters with the nodes still in the window.
     */
    @Override
    public String getStatistics() {
        StringBuilder sb = new StringBuilder();
        sb.append("Statistics:\n\n");
        // get counters of expired nodes
        int nBothInlierOutlier = m_nBothInlierOutlier;
        int nOnlyInlier = m_nOnlyInlier;
        int nOnlyOutlier = m_nOnlyOutlier;
        // add counters of non expired nodes
        for (ISBNode node : windowNodes) {
            if ((node.nInlier > 0) && (node.nOutlier > 0))
                nBothInlierOutlier++;
            else if (node.nInlier > 0)
                nOnlyInlier++;
            else
                nOnlyOutlier++;
        }
        int sum = nBothInlierOutlier + nOnlyInlier + nOnlyOutlier;
        if (sum > 0) {
            sb.append(String.format(" Nodes always inlier: %d (%.1f%%)\n", nOnlyInlier, (100 * nOnlyInlier) / (double)sum));
            sb.append(String.format(" Nodes always outlier: %d (%.1f%%)\n", nOnlyOutlier, (100 * nOnlyOutlier) / (double)sum));
            sb.append(String.format(" Nodes both inlier and outlier: %d (%.1f%%)\n", nBothInlierOutlier, (100 * nBothInlierOutlier) / (double)sum));
            sb.append(" (Sum: " + sum + ")\n");
        }
        sb.append("\n Total range queries: " + nRangeQueriesExecuted + "\n");
        sb.append(" Max memory usage: " + iMaxMemUsage + " MB\n");
        sb.append(" Total process time: " + String.format("%.2f ms", nTotalRunTime / 1000.0) + "\n");
        return sb.toString();
    }
    // id of the newest object covered by the window
    Long GetWindowEnd() {
        return objId - 1;
    }
    // id of the oldest object covered by the window (clamped at FIRST_OBJ_ID)
    Long GetWindowStart() {
        Long x = GetWindowEnd() - m_WindowSize + 1;
        if (x < FIRST_OBJ_ID)
            x = FIRST_OBJ_ID;
        return x;
    }
    // true once m_WindowSize objects have been seen
    boolean IsWinFull() {
        return (GetWindowEnd() >= FIRST_OBJ_ID + m_WindowSize - 1);
    }
    // the time stamp at which the node slides out of the window
    Long GetExpirationTime(ISBNode node) {
        return node.id + m_WindowSize;
    }
    // flags the node as an outlier (base class fires the notifier on first add)
    void SaveOutlier(ISBNode node) {
        AddOutlier(new Outlier(node.inst, node.id, node));
        node.nOutlier++; // update statistics
    }
    // un-flags the node; matching in the base class is by id only
    void RemoveOutlier(ISBNode node) {
        RemoveOutlier(new Outlier(node.inst, node.id, node));
        node.nInlier++; // update statistics
    }
    @Override
    protected boolean IsNodeIdInWin(long id) {
        // one id of tolerance at the window start, so objects expiring in the
        // current step are still reported
        int toleranceStart = 1;
        Long start = GetWindowStart() - toleranceStart;
        if ( (start <= id) && (id <= GetWindowEnd()) )
            return true;
        else
            return false;
    }
    void AddNode(ISBNode node) {
        windowNodes.add(node);
    }
    void RemoveNode(ISBNode node) {
        windowNodes.remove(node);
        RemoveExpiredOutlier(new Outlier(node.inst, node.id, node)); // ### remove when expired?
        // update statistics
        UpdateStatistics(node);
    }
    // folds an expired node into the lifetime-classification counters
    void UpdateStatistics(ISBNode node) {
        if ((node.nInlier > 0) && (node.nOutlier > 0))
            m_nBothInlierOutlier++;
        else if (node.nInlier > 0)
            m_nOnlyInlier++;
        else
            m_nOnlyOutlier++;
    }
    /**
     * Returns the oldest window node if it has slid out of the window,
     * otherwise null. Relies on windowNodes being ordered by arrival.
     */
    ISBNode GetExpiredNode() {
        if (windowNodes.size() <= 0)
            return null;
        // get oldest node
        ISBNode node = windowNodes.get(0);
        // check if node has expired
        if (node.id < GetWindowStart()) {
            return node;
        }
        return null;
    }
    // Euclidean distance between the stream objects of two nodes
    double GetEuclideanDist(ISBNode n1, ISBNode n2)
    {
        double diff;
        double sum = 0;
        int d = n1.obj.dimensions();
        for (int i = 0; i < d; i++) {
            diff = n1.obj.get(i) - n2.obj.get(i);
            sum += Math.pow(diff, 2);
        }
        return Math.sqrt(sum);
    }
    // --- debug/printing helpers below ---
    void PrintWindow() {
        Println("Window [" + GetWindowStart() + "-" + GetWindowEnd() + "]: ");
        ISBNode node;
        for (int i = 0; i < windowNodes.size(); i++) {
            node = windowNodes.get(i);
            Print("   Node: ");
            PrintNode(node);
        }
    }
    void PrintNode(ISBNode n) {
        Print("id=" + n.id + " (");
        int dim = n.obj.dimensions();
        for (int d = 0; d < dim; d++) {
            Print(Double.toString(n.obj.get(d)));
            if (d < dim - 1)
                Print(", ");
        }
        Println(")");
    }
    public void PrintNodeSet(Set<ISBNode> set) {
        for (ISBNode n : set) {
            Print(n.id + " ");
        }
        Println("");
    }
    public void PrintNodeVector(Vector<ISBNode> vector) {
        for (ISBNode n : vector) {
            Print(n.id + " ");
        }
        Println("");
    }
    public void PrintNodeList(List<ISBNode> list) {
        for (ISBNode n : list) {
            Print(n.id + " ");
        }
        Println("");
    }
    public void PrintEventQueue() {
        Println("event queue: ");
        for (EventItem n : eventQueue.setEvents) {
            Printf("   id=%d, exp=%d\n", n.node.id, n.timeStamp);
        }
    }
    public void PrintISB() {
        Print("PD: ");
        for (ISBNode n : ISB.GetAllNodes()) {
            Print(n.id + " ");
        }
        Println("");
    }
}
| Java |
/*
* MyMTree.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.SimpleCOD;
import java.util.Set;
import moa.clusterers.outliers.utils.mtree.ComposedSplitFunction;
import moa.clusterers.outliers.utils.mtree.DistanceFunction;
import moa.clusterers.outliers.utils.mtree.DistanceFunctions;
import moa.clusterers.outliers.utils.mtree.MTree;
import moa.clusterers.outliers.utils.mtree.PartitionFunctions;
import moa.clusterers.outliers.utils.mtree.PromotionFunction;
import moa.clusterers.outliers.utils.mtree.utils.Pair;
import moa.clusterers.outliers.utils.mtree.utils.Utils;
/**
 * An M-Tree over {@link StreamObj}s using Euclidean distance, a deterministic
 * promotion function and a balanced partition for node splits.
 */
class MyMTree extends MTree<StreamObj> {
    /**
     * Deterministic promotion: delegates to {@code Utils.minMax} instead of
     * the random default. NOTE(review): presumably this promotes the minimum
     * and maximum elements of the set — confirm against Utils.minMax.
     */
    private static final PromotionFunction<StreamObj> nonRandomPromotion = new PromotionFunction<StreamObj>() {
        @Override
        public Pair<StreamObj> process(Set<StreamObj> dataSet, DistanceFunction<? super StreamObj> distanceFunction) {
            return Utils.minMax(dataSet);
        }
    };
    MyMTree() {
        // Capacity argument 2 — see the MTree constructor for its exact
        // meaning (presumably the minimum node capacity; TODO confirm).
        super(2, DistanceFunctions.EUCLIDEAN,
                new ComposedSplitFunction<StreamObj>(
                        nonRandomPromotion,
                        new PartitionFunctions.BalancedPartition<StreamObj>()));
    }
    public void add(StreamObj data) {
        super.add(data);
        // NOTE(review): _check() runs a tree consistency check on EVERY
        // insertion; likely expensive — consider disabling outside debugging.
        _check();
    }
    public boolean remove(StreamObj data) {
        boolean result = super.remove(data);
        // NOTE(review): same consistency check on every removal (see add).
        _check();
        return result;
    }
    /** Exposes the protected distance function of the base class. */
    DistanceFunction<? super StreamObj> getDistanceFunction() {
        return distanceFunction;
    }
}
| Java |
/*
* StreamObj.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.SimpleCOD;
import moa.clusterers.outliers.utils.mtree.DistanceFunctions.EuclideanCoordinate;
public class StreamObj implements EuclideanCoordinate, Comparable<StreamObj> {
private final double[] values;
private final int hashCode;
public StreamObj(double... values) {
this.values = values;
int h = 1;
for (double value : values) {
h = 31 * (int) h + (int) value;
}
this.hashCode = h;
}
@Override
public int dimensions() {
return values.length;
}
@Override
public double get(int index) {
return values[index];
}
@Override
public int hashCode() {
return hashCode;
}
@Override
public boolean equals(Object obj) {
if (obj instanceof StreamObj) {
StreamObj that = (StreamObj) obj;
if (this.dimensions() != that.dimensions()) {
return false;
}
for (int i = 0; i < this.dimensions(); i++) {
if (this.values[i] != that.values[i]) {
return false;
}
}
return true;
} else {
return false;
}
}
@Override
public int compareTo(StreamObj that) {
int dimensions = Math.min(this.dimensions(), that.dimensions());
for (int i = 0; i < dimensions; i++) {
double v1 = this.values[i];
double v2 = that.values[i];
if (v1 > v2) {
return +1;
}
if (v1 < v2) {
return -1;
}
}
if (this.dimensions() > dimensions) {
return +1;
}
if (that.dimensions() > dimensions) {
return -1;
}
return 0;
}
} | Java |
/*
* ISBIndex.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.SimpleCOD;
import java.util.*;
import weka.core.Instance;
public class ISBIndex {

    /**
     * A node of the index: one stream object inside the current window, together
     * with its arrival identifier and the neighbor bookkeeping used by the
     * distance-based outlier test.
     */
    public static class ISBNode implements Comparable<ISBNode> {
        public Instance inst;          // original weka instance (kept for reporting)
        public StreamObj obj;          // coordinates of the object
        public Long id;                // arrival order; also acts as a timestamp
        public boolean bOutlier;       // current classification of this node
        public int count_after;        // neighbors that arrived after this node
        // Preceding neighbors, kept sorted ascending by id (binarySearch relies on this).
        private ArrayList<ISBNode> nn_before;

        // statistics
        public int nOutlier;           // times this node was reported an outlier
        public int nInlier;            // times this node was reported an inlier

        public ISBNode(Instance inst, StreamObj obj, Long id) {
            this.inst = inst;
            this.obj = obj;
            this.id = id;
            this.bOutlier = false;
            // starts at 1 — presumably the node counts itself as a neighbor; TODO confirm
            this.count_after = 1;
            this.nn_before = new ArrayList<ISBNode>();
            // init statistics
            nOutlier = 0;
            nInlier = 0;
        }

        /** Nodes are ordered by arrival identifier. */
        @Override
        public int compareTo(ISBNode t) {
            if (this.id > t.id)
                return +1;
            else if (this.id < t.id)
                return -1;
            return 0;
        }

        /** Inserts {@code node} into the sorted preceding-neighbor list (duplicates ignored). */
        public void AddPrecNeigh(ISBNode node) {
            int pos = Collections.binarySearch(nn_before, node);
            if (pos < 0) {
                // not present: binarySearch returns -(insertionPoint) - 1
                nn_before.add(-(pos + 1), node);
            }
        }

        /** Removes {@code node} from the preceding-neighbor list, if present. */
        public void RemovePrecNeigh(ISBNode node) {
            int pos = Collections.binarySearch(nn_before, node);
            if (pos >= 0) {
                // item exists
                nn_before.remove(pos);
            }
        }

        /** Returns the oldest preceding neighbor with id &gt;= sinceId, or null if none. */
        public ISBNode GetMinPrecNeigh(Long sinceId) {
            if (nn_before.size() > 0) {
                int startPos;
                ISBNode dummy = new ISBNode(null, null, sinceId);
                int pos = Collections.binarySearch(nn_before, dummy);
                if (pos < 0) {
                    // sinceId absent: first neighbor with id >= sinceId sits at the insertion point
                    startPos = -(pos + 1);
                } else {
                    // item exists at startPos
                    startPos = pos;
                }
                if (startPos < nn_before.size()) {
                    return nn_before.get(startPos);
                }
            }
            return null;
        }

        /** Counts the preceding neighbors with id &gt;= sinceId (i.e. still in the window). */
        public int CountPrecNeighs(Long sinceId) {
            if (nn_before.size() > 0) {
                // get number of neighs with id >= sinceId
                int startPos;
                ISBNode dummy = new ISBNode(null, null, sinceId);
                int pos = Collections.binarySearch(nn_before, dummy);
                if (pos < 0) {
                    // item does not exist, should insert at position startPos
                    startPos = -(pos + 1);
                } else {
                    // item exists at startPos
                    startPos = pos;
                }
                if (startPos < nn_before.size()) {
                    return nn_before.size() - startPos;
                }
            }
            return 0;
        }

        /** Exposes the (mutable, sorted) preceding-neighbor list; callers must not unsort it. */
        public List<ISBNode> Get_nn_before() {
            return nn_before;
        }
    }

    MyMTree mtree;                        // metric tree over the distinct stream objects
    Map<Integer, Set<ISBNode>> mapNodes;  // nodes bucketed by their object's hash code
    double m_radius;
    int m_k; // k nearest neighbors

    public ISBIndex(double radius, int k) {
        mtree = new MyMTree();
        mapNodes = new HashMap<Integer, Set<ISBNode>>();
        m_radius = radius;
        m_k = k;
    }

    /** Returns every node currently stored in the index (bucket order, unspecified). */
    Vector<ISBNode> GetAllNodes() {
        Vector<ISBNode> v = new Vector<ISBNode>();
        // Typed iteration over the map values; the previous raw-Iterator version
        // required unchecked casts of Map.Entry and Set.
        for (Set<ISBNode> setNodes : mapNodes.values()) {
            v.addAll(setNodes);
        }
        return v;
    }

    /** A node matched by a range query, along with its distance from the query object. */
    public static class ISBSearchResult {
        public ISBNode node;
        public double distance;

        public ISBSearchResult(ISBNode n, double distance) {
            this.node = n;
            this.distance = distance;
        }
    }

    /** Returns all nodes whose stream object lies within {@code radius} of {@code node}'s object. */
    public Vector<ISBSearchResult> RangeSearch(ISBNode node, double radius) {
        Vector<ISBSearchResult> results = new Vector<ISBSearchResult>();
        MyMTree.Query query = mtree.getNearestByRange(node.obj, radius);
        for (MyMTree.ResultItem q : query) {
            // q.data: an object found within range; q.distance: its distance from the query.
            // Several nodes may share the same stream object — report each of them.
            for (ISBNode match : MapGetNodes(q.data)) {
                results.add(new ISBSearchResult(match, q.distance));
            }
        }
        return results;
    }

    /** Adds a node: its object goes into the M-tree and the node into the hash map. */
    public void Insert(ISBNode node) {
        // insert object of node at mtree
        mtree.add(node.obj);
        // insert node at map
        MapInsert(node);
    }

    /** Removes a node; the object leaves the M-tree once no node references it. */
    public void Remove(ISBNode node) {
        // remove from map
        MapDelete(node);
        // check if stream object at mtree is still being referenced
        if (MapCountObjRefs(node.obj) <= 0) {
            // delete stream object from mtree
            mtree.remove(node.obj);
        }
    }

    /** Returns all nodes whose stream object equals {@code obj}. */
    Vector<ISBNode> MapGetNodes(StreamObj obj) {
        Vector<ISBNode> v = new Vector<ISBNode>();
        Set<ISBNode> bucket = mapNodes.get(obj.hashCode());
        if (bucket != null) {
            for (ISBNode node : bucket) {
                // buckets may contain hash collisions; confirm real equality
                if (node.obj.equals(obj))
                    v.add(node);
            }
        }
        return v;
    }

    /** Counts how many nodes still reference the given stream object. */
    int MapCountObjRefs(StreamObj obj) {
        int iCount = 0;
        Set<ISBNode> bucket = mapNodes.get(obj.hashCode());
        if (bucket != null) {
            for (ISBNode n : bucket) {
                if (n.obj.equals(obj))
                    iCount++;
            }
        }
        return iCount;
    }

    /** Inserts a node into its hash bucket, creating the bucket on first use. */
    void MapInsert(ISBNode node) {
        int h = node.obj.hashCode();
        Set<ISBNode> s = mapNodes.get(h);
        if (s == null) {
            s = new HashSet<ISBNode>();
            mapNodes.put(h, s);
        }
        s.add(node);
    }

    /** Removes a node from its hash bucket, discarding the bucket when it empties. */
    void MapDelete(ISBNode node) {
        int h = node.obj.hashCode();
        Set<ISBNode> s = mapNodes.get(h);
        if (s != null) {
            s.remove(node);
            if (s.isEmpty()) { // ### added
                mapNodes.remove(h);
            }
        }
    }
}
| Java |
/*
* Test.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.SimpleCOD;
import moa.streams.ArffFileStream;
import moa.streams.clustering.RandomRBFGeneratorEvents;
import weka.core.Instance;
public class Test {
    /**
     * Debug driver: runs SimpleCOD (k=3, R=5, window=6) over a small ARFF file
     * and prints the total processing time in milliseconds.
     */
    public static void main(String[] args) throws Exception
    {
        //if (true) return;
        int numInstances = 30;

        // type is imported, no need for the fully-qualified name
        ArffFileStream stream = new ArffFileStream("./datasets/debug_1.txt", -1);
        //RandomRBFGeneratorEvents stream = new RandomRBFGeneratorEvents();
        stream.prepareForUse();

        SimpleCOD myOutlierDetector = new SimpleCOD();
        myOutlierDetector.kOption.setValue(3);
        myOutlierDetector.radiusOption.setValue(5);
        myOutlierDetector.windowSizeOption.setValue(6);
        myOutlierDetector.setModelContext(stream.getHeader());
        myOutlierDetector.prepareForUse();

        Long tmStart = System.currentTimeMillis();
        int numberSamples = 0;
        // (removed an unused local that cached windowSizeOption)
        while (stream.hasMoreInstances() && (numberSamples < numInstances)) {
            Instance newInst = stream.nextInstance();
            myOutlierDetector.processNewInstanceImpl(newInst);
            numberSamples++;
        }

        //myOutlierDetector.PrintOutliers();
        System.out.println("Total time = " + (System.currentTimeMillis() - tmStart) + " ms");
    }
}
| Java |
/*
* SimpleCOD.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.SimpleCOD;
import java.util.Vector;
import moa.clusterers.outliers.SimpleCOD.ISBIndex.ISBNode;
import moa.clusterers.outliers.SimpleCOD.ISBIndex.ISBSearchResult;
import moa.options.FloatOption;
import moa.options.IntOption;
import weka.core.Instance;
// The algorithm is described in:
// M. Kontaki, A. Gounaris, A. N. Papadopoulos, K. Tsichlas, and Y. Manolopoulos.
// Continuous monitoring of distance-based outliers over data streams.
// In ICDE, pages 135-146, 2011.
public class SimpleCOD extends SimpleCODBase {
    // Range-query radius R: objects within this distance count as neighbors.
    public FloatOption radiusOption = new FloatOption("radius", 'r', "Search radius.", 0.1);
    // Neighbor threshold k: a node with fewer than k neighbors in the window is an outlier.
    public IntOption kOption = new IntOption("k", 't', "Parameter k.", 50);

    public SimpleCOD()
    {
        // System.out.println("SimpleCOD: created");
    }

    /**
     * Reads the option values and (re)creates all per-run state: the window
     * node list, the ISB index, the event queue and the statistics counters.
     */
    @Override
    public void Init() {
        super.Init();

        m_WindowSize = windowSizeOption.getValue();
        m_radius = radiusOption.getValue();
        m_k = kOption.getValue();

        Println("Init SimpleCOD:");
        Println(" window_size: " + m_WindowSize);
        Println(" radius: " + m_radius);
        Println(" k: " + m_k);

        //bTrace = true;
        //bWarning = true;

        objId = FIRST_OBJ_ID; // init object identifier
        // create nodes list of window
        windowNodes = new Vector<ISBNode>();
        // create ISB
        ISB = new ISBIndex(m_radius, m_k);
        // create event queue
        eventQueue = new EventQueue();

        // init statistics
        m_nBothInlierOutlier = 0;
        m_nOnlyInlier = 0;
        m_nOnlyOutlier = 0;
    }

    /**
     * Handles the arrival of nodeNew: one range query finds its neighbors,
     * each neighbor's succeeding-neighbor counter is bumped (possibly turning
     * a former outlier into an inlier), and finally nodeNew itself is
     * classified as inlier or outlier.
     * NOTE(review): bNewNode is never read in the body — it is always passed
     * as true by ProcessNewStreamObj.
     */
    void ProcessNewNode(ISBNode nodeNew, boolean bNewNode) {
        if (bTrace) { Print("ProcessNewNode: "); PrintNode(nodeNew); }

        if (bTrace) Println("Perform R range query");
        nRangeQueriesExecuted++;
        Vector<ISBSearchResult> resultNodes;
        resultNodes = ISB.RangeSearch(nodeNew, m_radius);

        for (ISBSearchResult sr : resultNodes) {
            double distance = sr.distance;
            ISBNode q = sr.node;
            // skip the query node itself; RangeSearch already filters by radius,
            // the distance check here is a defensive re-check
            if ( (nodeNew != q) && (distance <= m_radius) ) {
                if (bTrace) Println("nodeNew has neighbor q.id " + q.id);
                // q precedes nodeNew, so q becomes a preceding neighbor of nodeNew
                // and gains one succeeding neighbor
                nodeNew.AddPrecNeigh(q);
                q.count_after++;

                if (q.bOutlier) {
                    // q gained a neighbor — it may have just reached k neighbors
                    int count = q.CountPrecNeighs(GetWindowStart()) + q.count_after;
                    if (count >= m_k) {
                        if (bTrace) Println("q.id " + q.id + " is now an inlier");
                        q.bOutlier = false;
                        RemoveOutlier(q);
                        // insert q to event queue: it must be re-checked when its
                        // oldest preceding neighbor expires
                        ISBNode nodeMinExp = q.GetMinPrecNeigh(GetWindowStart());
                        AddToEventQueue(q, nodeMinExp);
                    }
                }
            }
        }

        if (bTrace) Println("Check if nodeNew is an inlier or outlier");
        int count = nodeNew.CountPrecNeighs(GetWindowStart()) + nodeNew.count_after;
        if (count >= m_k) {
            if (bTrace) Println("nodeNew is an inlier");
            nodeNew.bOutlier = false;
            RemoveOutlier(nodeNew); // updates statistics
            // insert nodeNew to event queue
            ISBNode nodeMinExp = nodeNew.GetMinPrecNeigh(GetWindowStart());
            AddToEventQueue(nodeNew, nodeMinExp);
        } else {
            if (bTrace) Println("nodeNew is an outlier");
            nodeNew.bOutlier = true;
            SaveOutlier(nodeNew);
        }
    }

    /**
     * Schedules a re-check of inlier x for the moment its oldest preceding
     * neighbor (nodeMinExp) expires. If x has no preceding neighbors, no event
     * is needed: its inlier status rests entirely on succeeding neighbors,
     * which never expire before x does.
     */
    void AddToEventQueue(ISBNode x, ISBNode nodeMinExp) {
        if (bTrace) Println("AddToEventQueue x.id: " + x.id);
        if (nodeMinExp != null) {
            Long expTime = GetExpirationTime(nodeMinExp);
            eventQueue.Insert(x, expTime);
            if (bTrace) {
                Print("x.nn_before: "); PrintNodeList(x.Get_nn_before());
                Println("nodeMinExp: " + nodeMinExp.id + ", expTime = " + expTime);
                PrintEventQueue();
            }
        } else {
            if (bWarning) Println("AddToEventQueue: Cannot add x.id: " + x.id + " to event queue (nn_before is empty, count_after=" + x.count_after + ")");
        }
    }

    /**
     * Drains every event whose timestamp falls inside the current window:
     * each affected node loses nodeExpired as a preceding neighbor and is
     * re-classified; nodes that stay inliers are re-scheduled.
     */
    void ProcessEventQueue(ISBNode nodeExpired) {
        EventItem e = eventQueue.FindMin();
        while ((e != null) && (e.timeStamp <= GetWindowEnd())) {
            e = eventQueue.ExtractMin();
            ISBNode x = e.node;
            if (bTrace) Println("Process event queue: check node x: " + x.id);
            // node x must be in window
            if (IsNodeIdInWin(x.id)) {
                // remove nodeExpired from x.nn_before
                x.RemovePrecNeigh(nodeExpired);
                // get amount of neighbors of x
                int count = x.count_after + x.CountPrecNeighs(GetWindowStart());
                if (count < m_k) {
                    if (bTrace) Println("x is an outlier");
                    x.bOutlier = true;
                    SaveOutlier(x);
                } else {
                    if (bTrace) Println("x is an inlier, add to event queue");
                    x.bOutlier = false;
                    // get oldest preceding neighbor of x
                    ISBNode nodeMinExp = x.GetMinPrecNeigh(GetWindowStart());
                    // add x to event queue
                    AddToEventQueue(x, nodeMinExp);
                }
            } else {
                if (bWarning) Println("Process event queue: node x.id: " + x.id + " has expired!");
            }
            e = eventQueue.FindMin();
        }
    }

    /**
     * Evicts an expired node from the index and window, then re-checks all
     * nodes whose inlier status depended on it.
     */
    void ProcessExpiredNode(ISBNode nodeExpired) {
        if (nodeExpired != null) {
            if (bTrace) Println("\nnodeExpired: " + nodeExpired.id);
            ISB.Remove(nodeExpired); // remove nodeExpired from index
            RemoveNode(nodeExpired);
            ProcessEventQueue(nodeExpired);
        }
    }

    /**
     * Entry point per stream object: wraps the instance into an ISBNode,
     * slides the window, indexes and classifies the new node, and finally
     * processes at most one expired node.
     */
    @Override
    protected void ProcessNewStreamObj(Instance inst)
    {
        if (bShowProgress) ShowProgress("Processed " + (objId-1) + " stream objects.");
        // PrintInstance(inst);

        double[] values = getInstanceValues(inst);
        StreamObj obj = new StreamObj(values);

        if (bTrace) Println("\n- - - - - - - - - - - -\n");

        // create new ISB node
        ISBNode nodeNew = new ISBNode(inst, obj, objId);
        if (bTrace) { Print("New node: "); PrintNode(nodeNew); }

        objId++; // update object identifier (slide window)

        AddNode(nodeNew); // add nodeNew to window
        if (bTrace) PrintWindow();

        // insert new node to ISB index
        // NOTE(review): AddNode above may already insert into ISB in the base
        // class — verify against SimpleCODBase to rule out a double insert.
        ISB.Insert(nodeNew);

        ProcessNewNode(nodeNew, true);
        ProcessExpiredNode(GetExpiredNode());

        if (bTrace) {
            PrintOutliers();
            PrintISB();
        }
    }
}
| Java |
/*
* MyMTree.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.AbstractC;
import java.util.Set;
import moa.clusterers.outliers.utils.mtree.ComposedSplitFunction;
import moa.clusterers.outliers.utils.mtree.DistanceFunction;
import moa.clusterers.outliers.utils.mtree.DistanceFunctions;
import moa.clusterers.outliers.utils.mtree.MTree;
import moa.clusterers.outliers.utils.mtree.PartitionFunctions;
import moa.clusterers.outliers.utils.mtree.PromotionFunction;
import moa.clusterers.outliers.utils.mtree.utils.Pair;
import moa.clusterers.outliers.utils.mtree.utils.Utils;
/**
 * M-tree over StreamObj values using Euclidean distance.
 * Split policy: deterministic min/max promotion with balanced partitioning.
 */
class MyMTree extends MTree<StreamObj> {

    /** Promotion policy that deterministically picks via Utils.minMax (no randomness). */
    private static final PromotionFunction<StreamObj> DETERMINISTIC_PROMOTION =
            new PromotionFunction<StreamObj>() {
                @Override
                public Pair<StreamObj> process(Set<StreamObj> dataSet,
                                               DistanceFunction<? super StreamObj> distanceFunction) {
                    return Utils.minMax(dataSet);
                }
            };

    MyMTree() {
        super(2,
              DistanceFunctions.EUCLIDEAN,
              new ComposedSplitFunction<StreamObj>(
                      DETERMINISTIC_PROMOTION,
                      new PartitionFunctions.BalancedPartition<StreamObj>()));
    }

    /** Adds an object, then runs the tree's internal consistency check. */
    public void add(StreamObj data) {
        super.add(data);
        _check();
    }

    /** Removes an object (if present), then runs the tree's internal consistency check. */
    public boolean remove(StreamObj data) {
        boolean removed = super.remove(data);
        _check();
        return removed;
    }

    DistanceFunction<? super StreamObj> getDistanceFunction() {
        return distanceFunction;
    }
}
| Java |
/*
* StreamObj.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.AbstractC;
import moa.clusterers.outliers.utils.mtree.DistanceFunctions.EuclideanCoordinate;
public class StreamObj implements EuclideanCoordinate, Comparable<StreamObj> {
private final double[] values;
private final int hashCode;
public StreamObj(double... values) {
this.values = values;
int h = 1;
for (double value : values) {
h = 31 * (int) h + (int) value;
}
this.hashCode = h;
}
@Override
public int dimensions() {
return values.length;
}
@Override
public double get(int index) {
return values[index];
}
@Override
public int hashCode() {
return hashCode;
}
@Override
public boolean equals(Object obj) {
if (obj instanceof StreamObj) {
StreamObj that = (StreamObj) obj;
if (this.dimensions() != that.dimensions()) {
return false;
}
for (int i = 0; i < this.dimensions(); i++) {
if (this.values[i] != that.values[i]) {
return false;
}
}
return true;
} else {
return false;
}
}
@Override
public int compareTo(StreamObj that) {
int dimensions = Math.min(this.dimensions(), that.dimensions());
for (int i = 0; i < dimensions; i++) {
double v1 = this.values[i];
double v2 = that.values[i];
if (v1 > v2) {
return +1;
}
if (v1 < v2) {
return -1;
}
}
if (this.dimensions() > dimensions) {
return +1;
}
if (that.dimensions() > dimensions) {
return -1;
}
return 0;
}
} | Java |
/*
* ISBIndex.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.AbstractC;
import java.util.*;
import weka.core.Instance;
/**
 * Index over the window's stream objects: an M-tree for range queries plus a
 * hash map from object hash codes to the nodes referencing them.
 */
public class ISBIndex {

    /** A window entry: the stream object, its arrival id, and per-slide counters. */
    public static class ISBNode {
        public Instance inst;
        public StreamObj obj;
        public Long id;
        public ArrayList<Integer> lt_cnt;

        // statistics
        public int nOutlier;
        public int nInlier;

        public ISBNode(Instance inst, StreamObj obj, Long id) {
            this.inst = inst;
            this.obj = obj;
            this.id = id;
            lt_cnt = new ArrayList<Integer>();
            // init statistics
            nOutlier = 0;
            nInlier = 0;
        }
    }

    MyMTree mtree;                        // metric index over the stream objects
    Map<Integer, Set<ISBNode>> mapNodes;  // nodes bucketed by their object's hash code
    double m_radius;
    double m_Fraction;

    public ISBIndex(double radius, double fra) {
        mtree = new MyMTree();
        mapNodes = new HashMap<Integer, Set<ISBNode>>();
        m_radius = radius;
        m_Fraction = fra;
    }

    /** A node matched by a range query together with its distance from the query. */
    public static class ISBSearchResult {
        public ISBNode node;
        public double distance;

        public ISBSearchResult(ISBNode n, double distance) {
            this.node = n;
            this.distance = distance;
        }
    }

    /** Returns every node whose object lies within {@code radius} of {@code node}'s object. */
    public Vector<ISBSearchResult> RangeSearch(ISBNode node, double radius) {
        Vector<ISBSearchResult> results = new Vector<ISBSearchResult>();
        MyMTree.Query query = mtree.getNearestByRange(node.obj, radius);
        for (MyMTree.ResultItem item : query) {
            // item.data is an object within range, item.distance its distance
            // from the query; several nodes may reference the same object.
            for (ISBNode hit : MapGetNodes(item.data)) {
                results.add(new ISBSearchResult(hit, item.distance));
            }
        }
        return results;
    }

    /** Adds a node: object into the M-tree, node into the hash map. */
    public void Insert(ISBNode node) {
        mtree.add(node.obj);
        MapInsert(node);
    }

    /** Removes a node; its object leaves the M-tree once nothing references it. */
    public void Remove(ISBNode node) {
        MapDelete(node);
        if (MapCountObjRefs(node.obj) <= 0) {
            mtree.remove(node.obj);
        }
    }

    /** Returns all nodes whose stream object equals {@code obj}. */
    Vector<ISBNode> MapGetNodes(StreamObj obj) {
        Vector<ISBNode> matches = new Vector<ISBNode>();
        Set<ISBNode> bucket = mapNodes.get(obj.hashCode());
        if (bucket != null) {
            for (ISBNode candidate : bucket) {
                // buckets hold hash collisions too; confirm real equality
                if (candidate.obj.equals(obj))
                    matches.add(candidate);
            }
        }
        return matches;
    }

    /** Counts how many nodes still reference the given stream object. */
    int MapCountObjRefs(StreamObj obj) {
        int refs = 0;
        Set<ISBNode> bucket = mapNodes.get(obj.hashCode());
        if (bucket != null) {
            for (ISBNode candidate : bucket) {
                if (candidate.obj.equals(obj))
                    refs++;
            }
        }
        return refs;
    }

    /** Inserts a node into its hash bucket, creating the bucket on first use. */
    void MapInsert(ISBNode node) {
        int h = node.obj.hashCode();
        Set<ISBNode> bucket = mapNodes.get(h);
        if (bucket == null) {
            bucket = new HashSet<ISBNode>();
            mapNodes.put(h, bucket);
        }
        bucket.add(node);
    }

    /** Removes a node from its hash bucket, dropping the bucket when empty. */
    void MapDelete(ISBNode node) {
        int h = node.obj.hashCode();
        Set<ISBNode> bucket = mapNodes.get(h);
        if (bucket != null) {
            bucket.remove(node);
            if (bucket.isEmpty()) {
                mapNodes.remove(h);
            }
        }
    }
}
| Java |
/*
* Test.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.AbstractC;
import moa.streams.clustering.RandomRBFGeneratorEvents;
import weka.core.Instance;
public class Test {
    /**
     * Debug driver: runs AbstractC (default options) over a synthetic RBF
     * event stream and reports the total wall-clock time.
     */
    public static void main(String[] args) throws Exception
    {
        //if (true) return;
        final int maxInstances = 10000;

        //moa.streams.ArffFileStream stream = new ArffFileStream("./datasets/debug_1.txt", -1);
        RandomRBFGeneratorEvents stream = new RandomRBFGeneratorEvents();
        stream.prepareForUse();

        AbstractC detector = new AbstractC();
        /*detector.fractionOption.setValue(0.2);
        detector.radiusOption.setValue(3);
        detector.windowSizeOption.setValue(12);*/
        detector.setModelContext(stream.getHeader());
        detector.prepareForUse();

        Long startedAt = System.currentTimeMillis();
        int processed = 0;
        int w = detector.windowSizeOption.getValue();
        while (stream.hasMoreInstances() && (processed < maxInstances)) {
            Instance inst = stream.nextInstance();
            detector.processNewInstanceImpl(inst);
            processed++;

            // periodic debug hooks (currently disabled)
            if (processed % 100 == 0) {
                //System.out.println("Processed " + processed + " stream objects.");
            }
            if ((processed % (w / 2)) == 0) {
                //detector.PrintOutliers();
            }
        }

        // detector.PrintOutliers();
        System.out.println("Total time = " + (System.currentTimeMillis() - startedAt) + " ms");
    }
}
| Java |
/*
* AbstractCBase.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.AbstractC;
import java.util.ArrayList;
import java.util.Vector;
import moa.clusterers.outliers.AbstractC.ISBIndex.ISBNode;
import moa.clusterers.outliers.MyBaseOutlierDetector;
/**
 * Shared sliding-window machinery for the AbstractC outlier detector:
 * window bookkeeping, ISB index maintenance, outlier reporting and statistics.
 */
public abstract class AbstractCBase extends MyBaseOutlierDetector {
    protected static final Long FIRST_OBJ_ID = 1L;

    // object identifier; increments with each new data stream object
    protected Long objId;
    // window nodes in arrival order; used to locate expired nodes
    protected Vector<ISBNode> windowNodes;
    protected ISBIndex ISB;

    protected int m_WindowSize;
    protected double m_radius;
    protected double m_Fraction;
    protected boolean bWarning = false;

    // statistics, accumulated for expired nodes in RemoveNode()
    public int m_nBothInlierOutlier;
    public int m_nOnlyInlier;
    public int m_nOnlyOutlier;

    /**
     * Builds an HTML tooltip for a node: one table row per coordinate,
     * followed by the node's identifier. Returns null for a null argument.
     */
    @Override
    public String getObjectInfo(Object obj) {
        if (obj == null) return null;
        ISBNode node = (ISBNode) obj;

        ArrayList<String> infoTitle = new ArrayList<String>();
        ArrayList<String> infoValue = new ArrayList<String>();

        // show node position
        for (int i = 0; i < node.obj.dimensions(); i++) {
            infoTitle.add("Dim" + (i + 1));
            infoValue.add(String.format("%.3f", node.obj.get(i)));
        }
        // show node properties
        infoTitle.add("id");
        infoValue.add(String.format("%d", node.id));

        StringBuilder sb = new StringBuilder();
        sb.append("<html>");
        sb.append("<table>");
        for (int i = 0; i < infoTitle.size() && i < infoValue.size(); i++) {
            // chained appends instead of string concatenation inside append()
            sb.append("<tr><td><b>").append(infoTitle.get(i))
              .append(":</b></td><td>").append(infoValue.get(i)).append("</td></tr>");
        }
        sb.append("</table>");
        sb.append("</html>");
        return sb.toString();
    }

    /**
     * Summarizes, per node, whether it was always an inlier, always an
     * outlier, or both at different times, plus query/memory/time totals.
     */
    @Override
    public String getStatistics() {
        StringBuilder sb = new StringBuilder();
        sb.append("Statistics:\n\n");

        // get counters of expired nodes
        int nBothInlierOutlier = m_nBothInlierOutlier;
        int nOnlyInlier = m_nOnlyInlier;
        int nOnlyOutlier = m_nOnlyOutlier;

        // add counters of non expired nodes
        for (ISBNode node : windowNodes) {
            if ((node.nInlier > 0) && (node.nOutlier > 0))
                nBothInlierOutlier++;
            else if (node.nInlier > 0)
                nOnlyInlier++;
            else
                nOnlyOutlier++;
        }

        int sum = nBothInlierOutlier + nOnlyInlier + nOnlyOutlier;
        if (sum > 0) {
            sb.append(String.format(" Nodes always inlier: %d (%.1f%%)\n", nOnlyInlier, (100 * nOnlyInlier) / (double) sum));
            sb.append(String.format(" Nodes always outlier: %d (%.1f%%)\n", nOnlyOutlier, (100 * nOnlyOutlier) / (double) sum));
            sb.append(String.format(" Nodes both inlier and outlier: %d (%.1f%%)\n", nBothInlierOutlier, (100 * nBothInlierOutlier) / (double) sum));
            sb.append(" (Sum: " + sum + ")\n");
        }

        sb.append("\n Total range queries: " + nRangeQueriesExecuted + "\n");
        sb.append(" Max memory usage: " + iMaxMemUsage + " MB\n");
        sb.append(" Total process time: " + String.format("%.2f ms", nTotalRunTime / 1000.0) + "\n");
        return sb.toString();
    }

    /** Identifier of the newest object in the window. */
    Long GetWindowEnd() {
        return objId - 1;
    }

    /** Identifier of the oldest object in the window (never below FIRST_OBJ_ID). */
    Long GetWindowStart() {
        Long x = GetWindowEnd() - m_WindowSize + 1;
        if (x < FIRST_OBJ_ID)
            x = FIRST_OBJ_ID;
        return x;
    }

    /** True once at least m_WindowSize objects have arrived. */
    boolean IsWinFull() {
        return (GetWindowEnd() >= FIRST_OBJ_ID + m_WindowSize - 1);
    }

    /** Time at which the given node slides out of the window. */
    Long GetExpirationTime(ISBNode node) {
        return node.id + m_WindowSize;
    }

    /** Appends a node to the window and indexes it. */
    void AddNode(ISBNode node) {
        windowNodes.add(node);
        ISB.Insert(node);
    }

    /**
     * Evicts a node from window and index, retires its outlier record,
     * and folds its inlier/outlier history into the expired-node statistics.
     */
    void RemoveNode(ISBNode node) {
        windowNodes.remove(node);
        ISB.Remove(node);
        RemoveExpiredOutlier(new Outlier(node.inst, node.id, node)); // ### remove when expired?
        // update statistics
        if ((node.nInlier > 0) && (node.nOutlier > 0))
            m_nBothInlierOutlier++;
        else if (node.nInlier > 0)
            m_nOnlyInlier++;
        else
            m_nOnlyOutlier++;
    }

    /** Returns the oldest node if it has slid out of the window, else null. */
    ISBNode GetExpiredNode() {
        if (windowNodes.isEmpty())
            return null;
        // windowNodes is kept in arrival order, so the oldest node is at index 0
        ISBNode node = windowNodes.get(0);
        if (node.id < GetWindowStart()) {
            return node;
        }
        return null;
    }

    /** Reports the node as an outlier and updates its statistics. */
    void SaveOutlier(ISBNode node) {
        AddOutlier(new Outlier(node.inst, node.id, node));
        node.nOutlier++; // update statistics
    }

    /** Clears the node's outlier status and updates its statistics. */
    void RemoveOutlier(ISBNode node) {
        RemoveOutlier(new Outlier(node.inst, node.id, node));
        node.nInlier++; // update statistics
    }

    /** True iff id lies inside the current window [GetWindowStart(), GetWindowEnd()]. */
    @Override
    protected boolean IsNodeIdInWin(long id) {
        // direct boolean return instead of if/else-true/false
        return (GetWindowStart() <= id) && (id <= GetWindowEnd());
    }

    /** Debug helper: prints the window bounds and every node in it. */
    void PrintWindow() {
        Println("Window [" + GetWindowStart() + "-" + GetWindowEnd() + "]: ");
        ISBNode node;
        for (int i = 0; i < windowNodes.size(); i++) {
            node = windowNodes.get(i);
            Print(" Node: ");
            PrintNode(node);
        }
    }

    /** Debug helper: prints a node's id and coordinates. */
    void PrintNode(ISBNode n) {
        Print("id=" + n.id + " (");
        int dim = n.obj.dimensions();
        for (int d = 0; d < dim; d++) {
            Print(Double.toString(n.obj.get(d)));
            if (d < dim - 1)
                Print(", ");
        }
        Println(")");
    }

    /** Debug helper: prints the per-slide neighbor counters of a node. */
    public void Print_lt_cnt(ArrayList<Integer> lt_cnt) {
        for (int i = 0; i < lt_cnt.size(); i++) {
            Print("(" + i + ": " + lt_cnt.get(i) + ") ");
        }
        Println("");
    }
}
| Java |
/*
* AbstractC.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.AbstractC;
import java.util.Vector;
import moa.clusterers.outliers.AbstractC.ISBIndex.ISBNode;
import moa.clusterers.outliers.AbstractC.ISBIndex.ISBSearchResult;
import moa.options.FlagOption;
import moa.options.FloatOption;
import moa.options.IntOption;
import weka.core.Instance;
// The algorithm is presented in:
// D. Yang, E. Rundensteiner, and M. Ward.
// Neighbor-based pattern detection for windows over streaming data.
// In EDBT, pages 529-540, 2009.
public class AbstractC extends AbstractCBase {
    // Range-query radius R: objects within this distance count as neighbors.
    public FloatOption radiusOption = new FloatOption("radius", 'r', "Search radius.", 0.1);
    //public FloatOption fractionOption = new FloatOption("fraction", 'f', "Parameter fraction.", 0.05);
    // Neighbor threshold k; converted to a fraction of the window size in Init().
    public IntOption kOption = new IntOption("k", 't', "Parameter k.", 50);
    // When set, outliers are only reported once the window has filled up.
    public FlagOption waitWinFullOption = new FlagOption("waitWinFull", 'a', "Output outliers when windows is full.");

    public AbstractC()
    {
        // System.out.println("AbstractC: created");
    }

    /**
     * Reads the option values and (re)creates all per-run state:
     * the window node list, the ISB index and the statistics counters.
     * The neighbor threshold is expressed as the fraction k / window_size.
     */
    @Override
    public void Init() {
        super.Init();

        m_WindowSize = windowSizeOption.getValue();
        m_radius = radiusOption.getValue();
        //m_Fraction = fractionOption.getValue();
        m_Fraction = (double)kOption.getValue() / (double)m_WindowSize;

        Println("Init AbstractC:");
        Println(" window_size: " + m_WindowSize);
        Println(" radius: " + m_radius);
        Println(" Fraction: " + m_Fraction);
        Println(" (Fraction * window_size: " + String.format("%.2f", m_Fraction * m_WindowSize) + ")");

        //bTrace = true;
        bWarning = true;

        objId = FIRST_OBJ_ID; // init object identifier
        // create fifo
        windowNodes = new Vector<ISBNode>();
        // create ISB
        ISB = new ISBIndex(m_radius, m_Fraction);

        // init statistics
        m_nBothInlierOutlier = 0;
        m_nOnlyInlier = 0;
        m_nOnlyOutlier = 0;
    }

    /**
     * Credits the neighborhood between new node n and existing neighbor q:
     * increments the per-slide counters of both nodes for the first
     * q.lt_cnt.size() slides — presumably the slides during which both nodes
     * coexist in the window, since q's list is one shorter per elapsed slide
     * (OutputPatterns removes index 0 each time); TODO confirm.
     */
    void UpdateNeighbors(ISBNode n, ISBNode q) {
        if (n == q) return;
        if (bTrace) Println("UpdateNeighbors: n.id: " + n.id + ", q.id: " + q.id);

        int len = q.lt_cnt.size();
        for (int i = 0; i < len; i++) {
            // n.lt_cnt++
            n.lt_cnt.set(i, n.lt_cnt.get(i) + 1);
            // q.lt_cnt++
            q.lt_cnt.set(i, q.lt_cnt.get(i) + 1);
        }
    }

    /**
     * Emits the current slide's classification: a node whose neighbor count
     * for this slide is below Fraction * window_size is an outlier.
     * The consumed slot (index 0) is removed from each node's counter list.
     * When waitWinFull is set, reporting is suppressed until the window fills.
     */
    void OutputPatterns() {
        if (bTrace) Println("OutputPatterns");
        double thr = m_Fraction * m_WindowSize;
        for (ISBNode node : windowNodes) {
            if (node.lt_cnt.size() > 0) {
                if (IsWinFull() || !waitWinFullOption.isSet()) {
                    if (node.lt_cnt.get(0) < thr) {
                        SaveOutlier(node);
                    } else {
                        RemoveOutlier(node);
                    }
                }
                node.lt_cnt.remove(0);
            } else {
                if (bWarning) Println("OutputPatterns: " + node.id + ".lt_cnt is empty!");
            }
        }
    }

    /**
     * Entry point per stream object: evicts at most one expired node,
     * initializes the new node's per-slide counters (one slot per slide of its
     * lifetime, each starting at 1 — presumably counting the node itself;
     * TODO confirm), credits its neighborhood via a single range query,
     * then classifies every window node for this slide.
     */
    @Override
    protected void ProcessNewStreamObj(Instance inst)
    {
        if (bShowProgress) ShowProgress("Processed " + (objId - 1) + " stream objects.");
        // PrintInstance(inst);

        double[] values = getInstanceValues(inst);
        StreamObj obj = new StreamObj(values);

        // process new data stream object
        if (bTrace) Println("\n- - - - - - - - - - - -\n");

        // create new ISB node
        ISBNode nodeNew = new ISBNode(inst, obj, objId);
        if (bTrace) { Print("New node: "); PrintNode(nodeNew); }

        objId++; // update object identifier (slide window)

        // purge expired node
        ISBNode nodeExpired = GetExpiredNode();
        if (nodeExpired != null) {
            // purge nodeExpired
            if (bTrace) { Print("nodeExpired: "); PrintNode(nodeExpired); }
            RemoveNode(nodeExpired);
        }

        // initialize nodeNew.lt_cnt
        if (bTrace) Println("initialize nodeNew");
        for (int i = 0; i < m_WindowSize; i++) {
            nodeNew.lt_cnt.add(1);
        }

        AddNode(nodeNew); // add nodeNew to window and index
        if (bTrace) PrintWindow();

        // perform range query search
        if (bTrace) Println("Perform range query seach");
        nRangeQueriesExecuted++;
        Vector<ISBIndex.ISBSearchResult> neighbors = ISB.RangeSearch(nodeNew, m_radius);

        // process each returned node
        for (ISBSearchResult res : neighbors) {
            ISBNode node = res.node;
            UpdateNeighbors(nodeNew, node);
        }

        OutputPatterns();

        if (bTrace) {
            PrintOutliers();
            for (ISBNode node : windowNodes) {
                Print(node.id + ".lt_count: "); Print_lt_cnt(node.lt_cnt);
            }
        }
    }
}
| Java |
/*
* MCODBase.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.MCOD;
import java.util.*;
import moa.clusterers.outliers.MCOD.ISBIndex.ISBNode;
import moa.clusterers.outliers.MCOD.ISBIndex.ISBNode.NodeType;
import moa.clusterers.outliers.MyBaseOutlierDetector;
/**
 * Common state and helpers shared by the MCOD family of outlier detectors:
 * sliding-window bookkeeping, micro-cluster containers, the event queue of
 * inliers that may become outliers when a neighbor expires, and trace /
 * statistics utilities.
 */
public abstract class MCODBase extends MyBaseOutlierDetector {
    /**
     * Event-queue entry: a node paired with the time (object id) at which its
     * oldest preceding neighbor expires, i.e. when the node must be re-checked.
     */
    protected static class EventItem implements Comparable<EventItem> {
        public ISBNode node;
        public Long timeStamp;

        public EventItem(ISBNode node, Long timeStamp) {
            this.node = node;
            this.timeStamp = timeStamp;
        }

        @Override
        public int compareTo(EventItem t) {
            // Primary order: expiration time, ascending.
            if (this.timeStamp > t.timeStamp) {
                return +1;
            } else if (this.timeStamp < t.timeStamp) {
                return -1;
            } else {
                // Tie-break on node id so distinct nodes never compare equal
                // (the backing TreeSet would silently drop "duplicates").
                if (this.node.id > t.node.id)
                    return +1;
                else if (this.node.id < t.node.id)
                    return -1;
            }
            return 0;
        }
    }

    /**
     * Min-priority queue of EventItems backed by a TreeSet; the minimum is the
     * event with the smallest expiration time.
     */
    protected static class EventQueue {
        public TreeSet<EventItem> setEvents;

        public EventQueue() {
            setEvents = new TreeSet<EventItem>();
        }

        public void Insert(ISBNode node, Long expTime) {
            setEvents.add(new EventItem(node, expTime));
        }

        /** Returns the earliest event without removing it, or null if empty. */
        public EventItem FindMin() {
            if (setEvents.size() > 0) {
                // events are sorted ascending by expiration time
                return setEvents.first();
            }
            return null;
        }

        /** Removes and returns the earliest event, or null if empty. */
        public EventItem ExtractMin() {
            EventItem e = FindMin();
            if (e != null) {
                setEvents.remove(e);
                return e;
            }
            return null;
        }
    }

    // id assigned to the first stream object; ids grow by one per object
    protected static final Long FIRST_OBJ_ID = 1L;

    // object identifier increments with each new data stream object
    protected Long objId;
    // list used to find expired nodes; ordered by arrival, oldest at index 0
    protected Vector<ISBNode> windowNodes;
    // queue of inliers that may turn into outliers when a neighbor expires
    protected EventQueue eventQueue;
    // MTree index of micro-clusters
    protected MTreeMicroClusters mtreeMC;
    // set of micro-clusters (for trace)
    protected TreeSet<MicroCluster> setMC;
    // nodes treated as new nodes when a mc removed
    protected TreeSet<ISBNode> nodesReinsert;
    // index of objects not in any micro-cluster ("set PD")
    protected ISBIndex ISB_PD;
    protected int m_WindowSize;      // sliding window size W
    protected double m_radius;       // neighborhood radius R
    protected int m_k;               // neighbor-count threshold k
    protected double m_theta = 1.0;  // fraction of k required to form a micro-cluster
    public boolean bWarning = false; // enables warning trace output
    // statistics: how expired nodes were classified over their lifetime
    public int m_nBothInlierOutlier;
    public int m_nOnlyInlier;
    public int m_nOnlyOutlier;

    /**
     * Builds an HTML table describing a node (type, position, id and neighbor
     * counters) for display in the GUI tooltip.
     */
    @Override
    public String getObjectInfo(Object obj) {
        if (obj == null) return null;

        ISBNode node = (ISBNode) obj;

        ArrayList<String> infoTitle = new ArrayList<String>();
        ArrayList<String> infoValue = new ArrayList<String>();
        StringBuilder sb = new StringBuilder();

        // show node type
        infoTitle.add("Node type");
        infoValue.add((node.nodeType == NodeType.OUTLIER) ? "Outlier" : "Inlier");

        // show node position
        for (int i = 0; i < node.obj.dimensions(); i++) {
            infoTitle.add("Dim" + (i+1));
            infoValue.add(String.format("%.3f", node.obj.get(i)));
        }

        // show node properties
        infoTitle.add("id");
        infoValue.add(String.format("%d", node.id));
        infoTitle.add("count_after");
        infoValue.add(String.format("%d", node.count_after));
        infoTitle.add("|nn_before|");
        infoValue.add(String.format("%d", node.CountPrecNeighs(GetWindowStart())));

        sb.append("<html>");
        sb.append("<table>");
        int i = 0;
        while(i < infoTitle.size() && i < infoValue.size()){
            sb.append("<tr><td><b>"+infoTitle.get(i)+":</b></td><td>"+infoValue.get(i)+"</td></tr>");
            i++;
        }
        sb.append("</table>");
        sb.append("</html>");
        return sb.toString();
    }

    /**
     * Summarizes lifetime classification of all nodes (expired plus those
     * still in the window) and framework counters inherited from the base.
     */
    @Override
    public String getStatistics() {
        StringBuilder sb = new StringBuilder();
        sb.append("Statistics:\n\n");
        // get counters of expired nodes
        int nBothInlierOutlier = m_nBothInlierOutlier;
        int nOnlyInlier = m_nOnlyInlier;
        int nOnlyOutlier = m_nOnlyOutlier;
        // add counters of non expired nodes
        for (ISBNode node : windowNodes) {
            if ((node.nInlier > 0) && (node.nOutlier > 0))
                nBothInlierOutlier++;
            else if (node.nInlier > 0)
                nOnlyInlier++;
            else
                nOnlyOutlier++;
        }
        int sum = nBothInlierOutlier + nOnlyInlier + nOnlyOutlier;
        if (sum > 0) {
            sb.append(String.format("  Nodes always inlier: %d (%.1f%%)\n", nOnlyInlier, (100 * nOnlyInlier) / (double)sum));
            sb.append(String.format("  Nodes always outlier: %d (%.1f%%)\n", nOnlyOutlier, (100 * nOnlyOutlier) / (double)sum));
            sb.append(String.format("  Nodes both inlier and outlier: %d (%.1f%%)\n", nBothInlierOutlier, (100 * nBothInlierOutlier) / (double)sum));
            sb.append("  (Sum: " + sum + ")\n");
        }
        sb.append("\n  Total range queries: " + nRangeQueriesExecuted + "\n");
        sb.append("  Max memory usage: " + iMaxMemUsage + " MB\n");
        sb.append("  Total process time: " + String.format("%.2f ms", nTotalRunTime / 1000.0) + "\n");
        return sb.toString();
    }

    /** Id of the newest object in the window (objId is the next id to assign). */
    Long GetWindowEnd() {
        return objId - 1;
    }

    /** Id of the oldest object still in the window, clamped to the first id. */
    Long GetWindowStart() {
        Long x = GetWindowEnd() - m_WindowSize + 1;
        if (x < FIRST_OBJ_ID)
            x = FIRST_OBJ_ID;
        return x;
    }

    /** True once m_WindowSize objects have arrived. */
    boolean IsWinFull() {
        return (GetWindowEnd() >= FIRST_OBJ_ID + m_WindowSize - 1);
    }

    /** Time (object id) at which the given node slides out of the window. */
    Long GetExpirationTime(ISBNode node) {
        return node.id + m_WindowSize;
    }

    /** Reports node to the framework as an outlier and updates its counter. */
    void SaveOutlier(ISBNode node) {
        AddOutlier(new Outlier(node.inst, node.id, node));
        node.nOutlier++; // update statistics
    }

    /** Retracts node's outlier status and counts an inlier occurrence. */
    void RemoveOutlier(ISBNode node) {
        RemoveOutlier(new Outlier(node.inst, node.id, node));
        node.nInlier++; // update statistics
    }

    /**
     * Checks whether an object id is inside the current window. One position
     * of tolerance is allowed at the window start — presumably because the
     * expired node may not have been purged yet when this is called
     * (NOTE(review): confirm against callers).
     */
    @Override
    protected boolean IsNodeIdInWin(long id) {
        int toleranceStart = 1;
        Long start = GetWindowStart() - toleranceStart;
        if ( (start <= id) && (id <= GetWindowEnd()) )
            return true;
        else
            return false;
    }

    /** Appends node to the window; arrival order keeps the oldest at index 0. */
    void AddNode(ISBNode node) {
        windowNodes.add(node);
    }

    /** Removes an expired node from the window and folds it into statistics. */
    void RemoveNode(ISBNode node) {
        windowNodes.remove(node);
        RemoveExpiredOutlier(new Outlier(node.inst, node.id, node)); // ### remove when expired?
        // update statistics
        UpdateStatistics(node);
    }

    /** Classifies an expired node as always-inlier / always-outlier / both. */
    void UpdateStatistics(ISBNode node) {
        if ((node.nInlier > 0) && (node.nOutlier > 0))
            m_nBothInlierOutlier++;
        else if (node.nInlier > 0)
            m_nOnlyInlier++;
        else
            m_nOnlyOutlier++;
    }

    /**
     * Returns the oldest node if it has slid out of the window, else null.
     * At most one node can expire per arriving object.
     */
    ISBNode GetExpiredNode() {
        if (windowNodes.size() <= 0)
            return null;

        // get oldest node
        ISBNode node = windowNodes.get(0);
        // check if node has expired
        if (node.id < GetWindowStart()) {
            return node;
        }
        return null;
    }

    /** Registers a micro-cluster in both the MTree index and the trace set. */
    void AddMicroCluster(MicroCluster mc) {
        mtreeMC.add(mc);
        setMC.add(mc);
    }

    /** Unregisters a micro-cluster from both the MTree index and the trace set. */
    void RemoveMicroCluster(MicroCluster mc) {
        mtreeMC.remove(mc);
        setMC.remove(mc);
    }

    /** A micro-cluster found by a range query, with its distance to the query. */
    class SearchResultMC {
        MicroCluster mc;
        double distance;
        public SearchResultMC(MicroCluster mc, double distance) {
            this.mc = mc;
            this.distance = distance;
        }
    }

    /**
     * Range query over micro-cluster centers around nodeNew.
     * Results come back sorted ascending by distance.
     */
    Vector<SearchResultMC> RangeSearchMC(ISBNode nodeNew, double radius) {
        Vector<SearchResultMC> results = new Vector<SearchResultMC>();
        // create a dummy mc in order to search w.r.t. nodeNew
        MicroCluster dummy = new MicroCluster(nodeNew);
        // query results are returned ascending by distance
        MTreeMicroClusters.Query query = mtreeMC.getNearestByRange(dummy, radius);
        for (MTreeMicroClusters.ResultItem q : query) {
            results.add(new SearchResultMC(q.data, q.distance));
        }
        return results;
    }

    /** Euclidean distance between the stream objects of two nodes. */
    double GetEuclideanDist(ISBNode n1, ISBNode n2)
    {
        double diff;
        double sum = 0;
        int d = n1.obj.dimensions();
        for (int i = 0; i < d; i++) {
            diff = n1.obj.get(i) - n2.obj.get(i);
            sum += Math.pow(diff, 2);
        }
        return Math.sqrt(sum);
    }

    /** Trace helper: dumps the id range and every node of the window. */
    void PrintWindow() {
        Println("Window [" + GetWindowStart() + "-" + GetWindowEnd() + "]: ");
        ISBNode node;
        for (int i = 0; i < windowNodes.size(); i++) {
            node = windowNodes.get(i);
            Print("   Node: ");
            PrintNode(node);
        }
    }

    /** Trace helper: prints a node's id and coordinates on one line. */
    void PrintNode(ISBNode n) {
        Print("id=" + n.id + " (");
        int dim = n.obj.dimensions();
        for (int d = 0; d < dim; d++) {
            Print(Double.toString(n.obj.get(d)));
            if (d < dim - 1)
                Print(", ");
        }
        Println(")");
    }

    /** Trace helper: prints the ids of a set of nodes. */
    public void PrintNodeSet(Set<ISBNode> set) {
        for (ISBNode n : set) {
            Print(n.id + " ");
        }
        Println("");
    }

    /** Trace helper: prints the center ids of a set of micro-clusters. */
    public void PrintMCSet(Set<MicroCluster> set) {
        for (MicroCluster n : set) {
            Print(n.mcc.id + " ");
        }
        Println("");
    }

    /** Trace helper: prints the ids of a vector of nodes. */
    public void PrintNodeVector(Vector<ISBNode> vector) {
        for (ISBNode n : vector) {
            Print(n.id + " ");
        }
        Println("");
    }

    /** Trace helper: prints the ids of a list of nodes. */
    public void PrintNodeList(List<ISBNode> list) {
        for (ISBNode n : list) {
            Print(n.id + " ");
        }
        Println("");
    }

    /** Trace helper: dumps every (node id, expiration time) pair in the queue. */
    public void PrintEventQueue() {
        Println("event queue: ");
        for (EventItem n : eventQueue.setEvents) {
            Printf("  id=%d, exp=%d\n", n.node.id, n.timeStamp);
        }
    }

    /** Trace helper: prints the ids of all nodes currently in set PD. */
    public void PrintPD() {
        Print("PD: ");
        for (ISBNode n : ISB_PD.GetAllNodes()) {
            Print(n.id + " ");
        }
        Println("");
    }
}
| Java |
/*
* MicroCluster.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.MCOD;
import java.util.ArrayList;
import moa.clusterers.outliers.MCOD.ISBIndex.ISBNode;
import moa.clusterers.outliers.utils.mtree.DistanceFunctions.EuclideanCoordinate;
public class MicroCluster implements EuclideanCoordinate, Comparable<MicroCluster> {
public ISBNode mcc;
public ArrayList<ISBNode> nodes;
public MicroCluster(ISBNode mcc) {
this.mcc = mcc;
nodes = new ArrayList<ISBNode>();
AddNode(mcc);
}
public void AddNode(ISBNode node) {
if (node != null)
nodes.add(node);
}
public void RemoveNode(ISBNode node) {
if (node != null)
nodes.remove(node);
}
public int GetNodesCount() {
return nodes.size();
}
@Override
public int dimensions() {
return mcc.obj.dimensions();
}
@Override
public double get(int index) {
return mcc.obj.get(index);
}
@Override
public boolean equals(Object obj) {
if (obj instanceof MicroCluster) {
MicroCluster that = (MicroCluster) obj;
if (this.dimensions() != that.dimensions()) {
return false;
}
for (int i = 0; i < this.dimensions(); i++) {
if (this.mcc.obj.get(i) != that.mcc.obj.get(i)) {
return false;
}
}
return true;
} else {
return false;
}
}
@Override
public int compareTo(MicroCluster that) {
int dimensions = Math.min(this.dimensions(), that.dimensions());
for (int i = 0; i < dimensions; i++) {
double v1 = this.mcc.obj.get(i);
double v2 = that.mcc.obj.get(i);
if (v1 > v2) {
return +1;
}
if (v1 < v2) {
return -1;
}
}
if (this.dimensions() > dimensions) {
return +1;
}
if (that.dimensions() > dimensions) {
return -1;
}
return 0;
}
} | Java |
/*
* MCOD.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.MCOD;
import java.util.ArrayList;
import java.util.TreeSet;
import java.util.Vector;
import moa.clusterers.outliers.MCOD.ISBIndex.ISBNode;
import moa.clusterers.outliers.MCOD.ISBIndex.ISBNode.NodeType;
import moa.clusterers.outliers.MCOD.ISBIndex.ISBSearchResult;
import moa.options.FloatOption;
import moa.options.IntOption;
import weka.core.Instance;
//The algorithm is described in
// M. Kontaki, A. Gounaris, A. N. Papadopoulos, K. Tsichlas, and Y. Manolopoulos.
//Continuous monitoring of distance-based outliers over data streams.
//In ICDE, pages 135–146, 2011.
/**
 * MCOD: continuous monitoring of distance-based outliers over a sliding
 * window (Kontaki et al., ICDE 2011). An object is an inlier iff at least
 * k other window objects lie within radius R of it; micro-clusters of
 * radius R/2 are used so their members are inliers by construction and
 * need no range queries.
 */
public class MCOD extends MCODBase {
    public FloatOption radiusOption = new FloatOption("radius", 'r', "Search radius.", 0.1);
    public IntOption kOption = new IntOption("k", 't', "Parameter k.", 50);

    public MCOD()
    {
        // System.out.println("MCOD: created");
    }

    /** Reads the CLI options and (re)creates all window/index structures. */
    @Override
    public void Init() {
        super.Init();

        m_WindowSize = windowSizeOption.getValue();
        m_radius = radiusOption.getValue();
        m_k = kOption.getValue();

        Println("Init MCOD:");
        Println("   window_size: " + m_WindowSize);
        Println("   radius: " + m_radius);
        Println("   k: " + m_k);

        //bTrace = true;
        //bWarning = true;

        objId = FIRST_OBJ_ID; // init object identifier
        // create nodes list of window
        windowNodes = new Vector<ISBNode>();
        // create ISB
        ISB_PD = new ISBIndex(m_radius, m_k);
        // create helper sets for micro-cluster management
        setMC = new TreeSet<MicroCluster>();
        // micro-cluster index
        mtreeMC = new MTreeMicroClusters();
        // create event queue
        eventQueue = new EventQueue();

        // init statistics
        m_nBothInlierOutlier = 0;
        m_nOnlyInlier = 0;
        m_nOnlyOutlier = 0;
    }

    /** Sets the node's type and bumps its lifetime inlier/outlier counter. */
    void SetNodeType(ISBNode node, NodeType type) {
        node.nodeType = type;
        // update statistics
        if (type == NodeType.OUTLIER)
            node.nOutlier++;
        else
            node.nInlier++;
    }

    /**
     * Records q as a neighbor of node: older neighbors go into nn_before
     * (they expire before node), newer ones only bump count_after. When
     * bUpdateState is set and node just reached k neighbors, node is
     * promoted from outlier to PD-inlier and scheduled on the event queue.
     */
    void AddNeighbor(ISBNode node, ISBNode q, boolean bUpdateState) {
        if (bTrace) Println("AddNeighbor: node.id: " + node.id + ", q.id: " + q.id);
        // check if q still in window
        if (IsNodeIdInWin(q.id) == false) {
            if (bWarning) Println("AddNeighbor: node.id: " + node.id + ", q.id: " + q.id + " (expired)");
            return;
        }

        if (q.id < node.id) {
            node.AddPrecNeigh(q);
        } else {
            node.count_after++;
        }

        if (bUpdateState) {
            // check if node inlier or outlier
            int count = node.count_after + node.CountPrecNeighs(GetWindowStart());
            if ((node.nodeType == NodeType.OUTLIER) && (count >= m_k)) {
                // remove node from outliers
                if (bTrace) Println("Remove node from outliers");
                RemoveOutlier(node);
                // add node to inlier set PD
                SetNodeType(node, NodeType.INLIER_PD);
                // insert node to event queue
                ISBNode nodeMinExp = node.GetMinPrecNeigh(GetWindowStart());
                AddToEventQueue(node, nodeMinExp);
            }
        }
    }

    /**
     * Core insertion routine. Tries to put nodeNew into an existing
     * micro-cluster (center within R/2); otherwise range-searches set PD,
     * either forms a new micro-cluster (enough nodes within R/2) or adds
     * nodeNew to PD and classifies it as inlier/outlier. Used both for
     * fresh arrivals (bNewNode=true) and for nodes re-inserted after their
     * micro-cluster was dispersed (bNewNode=false).
     */
    void ProcessNewNode(ISBNode nodeNew, boolean bNewNode) {
        if (bTrace) { Print("ProcessNewNode: "); PrintNode(nodeNew); }

        if (bTrace) Println("Perform 3R/2 range query to cluster centers w.r.t new node");
        Vector<SearchResultMC> resultsMC;
        // results are sorted ascending by distance
        resultsMC = RangeSearchMC(nodeNew, 1.5 * m_radius);
        if (bTrace) {
            Println("MC query found: ");
            for (SearchResultMC sr : resultsMC) {
                Printf("   (%.1f) mcc: ", sr.distance); PrintNode(sr.mc.mcc);
            }
        }

        if (bTrace) Println("Get closest micro-cluster");
        MicroCluster mcClosest = null;
        if (resultsMC.size() > 0) {
            mcClosest = resultsMC.get(0).mc;
            if (bTrace) Println("Closest mcc: " + mcClosest.mcc.id);
        }

        // check if nodeNew can be inserted to closest micro-cluster
        boolean bFoundMC = false;
        if (mcClosest != null) {
            double d = GetEuclideanDist(nodeNew, mcClosest.mcc);
            if (d <= m_radius / 2) {
                bFoundMC = true;
            } else {
                if (bTrace) Println("Not close enough to closest mcc");
            }
        }

        if (bFoundMC) {
            // members of a micro-cluster are inliers by construction
            if (bTrace) Println("Add new node to micro-cluster");
            nodeNew.mc = mcClosest;
            SetNodeType(nodeNew, NodeType.INLIER_MC);
            mcClosest.AddNode(nodeNew);
            if (bTrace) { Print("mcClosest.nodes: "); PrintNodeList(mcClosest.nodes); }

            // nodeNew may still be a neighbor of PD nodes near this cluster
            if (bTrace) Println("Update neighbors of set PD");
            Vector<ISBNode> nodes;
            nodes = ISB_PD.GetAllNodes();
            for (ISBNode q : nodes) {
                if (q.Rmc.contains(mcClosest)) {
                    if (GetEuclideanDist(q, nodeNew) <= m_radius) {
                        if (bNewNode) {
                            // update q.count_after and its outlierness
                            AddNeighbor(q, nodeNew, true);
                        } else {
                            if (nodesReinsert.contains(q)) {
                                // update q.count_after or q.nn_before and its outlierness
                                AddNeighbor(q, nodeNew, true);
                            }
                        }
                    }
                }
            }
        }
        else {
            // No close enough micro-cluster found.
            // Perform 3R/2 range query to nodes in set PD.
            if (bTrace) Println("Perform 3R/2 range query to nodes in set PD");
            nRangeQueriesExecuted++;
            // create helper sets for micro-cluster management:
            // setNC = PD nodes within R/2 (candidate members of a new mc),
            // setNNC = PD nodes within (R/2, 3R/2] (only their Rmc is updated)
            ArrayList<ISBNode> setNC = new ArrayList<ISBNode>();
            ArrayList<ISBNode> setNNC = new ArrayList<ISBNode>();
            Vector<ISBSearchResult> resultNodes;
            resultNodes = ISB_PD.RangeSearch(nodeNew, 1.5 * m_radius); // 1.5 ###
            for (ISBSearchResult sr : resultNodes) {
                ISBNode q = sr.node;
                if (sr.distance <= m_radius) {
                    // add q to neighs of nodeNew
                    AddNeighbor(nodeNew, q, false);
                    if (bNewNode) {
                        // update q.count_after and its outlierness
                        AddNeighbor(q, nodeNew, true);
                    } else {
                        if (nodesReinsert.contains(q)) {
                            // update q.count_after or q.nn_before and its outlierness
                            AddNeighbor(q, nodeNew, true);
                        }
                    }
                }

                if (sr.distance <= m_radius / 2.0) {
                    setNC.add(q);
                } else {
                    setNNC.add(q);
                }
            }
            if (bTrace) {
                Print("Prec neighs of new node: "); PrintNodeList(nodeNew.Get_nn_before());
                Print("NC: "); PrintNodeList(setNC);
                Print("NNC: "); PrintNodeList(setNNC);
            }

            // check if size of set NC big enough to create cluster
            if (bTrace) Println("Check size of set NC");
            if (setNC.size() >= m_theta * m_k) {
                // create new micro-cluster with center nodeNew
                if (bTrace) Println("Create new micro-cluster");
                MicroCluster mcNew = new MicroCluster(nodeNew);
                AddMicroCluster(mcNew);
                nodeNew.mc = mcNew;
                SetNodeType(nodeNew, NodeType.INLIER_MC);

                if (bTrace) Println("Add to new mc nodes within range R/2");
                for (ISBNode q : setNC) {
                    q.mc = mcNew;
                    mcNew.AddNode(q);
                    // move q from set PD to set inlier-mc
                    SetNodeType(q, NodeType.INLIER_MC);
                    ISB_PD.Remove(q);
                    RemoveOutlier(q); // needed? ###
                }
                if (bTrace) {
                    Print("mcNew.nodes: "); PrintNodeList(mcNew.nodes);
                    PrintPD();
                }

                if (bTrace) Println("Update Rmc lists of nodes of PD in range 3R/2 from mcNew");
                for (ISBNode q : setNNC) {
                    q.Rmc.add(mcNew);
                    if (bTrace) { Print(q.id + ".Rmc: "); PrintMCSet(q.Rmc); }
                }
            } else {
                // nodeNew stays in set PD; count also neighbors that live
                // inside nearby micro-clusters (not indexed in ISB_PD)
                if (bTrace) Println("Add to nodeNew neighs nodes of near micro-clusters");
                for (SearchResultMC sr : resultsMC) {
                    for (ISBNode q : sr.mc.nodes) {
                        if (GetEuclideanDist(q, nodeNew) <= m_radius) {
                            // add q to neighs of nodeNew
                            AddNeighbor(nodeNew, q, false);
                        }
                    }
                }
                if (bTrace) {
                    Println("nodeNew.count_after = " + nodeNew.count_after);
                    Print("nodeNew.nn_before: "); PrintNodeList(nodeNew.Get_nn_before());
                }

                if (bTrace) Println("Insert nodeNew to index of nodes of PD");
                ISB_PD.Insert(nodeNew);
                if (bTrace) PrintPD();

                // check if nodeNew is an inlier or outlier
                // use both nn_before and count_after for case bNewNode=false
                int count = nodeNew.CountPrecNeighs(GetWindowStart()) + nodeNew.count_after;
                if (count >= m_k) {
                    if (bTrace) Println("nodeNew is an inlier");
                    SetNodeType(nodeNew, NodeType.INLIER_PD);
                    // insert nodeNew to event queue
                    ISBNode nodeMinExp = nodeNew.GetMinPrecNeigh(GetWindowStart());
                    AddToEventQueue(nodeNew, nodeMinExp);
                } else {
                    if (bTrace) Println("nodeNew is an outlier");
                    SetNodeType(nodeNew, NodeType.OUTLIER);
                    SaveOutlier(nodeNew);
                }

                if (bTrace) Println("Update nodeNew.Rmc");
                for (SearchResultMC sr : resultsMC) {
                    nodeNew.Rmc.add(sr.mc);
                }
                if (bTrace) { Print("nodeNew.Rmc: "); PrintMCSet(nodeNew.Rmc); }
            }
        }
    }

    /**
     * Schedules x for re-check at the expiration time of its oldest
     * preceding neighbor. No-op (with optional warning) when x has no
     * preceding neighbors, since then no expiration can demote it.
     */
    void AddToEventQueue(ISBNode x, ISBNode nodeMinExp) {
        if (bTrace) Println("AddToEventQueue x.id: " + x.id);
        if (nodeMinExp != null) {
            Long expTime = GetExpirationTime(nodeMinExp);
            eventQueue.Insert(x, expTime);
            if (bTrace) {
                Print("x.nn_before: "); PrintNodeList(x.Get_nn_before());
                Println("nodeMinExp: " + nodeMinExp.id + ", expTime = " + expTime);
                PrintEventQueue();
            }
        } else {
            if (bWarning) Println("AddToEventQueue: Cannot add x.id: " + x.id + " to event queue (nn_before is empty, count_after=" + x.count_after + ")");
        }
    }

    /**
     * Pops every due event: drops the expired node from each affected
     * node's nn_before, then either demotes the node to outlier (fewer
     * than k neighbors left) or reschedules it on its next-oldest
     * preceding neighbor.
     */
    void ProcessEventQueue(ISBNode nodeExpired) {
        EventItem e = eventQueue.FindMin();
        while ((e != null) && (e.timeStamp <= GetWindowEnd())) {
            e = eventQueue.ExtractMin();
            ISBNode x = e.node;
            if (bTrace) Println("Process event queue: check node x: " + x.id);
            // node x must be in window and not in any micro-cluster
            boolean bValid = ( IsNodeIdInWin(x.id) && (x.mc == null) );
            if (bValid) {
                // remove nodeExpired from x.nn_before
                x.RemovePrecNeigh(nodeExpired);
                // get amount of neighbors of x
                int count = x.count_after + x.CountPrecNeighs(GetWindowStart());
                if (count < m_k) {
                    if (bTrace) Println("x is an outlier");
                    SetNodeType(x, NodeType.OUTLIER);
                    SaveOutlier(x);
                } else {
                    if (bTrace) Println("x is an inlier, add to event queue");
                    // get oldest preceding neighbor of x
                    ISBNode nodeMinExp = x.GetMinPrecNeigh(GetWindowStart());
                    // add x to event queue
                    AddToEventQueue(x, nodeMinExp);
                }
            } else {
                if (bWarning) Println("Process event queue: node x.id: " + x.id + " is not valid!");
            }
            e = eventQueue.FindMin();
        }
    }

    /**
     * Evicts an expired node. If its micro-cluster shrinks below k members,
     * the cluster is dispersed and all remaining members are re-processed
     * as (re-inserted) new nodes; otherwise the node is simply removed from
     * set PD. Finally the event queue is drained for due events.
     */
    void ProcessExpiredNode(ISBNode nodeExpired) {
        if (nodeExpired != null) {
            if (bTrace) Println("\nnodeExpired: " + nodeExpired.id);
            MicroCluster mc = nodeExpired.mc;
            if (mc != null) {
                if (bTrace) Println("nodeExpired belongs to mc: " + mc.mcc.id);
                mc.RemoveNode(nodeExpired);
                if (bTrace) { Print("mc.nodes: "); PrintNodeList(mc.nodes); }

                if (bTrace) Println("Check if mc has enough objects");
                if (mc.GetNodesCount() < m_k) {
                    // remove micro-cluster mc
                    if (bTrace) Println("Remove mc");
                    RemoveMicroCluster(mc);

                    // insert nodes of mc to set nodesReinsert
                    nodesReinsert = new TreeSet<ISBNode>();
                    for (ISBNode q : mc.nodes) {
                        nodesReinsert.add(q);
                    }

                    // treat each node of mc as new node
                    for (ISBNode q : mc.nodes) {
                        if (bTrace) Println("\nTreat as new node q: " + q.id);
                        q.InitNode();
                        ProcessNewNode(q, false);
                    }
                }
            } else {
                // nodeExpired belongs to set PD
                // remove nodeExpired from PD index
                ISB_PD.Remove(nodeExpired);
            }

            RemoveNode(nodeExpired);
            ProcessEventQueue(nodeExpired);
        }
    }

    /**
     * Per-object entry point: wraps the instance in an ISB node, inserts it
     * into the window, runs the MCOD insertion logic, then expires at most
     * one node that slid out of the window.
     */
    @Override
    protected void ProcessNewStreamObj(Instance inst)
    {
        if (bShowProgress) ShowProgress("Processed " + (objId-1) + " stream objects.");
        // PrintInstance(inst);

        double[] values = getInstanceValues(inst);
        StreamObj obj = new StreamObj(values);

        if (bTrace) Println("\n- - - - - - - - - - - -\n");

        // create new ISB node
        ISBNode nodeNew = new ISBNode(inst, obj, objId);
        if (bTrace) { Print("New node: "); PrintNode(nodeNew); }

        objId++; // update object identifier (slide window)

        AddNode(nodeNew); // add nodeNew to window
        if (bTrace) PrintWindow();

        ProcessNewNode(nodeNew, true);
        ProcessExpiredNode(GetExpiredNode());

        if (bTrace) {
            Print("Micro-clusters: "); PrintMCSet(setMC);
            PrintOutliers();
            PrintPD();
        }
    }
}
| Java |
/*
* MTreeMicroClusters.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.MCOD;
import java.util.Set;
import moa.clusterers.outliers.utils.mtree.ComposedSplitFunction;
import moa.clusterers.outliers.utils.mtree.DistanceFunction;
import moa.clusterers.outliers.utils.mtree.DistanceFunctions;
import moa.clusterers.outliers.utils.mtree.MTree;
import moa.clusterers.outliers.utils.mtree.PartitionFunctions;
import moa.clusterers.outliers.utils.mtree.PromotionFunction;
import moa.clusterers.outliers.utils.mtree.utils.Pair;
import moa.clusterers.outliers.utils.mtree.utils.Utils;
/**
 * M-tree specialized for {@link MicroCluster} elements, configured with a
 * deterministic (non-random) promotion strategy and a balanced partition,
 * and running the tree's internal consistency check after every mutation.
 */
class MTreeMicroClusters extends MTree<MicroCluster> {
    /** Deterministic promotion: pick the min/max pair instead of random picks. */
    private static final PromotionFunction<MicroCluster> DETERMINISTIC_PROMOTION =
            new PromotionFunction<MicroCluster>() {
                @Override
                public Pair<MicroCluster> process(Set<MicroCluster> dataSet,
                        DistanceFunction<? super MicroCluster> distanceFunction) {
                    return Utils.minMax(dataSet);
                }
            };

    MTreeMicroClusters() {
        super(2, DistanceFunctions.EUCLIDEAN, buildSplitFunction());
    }

    /** Composes the deterministic promotion with a balanced partition. */
    private static ComposedSplitFunction<MicroCluster> buildSplitFunction() {
        return new ComposedSplitFunction<MicroCluster>(
                DETERMINISTIC_PROMOTION,
                new PartitionFunctions.BalancedPartition<MicroCluster>());
    }

    /** Inserts a micro-cluster, then verifies the tree invariants. */
    public void add(MicroCluster data) {
        super.add(data);
        _check();
    }

    /** Removes a micro-cluster, then verifies the tree invariants. */
    public boolean remove(MicroCluster data) {
        boolean removed = super.remove(data);
        _check();
        return removed;
    }

    /** Exposes the configured distance function to package-local callers. */
    DistanceFunction<? super MicroCluster> getDistanceFunction() {
        return distanceFunction;
    }
}
| Java |
/*
* StreamObj.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.MCOD;
import moa.clusterers.outliers.utils.mtree.DistanceFunctions.EuclideanCoordinate;
public class StreamObj implements EuclideanCoordinate, Comparable<StreamObj> {
private final double[] values;
private final int hashCode;
public StreamObj(double... values) {
this.values = values;
int h = 1;
for (double value : values) {
h = 31 * (int) h + (int) value;
}
this.hashCode = h;
}
@Override
public int dimensions() {
return values.length;
}
@Override
public double get(int index) {
return values[index];
}
@Override
public int hashCode() {
return hashCode;
}
@Override
public boolean equals(Object obj) {
if (obj instanceof StreamObj) {
StreamObj that = (StreamObj) obj;
if (this.dimensions() != that.dimensions()) {
return false;
}
for (int i = 0; i < this.dimensions(); i++) {
if (this.values[i] != that.values[i]) {
return false;
}
}
return true;
} else {
return false;
}
}
@Override
public int compareTo(StreamObj that) {
int dimensions = Math.min(this.dimensions(), that.dimensions());
for (int i = 0; i < dimensions; i++) {
double v1 = this.values[i];
double v2 = that.values[i];
if (v1 > v2) {
return +1;
}
if (v1 < v2) {
return -1;
}
}
if (this.dimensions() > dimensions) {
return +1;
}
if (that.dimensions() > dimensions) {
return -1;
}
return 0;
}
} | Java |
/*
* ISBIndex.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.MCOD;
import java.util.*;
import weka.core.Instance;
/**
 * Index of stream objects ("ISB"): an M-tree over the distinct StreamObjs
 * combined with a map from StreamObj hash code to the set of nodes carrying
 * that hash, so several nodes may share one indexed object. Range queries
 * return every node whose object lies within the given radius.
 */
public class ISBIndex {
    /**
     * A window object: the raw instance, its coordinates, its arrival id,
     * and all per-node MCOD state (cluster membership, neighbor counters).
     * Nodes order by id, i.e. by arrival time.
     */
    public static class ISBNode implements Comparable<ISBNode> {
        public static enum NodeType { OUTLIER, INLIER_MC, INLIER_PD }

        public Instance inst;
        public StreamObj obj;
        public Long id;
        // micro-cluster this node belongs to (null while in set PD)
        public MicroCluster mc;
        // micro-clusters whose center is near this node (candidates to join)
        public Set<MicroCluster> Rmc;
        // number of neighbors with a larger id (they cannot expire before us)
        public int count_after;
        public NodeType nodeType;
        // preceding neighbors (smaller id), kept sorted ascending by id
        private ArrayList<ISBNode> nn_before;

        // statistics: lifetime inlier/outlier classification counts
        public int nOutlier;
        public int nInlier;

        public ISBNode(Instance inst, StreamObj obj, Long id) {
            this.inst = inst;
            this.obj  = obj;
            this.id   = id;

            // init statistics
            nOutlier = 0;
            nInlier = 0;

            // init other fields
            InitNode();
        }

        /**
         * Resets all MCOD state; also called when the node is re-inserted
         * after its micro-cluster was dispersed. count_after starts at 1 —
         * apparently so the node counts itself toward the k threshold
         * (NOTE(review): confirm against the count >= m_k checks in MCOD).
         */
        public void InitNode() {
            this.mc         = null;
            this.Rmc        = new TreeSet<MicroCluster>();
            this.count_after  = 1;
            this.nodeType   = NodeType.INLIER_PD;
            this.nn_before  = new ArrayList<ISBNode>();
        }

        @Override
        public int compareTo(ISBNode t) {
            if (this.id > t.id)
                return +1;
            else if (this.id < t.id)
                return -1;
            return 0;
        }

        /** Inserts node into nn_before, keeping it sorted by id (no duplicates). */
        public void AddPrecNeigh(ISBNode node) {
            int pos = Collections.binarySearch(nn_before, node);
            if (pos < 0) {
                // item does not exist, so add it to the right position
                nn_before.add(-(pos + 1), node);
            }
        }

        /** Removes node from nn_before if present. */
        public void RemovePrecNeigh(ISBNode node) {
            int pos = Collections.binarySearch(nn_before, node);
            if (pos >= 0) {
                // item exists
                nn_before.remove(pos);
            }
        }

        /**
         * Returns the preceding neighbor with the smallest id >= sinceId
         * (the oldest one still inside the window), or null if none.
         */
        public ISBNode GetMinPrecNeigh(Long sinceId) {
            if (nn_before.size() > 0) {
                int startPos;
                ISBNode dummy = new ISBNode(null, null, sinceId);
                int pos = Collections.binarySearch(nn_before, dummy);
                if (pos < 0) {
                    // item does not exist, should insert at position startPos
                    startPos = -(pos + 1);
                } else {
                    // item exists at startPos
                    startPos = pos;
                }

                if (startPos < nn_before.size()) {
                    return nn_before.get(startPos);
                }
            }
            return null;
        }

        /** Number of preceding neighbors with id >= sinceId (still in window). */
        public int CountPrecNeighs(Long sinceId) {
            if (nn_before.size() > 0) {
                // get number of neighs with id >= sinceId
                int startPos;
                ISBNode dummy = new ISBNode(null, null, sinceId);
                int pos = Collections.binarySearch(nn_before, dummy);
                if (pos < 0) {
                    // item does not exist, should insert at position startPos
                    startPos = -(pos + 1);
                } else {
                    // item exists at startPos
                    startPos = pos;
                }

                if (startPos < nn_before.size()) {
                    return nn_before.size() - startPos;
                }
            }
            return 0;
        }

        /** Direct (mutable) view of the sorted preceding-neighbor list. */
        public List<ISBNode> Get_nn_before() {
            return nn_before;
        }
    }

    // M-tree over the distinct stream objects
    MTreeStreamObjects mtree;
    // StreamObj.hashCode() -> set of nodes carrying that hash (bucket may
    // contain nodes of different objects that collide on the hash)
    Map<Integer, Set<ISBNode>> mapNodes;
    double m_radius;
    int m_k; // k nearest neighbors

    public ISBIndex(double radius, int k) {
        mtree = new MTreeStreamObjects();
        mapNodes = new HashMap<Integer, Set<ISBNode>>();
        m_radius = radius;
        m_k = k;
    }

    /** Collects every node currently registered in the index. */
    Vector<ISBNode> GetAllNodes() {
        Vector<ISBNode> v = new Vector<ISBNode>();
        Iterator it = mapNodes.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry pairs = (Map.Entry) it.next();
            Set<ISBNode> setNodes = (Set<ISBNode>) pairs.getValue();
            for (ISBNode n : setNodes) {
                v.add(n);
            }
        }
        return v;
    }

    /** A node found by a range query, with its distance to the query object. */
    public static class ISBSearchResult {
        public ISBNode node;
        public double distance;
        public ISBSearchResult(ISBNode n, double distance) {
            this.node = n;
            this.distance = distance;
        }
    }

    /**
     * Returns every node whose object lies within radius of node's object,
     * expanding each matching object to all nodes that reference it.
     */
    public Vector<ISBSearchResult> RangeSearch(ISBNode node, double radius) {
        Vector<ISBSearchResult> results = new Vector<ISBSearchResult>();
        StreamObj obj;
        double d;
        MTreeStreamObjects.Query query = mtree.getNearestByRange(node.obj, radius);
        for (MTreeStreamObjects.ResultItem q : query) {
            // get next obj found within range
            obj = q.data;
            // get distance of obj from query
            d = q.distance;
            // get all nodes referencing obj
            Vector<ISBNode> nodes = MapGetNodes(obj);
            for (int i = 0; i < nodes.size(); i++)
                results.add(new ISBSearchResult(nodes.get(i), d));
        }
        return results;
    }

    /** Registers a node: its object goes into the M-tree, the node into the map. */
    public void Insert(ISBNode node) {
        // insert object of node at mtree
        mtree.add(node.obj);
        // insert node at map
        MapInsert(node);
    }

    /**
     * Unregisters a node; the shared stream object is removed from the
     * M-tree only when no other node references it anymore.
     */
    public void Remove(ISBNode node) {
        // remove from map
        MapDelete(node);
        // check if stream object at mtree is still being referenced
        if (MapCountObjRefs(node.obj) <= 0) {
            // delete stream object from mtree
            mtree.remove(node.obj);
        }
    }

    /** All nodes whose object equals obj (hash bucket filtered by equals). */
    Vector<ISBNode> MapGetNodes(StreamObj obj) {
        int h = obj.hashCode();
        Vector<ISBNode> v = new Vector<ISBNode>();
        if (mapNodes.containsKey(h)) {
            Set<ISBNode> s = mapNodes.get(h);
            ISBNode node;
            Iterator<ISBNode> i = s.iterator();
            while (i.hasNext()) {
                node = i.next();
                if (node.obj.equals(obj))
                    v.add(node);
            }
        }
        return v;
    }

    /** Number of nodes whose object equals obj. */
    int MapCountObjRefs(StreamObj obj) {
        int h = obj.hashCode();
        int iCount = 0;
        if (mapNodes.containsKey(h)) {
            Set<ISBNode> s = mapNodes.get(h);
            ISBNode n;
            Iterator<ISBNode> i = s.iterator();
            while (i.hasNext()) {
                n = i.next();
                if (n.obj.equals(obj))
                    iCount++;
            }
        }
        return iCount;
    }

    /** Adds node to its hash bucket, creating the bucket if missing. */
    void MapInsert(ISBNode node) {
        int h = node.obj.hashCode();
        Set<ISBNode> s;
        if (mapNodes.containsKey(h)) {
            s = mapNodes.get(h);
            s.add(node);
        }
        else {
            s = new HashSet<ISBNode>();
            s.add(node);
            mapNodes.put(h, s);
        }
    }

    /** Removes node from its hash bucket, dropping the bucket when emptied. */
    void MapDelete(ISBNode node) {
        int h = node.obj.hashCode();
        if (mapNodes.containsKey(h)) {
            Set<ISBNode> s = mapNodes.get(h);
            s.remove(node);
            if (s.isEmpty()) { // ### added
                mapNodes.remove(h);
            }
        }
    }
}
| Java |
/*
* MTreeStreamObjects.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.MCOD;
import java.util.Set;
import moa.clusterers.outliers.utils.mtree.ComposedSplitFunction;
import moa.clusterers.outliers.utils.mtree.DistanceFunction;
import moa.clusterers.outliers.utils.mtree.DistanceFunctions;
import moa.clusterers.outliers.utils.mtree.MTree;
import moa.clusterers.outliers.utils.mtree.PartitionFunctions;
import moa.clusterers.outliers.utils.mtree.PromotionFunction;
import moa.clusterers.outliers.utils.mtree.utils.Pair;
import moa.clusterers.outliers.utils.mtree.utils.Utils;
/**
 * M-tree specialised for {@link StreamObj} entries: Euclidean distance,
 * degree 2, balanced partitioning, and a deterministic promotion strategy.
 * Tree invariants are verified after every mutation.
 */
class MTreeStreamObjects extends MTree<StreamObj> {

    // Deterministic promotion: pick the two objects lying farthest apart
    // (via Utils.minMax) instead of promoting randomly.
    private static final PromotionFunction<StreamObj> nonRandomPromotion = new PromotionFunction<StreamObj>() {
        @Override
        public Pair<StreamObj> process(Set<StreamObj> dataSet, DistanceFunction<? super StreamObj> distanceFunction) {
            return Utils.minMax(dataSet);
        }
    };

    MTreeStreamObjects() {
        // Degree-2 M-tree over Euclidean distance with balanced splits.
        super(2, DistanceFunctions.EUCLIDEAN,
                new ComposedSplitFunction<StreamObj>(
                        nonRandomPromotion,
                        new PartitionFunctions.BalancedPartition<StreamObj>()));
    }

    public void add(StreamObj data) {
        super.add(data);
        _check(); // verify tree invariants after the insertion
    }

    public boolean remove(StreamObj data) {
        boolean removed = super.remove(data);
        _check(); // verify tree invariants after the removal
        return removed;
    }

    DistanceFunction<? super StreamObj> getDistanceFunction() {
        return distanceFunction;
    }
}
| Java |
/*
* Test.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers.MCOD;
import moa.streams.ArffFileStream;
import moa.streams.clustering.RandomRBFGeneratorEvents;
import weka.core.Instance;
public class Test {

    /**
     * Small driver: feeds instances from a random RBF generator into the
     * MCOD outlier detector and reports total processing time.
     *
     * @param args ignored
     * @throws Exception if stream or detector preparation fails
     */
    public static void main(String[] args) throws Exception {
        int numInstances = 50000;

        //moa.streams.ArffFileStream stream = new ArffFileStream("./datasets/debug_2.txt", -1);
        RandomRBFGeneratorEvents stream = new RandomRBFGeneratorEvents();
        stream.prepareForUse();

        MCOD myOutlierDetector = new MCOD();
        /*myOutlierDetector.kOption.setValue(3);
        myOutlierDetector.radiusOption.setValue(5);
        myOutlierDetector.windowSizeOption.setValue(6);*/
        myOutlierDetector.setModelContext(stream.getHeader());
        myOutlierDetector.prepareForUse();

        // Use a primitive long; the boxed Long only added autoboxing overhead.
        long tmStart = System.currentTimeMillis();

        int numberSamples = 0;
        int w = myOutlierDetector.windowSizeOption.getValue();
        // Guard against a window of size < 2, which would make (w / 2) zero
        // and raise an ArithmeticException in the modulo below.
        int halfWindow = Math.max(1, w / 2);
        while (stream.hasMoreInstances() && (numberSamples < numInstances)) {
            Instance newInst = stream.nextInstance();
            myOutlierDetector.processNewInstanceImpl(newInst);
            numberSamples++;

            if (numberSamples % 100 == 0) {
                //System.out.println("Processed " + numberSamples + " stream objects.");
            }

            if ((numberSamples % halfWindow) == 0) {
                //myOutlierDetector.PrintOutliers();
            }
        }

        //myOutlierDetector.PrintOutliers();
        System.out.println("Total time = " + (System.currentTimeMillis() - tmStart) + " ms");
    }
}
| Java |
/*
* TestSpeed.java
* Copyright (C) 2013 Aristotle University of Thessaloniki, Greece
* @author D. Georgiadis, A. Gounaris, A. Papadopoulos, K. Tsichlas, Y. Manolopoulos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.outliers;
import moa.clusterers.outliers.AbstractC.AbstractC;
import moa.clusterers.outliers.Angiulli.ExactSTORM;
import moa.clusterers.outliers.MCOD.MCOD;
import moa.clusterers.outliers.SimpleCOD.SimpleCOD;
import moa.streams.clustering.RandomRBFGeneratorEvents;
import weka.core.Instance;
public class TestSpeed {

    /**
     * Benchmark driver: streams instances from a random RBF generator through
     * one of several outlier detectors (only MCOD is enabled below) and
     * prints the elapsed wall-clock time.
     *
     * @param args ignored
     * @throws Exception if stream or detector preparation fails
     */
    public static void main(String[] args) throws Exception {
        int numInstances = 2000;

        RandomRBFGeneratorEvents stream = new RandomRBFGeneratorEvents();
        stream.prepareForUse();

        SimpleCOD scod = new SimpleCOD();
        MCOD mcod = new MCOD();
        ExactSTORM angiulli = new ExactSTORM();
        //DistanceOutliersAppr angiulli = new DistanceOutliersAppr();
        AbstractC abstractC = new AbstractC();

        angiulli.queryFreqOption.setValue(1);

        scod.setModelContext(stream.getHeader());
        scod.prepareForUse();
        mcod.setModelContext(stream.getHeader());
        mcod.prepareForUse();
        angiulli.setModelContext(stream.getHeader());
        angiulli.prepareForUse();
        abstractC.setModelContext(stream.getHeader());
        abstractC.prepareForUse();

        // Primitive long avoids needless autoboxing of the start timestamp.
        long tmStart = System.currentTimeMillis();

        int numberSamples = 0;
        while (stream.hasMoreInstances() && (numberSamples < numInstances)) {
            Instance newInst = stream.nextInstance();

            //scod.processNewInstanceImpl(newInst);
            mcod.processNewInstanceImpl(newInst);
            //angiulli.processNewInstanceImpl(newInst);
            //abstractC.processNewInstanceImpl(newInst);

            numberSamples++;
        }

        System.out.println("Total time = " + (System.currentTimeMillis() - tmStart) + " ms");
    }
}
| Java |
/*
* KMeans.java
* Copyright (C) 2010 RWTH Aachen University, Germany
* @author Jansen (moa@cs.rwth-aachen.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers;
import java.util.ArrayList;
import java.util.List;
import moa.cluster.CFCluster;
import moa.cluster.Cluster;
import moa.cluster.Clustering;
import moa.cluster.SphereCluster;
/**
 * A kMeans implementation for microclusterings. For now it only uses the real centers of the
 * groundtruthclustering for initialization. There should also be an option to use random
 * centers.
 * TODO: random centers
 * TODO: Create a macro clustering interface to make different macro clustering algorithms available
 * to micro clustering algorithms like clustream, denstream and clustree
 *
 */
public class KMeans {

    /**
     * This kMeans implementation clusters a big number of microclusters
     * into a smaller amount of macro clusters. To make it comparable to other
     * algorithms it uses the real centers of the ground truth macro clustering
     * to have the best possible initialization. The quality of resulting
     * macro clustering yields an upper bound for kMeans on the underlying
     * microclustering.
     *
     * @param centers of the ground truth clustering
     * @param data list of microclusters
     * @return the clustering after a fixed number of assign/recompute
     *         (Lloyd) iterations
     */
    public static Clustering kMeans(Cluster[] centers, List<? extends Cluster> data) {
        int k = centers.length;
        // Dimensionality is taken from the first center; all centers are
        // assumed to share it.
        int dimensions = centers[0].getCenter().length;

        // One bucket of assigned points per macro cluster.
        ArrayList<ArrayList<Cluster>> clustering =
                new ArrayList<ArrayList<Cluster>>();
        for (int i = 0; i < k; i++) {
            clustering.add(new ArrayList<Cluster>());
        }

        // NOTE(review): post-decrement with ">= 0" executes 101 iterations
        // for repetitions == 100; there is also no early convergence check.
        int repetitions = 100;
        while (repetitions-- >= 0) {
            // Assign points to clusters (closest center wins)
            for (Cluster point : data) {
                double minDistance = distance(point.getCenter(), centers[0].getCenter());
                int closestCluster = 0;
                for (int i = 1; i < k; i++) {
                    double distance = distance(point.getCenter(), centers[i].getCenter());
                    if (distance < minDistance) {
                        closestCluster = i;
                        minDistance = distance;
                    }
                }
                clustering.get(closestCluster).add(point);
            }

            // Calculate new centers and clear clustering lists
            SphereCluster[] newCenters = new SphereCluster[centers.length];
            for (int i = 0; i < k; i++) {
                newCenters[i] = calculateCenter(clustering.get(i), dimensions);
                clustering.get(i).clear();
            }
            centers = newCenters;
        }

        return new Clustering(centers);
    }

    /** Euclidean distance between two equal-length vectors. */
    private static double distance(double[] pointA, double[] pointB) {
        double distance = 0.0;
        for (int i = 0; i < pointA.length; i++) {
            double d = pointA[i] - pointB[i];
            distance += d * d;
        }
        return Math.sqrt(distance);
    }

    /**
     * Mean of the member centers, wrapped in a SphereCluster whose radius is
     * the distance to the farthest member. Empty input yields a zero-radius
     * cluster at the origin.
     */
    private static SphereCluster calculateCenter(ArrayList<Cluster> cluster, int dimensions) {
        double[] res = new double[dimensions];
        for (int i = 0; i < res.length; i++) {
            res[i] = 0.0;
        }

        if (cluster.size() == 0) {
            return new SphereCluster(res, 0.0);
        }

        // Sum of member centers ...
        for (Cluster point : cluster) {
            double[] center = point.getCenter();
            for (int i = 0; i < res.length; i++) {
                res[i] += center[i];
            }
        }

        // ... normalized to the mean
        for (int i = 0; i < res.length; i++) {
            res[i] /= cluster.size();
        }

        // Radius = distance to the farthest member
        double radius = 0.0;
        for (Cluster point : cluster) {
            double dist = distance(res, point.getCenter());
            if (dist > radius) {
                radius = dist;
            }
        }

        return new SphereCluster(res, radius);
    }

    /**
     * Runs kMeans seeded with the ground truth centers, then merges each
     * micro-cluster (must extend CFCluster) into the CF of its closest
     * kMeans center, yielding an aggregated macro clustering.
     *
     * @param gtClustering ground truth clustering supplying the seeds
     * @param clustering micro-clustering to aggregate
     * @return macro clustering of merged CFClusters; empty when there are
     *         fewer micro-clusters than requested centers
     */
    public static Clustering gaussianMeans(Clustering gtClustering, Clustering clustering) {
        // Keep only CFCluster entries; other cluster types are unsupported.
        ArrayList<CFCluster> microclusters = new ArrayList<CFCluster>();
        for (int i = 0; i < clustering.size(); i++) {
            if (clustering.get(i) instanceof CFCluster) {
                microclusters.add((CFCluster) clustering.get(i));
            }
            else {
                System.out.println("Unsupported Cluster Type:" + clustering.get(i).getClass()
                        + ". Cluster needs to extend moa.cluster.CFCluster");
            }
        }

        Cluster[] centers = new Cluster[gtClustering.size()];
        for (int i = 0; i < centers.length; i++) {
            centers[i] = gtClustering.get(i);
        }
        int k = centers.length;

        // Not enough micro-clusters to form k macro clusters.
        if (microclusters.size() < k) {
            return new Clustering(new Cluster[0]);
        }

        Clustering kMeansResult = kMeans(centers, microclusters);

        k = kMeansResult.size();
        CFCluster[] res = new CFCluster[k];

        // Merge each micro-cluster into the CF of its closest kMeans center.
        for (CFCluster microcluster : microclusters) {
            // Find closest kMeans cluster
            double minDistance = Double.MAX_VALUE;
            int closestCluster = 0;
            for (int i = 0; i < k; i++) {
                double distance = distance(kMeansResult.get(i).getCenter(), microcluster.getCenter());
                if (distance < minDistance) {
                    closestCluster = i;
                    minDistance = distance;
                }
            }

            // Add to cluster (first member becomes the seed copy)
            if (res[closestCluster] == null) {
                res[closestCluster] = (CFCluster) microcluster.copy();
            } else {
                res[closestCluster].add(microcluster);
            }
        }

        // Clean up res: drop centers that attracted no micro-cluster.
        int count = 0;
        for (int i = 0; i < res.length; i++) {
            if (res[i] != null)
                ++count;
        }

        CFCluster[] cleaned = new CFCluster[count];
        count = 0;
        for (int i = 0; i < res.length; i++) {
            if (res[i] != null)
                cleaned[count++] = res[i];
        }

        return new Clustering(cleaned);
    }
}
| Java |
/*
* MicroCluster.java
* Copyright (C) 2010 RWTH Aachen University, Germany
* @author Wels (moa@cs.rwth-aachen.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.denstream;
import moa.cluster.CFCluster;
import weka.core.Instance;
/**
 * A damped (exponentially decaying) micro-cluster for DenStream.
 *
 * Extends CFCluster with a decay rate lambda: weight, center and radius are
 * evaluated as of the shared currentTimestamp, with every CF contribution
 * scaled by 2^(-lambda * dt), where dt is the time since the last insertion.
 */
public class MicroCluster extends CFCluster {

    // Time of the most recent insertion; decay is measured from here.
    private long lastEditT = -1;
    // Time this micro-cluster was created.
    private long creationTimestamp = -1;
    // Decay rate of the damped window model.
    private double lambda;
    // Shared clock providing "now" for all weight/center/radius queries.
    private Timestamp currentTimestamp;

    /**
     * Creates a micro-cluster seeded with a single point.
     *
     * @param center            the seed point
     * @param dimensions        dimensionality of the data
     * @param creationTimestamp creation time (also the initial last-edit time)
     * @param lambda            decay rate
     * @param currentTimestamp  shared clock used for later queries
     */
    public MicroCluster(double[] center, int dimensions, long creationTimestamp, double lambda, Timestamp currentTimestamp) {
        super(center, dimensions);
        this.creationTimestamp = creationTimestamp;
        this.lastEditT = creationTimestamp;
        this.lambda = lambda;
        this.currentTimestamp = currentTimestamp;
    }

    /** Convenience constructor taking the seed point from an instance. */
    public MicroCluster(Instance instance, int dimensions, long timestamp, double lambda, Timestamp currentTimestamp) {
        this(instance.toDoubleArray(), dimensions, timestamp, lambda, currentTimestamp);
    }

    /**
     * Absorbs a point: bumps N and weight, refreshes the last-edit time, and
     * adds the point to the linear (LS) and squared (SS) sums.
     *
     * NOTE(review): iterates instance.numValues(), assuming it equals the
     * length of LS/SS — confirm instances never carry extra attributes.
     */
    public void insert(Instance instance, long timestamp) {
        N++;
        super.setWeight(super.getWeight() + 1);
        this.lastEditT = timestamp;

        for (int i = 0; i < instance.numValues(); i++) {
            LS[i] += instance.value(i);
            SS[i] += instance.value(i) * instance.value(i);
        }
    }

    public long getLastEditTimestamp() {
        return lastEditT;
    }

    // Squared-sum vector decayed by dt time units.
    private double[] calcCF2(long dt) {
        double[] cf2 = new double[SS.length];
        for (int i = 0; i < SS.length; i++) {
            cf2[i] = Math.pow(2, -lambda * dt) * SS[i];
        }
        return cf2;
    }

    // Linear-sum vector decayed by dt time units.
    private double[] calcCF1(long dt) {
        double[] cf1 = new double[LS.length];
        for (int i = 0; i < LS.length; i++) {
            cf1[i] = Math.pow(2, -lambda * dt) * LS[i];
        }
        return cf1;
    }

    /** Decayed weight as of the shared current timestamp. */
    @Override
    public double getWeight() {
        return getWeight(currentTimestamp.getTimestamp());
    }

    // Weight decayed to the given time: N * 2^(-lambda * dt).
    private double getWeight(long timestamp) {
        long dt = timestamp - lastEditT;
        return (N * Math.pow(2, -lambda * dt));
    }

    public long getCreationTime() {
        return creationTimestamp;
    }

    /** Decayed center (decayed LS divided by decayed weight). */
    @Override
    public double[] getCenter() {
        return getCenter(currentTimestamp.getTimestamp());
    }

    private double[] getCenter(long timestamp) {
        long dt = timestamp - lastEditT;
        double w = getWeight(timestamp);
        double[] res = new double[LS.length];
        for (int i = 0; i < LS.length; i++) {
            res[i] = LS[i];
            res[i] *= Math.pow(2, -lambda * dt);
            res[i] /= w;
        }
        return res;
    }

    @Override
    public double getRadius() {
        return getRadius(currentTimestamp.getTimestamp())*radiusFactor;
    }

    /**
     * Radius as the largest per-dimension standard deviation
     * sqrt(E[x^2] - E[x]^2) computed from the decayed CF vectors.
     */
    public double getRadius(long timestamp) {
        long dt = timestamp - lastEditT;
        double[] cf1 = calcCF1(dt);
        double[] cf2 = calcCF2(dt);
        double w = getWeight(timestamp);
        double max = 0;
        double sum = 0; // NOTE(review): accumulated but never used
        for (int i = 0; i < SS.length; i++) {
            double x1 = cf2[i] / w;
            double x2 = Math.pow(cf1[i] / w, 2);
            //sum += Math.pow(x1 - x2,2);
            sum += (x1 - x2);
            if (Math.sqrt(x1 - x2) > max) {
                max = Math.sqrt(x1 - x2);
            }
        }
        return max;
    }

    /** Deep copy preserving CF vectors, timestamps and decay settings. */
    @Override
    public MicroCluster copy() {
        MicroCluster copy = new MicroCluster(this.LS.clone(), this.LS.length, this.getCreationTime(), this.lambda, this.currentTimestamp);
        // NOTE(review): weight is set to N + 1, then N is overwritten with
        // this.N below; relies on CFCluster.setWeight semantics — confirm.
        copy.setWeight(this.N + 1);
        copy.N = this.N;
        copy.SS = this.SS.clone();
        copy.LS = this.LS.clone();
        copy.lastEditT = this.lastEditT;
        return copy;
    }

    /** 1.0 if the instance lies within the decayed radius, else 0.0. */
    @Override
    public double getInclusionProbability(Instance instance) {
        if (getCenterDistance(instance) <= getRadius()) {
            return 1.0;
        }
        return 0.0;
    }

    /** CF snapshot whose N is replaced by the decayed weight. */
    @Override
    public CFCluster getCF(){
        CFCluster cf = copy();
        double w = getWeight();
        cf.setN(w);
        return cf;
    }
}
| Java |
/**
* Subspace MOA [DenStream_DBSCAN.java]
*
* DenStream with DBSCAN as the macro-clusterer.
*
* @author Stephan Wels (stephan.wels@rwth-aachen.de)
* @editor Yunsu Kim
* Data Management and Data Exploration Group, RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.denstream;
import java.util.ArrayList;
import moa.cluster.Cluster;
import moa.cluster.Clustering;
import moa.clusterers.AbstractClusterer;
import moa.clusterers.macro.dbscan.DBScan;
import moa.core.Measurement;
import moa.options.FloatOption;
import moa.options.IntOption;
import weka.core.DenseInstance;
import weka.core.Instance;
/**
 * DenStream: density-based clustering over an evolving data stream with a
 * damped window model, using DBSCAN as the offline (macro) clusterer.
 *
 * Online phase: each arriving point is merged into the closest potential
 * (p) micro-cluster if it fits, else into the closest outlier (o)
 * micro-cluster, else it seeds a new o-micro-cluster. Every tp time units,
 * micro-clusters whose decayed weight drops below the relevant threshold
 * are pruned.
 */
public class WithDBSCAN extends AbstractClusterer {

    private static final long serialVersionUID = 1L;

    public IntOption horizonOption = new IntOption("horizon", 'h',
            "Range of the window.", 1000);
    public FloatOption epsilonOption = new FloatOption("epsilon", 'e',
            "Defines the epsilon neighbourhood", 0.02, 0, 1);
    // public IntOption minPointsOption = new IntOption("minPoints", 'p',
    // "Minimal number of points cluster has to contain.", 10);
    public FloatOption betaOption = new FloatOption("beta", 'b', "", 0.2, 0,
            1);
    public FloatOption muOption = new FloatOption("mu", 'm', "", 1, 0,
            Double.MAX_VALUE);
    public IntOption initPointsOption = new IntOption("initPoints", 'i',
            "Number of points to use for initialization.", 1000);
    public FloatOption offlineOption = new FloatOption("offline", 'o',
            "offline multiplier for epsilon.", 2, 2, 20);
    public FloatOption lambdaOption = new FloatOption("lambda", 'l', "",
            0.25,
            0, 1);
    public IntOption speedOption = new IntOption("processingSpeed", 's',
            "Number of incoming points per time unit.", 100, 1, 1000);

    // Weight threshold used by the (commented-out) lambda derivation below.
    private double weightThreshold = 0.01;
    double lambda;      // decay rate of the damped window
    double epsilon;     // neighbourhood radius
    int minPoints;      // density threshold for DBSCAN
    double mu;          // weight threshold for potential micro-clusters
    double beta;        // outlier threshold factor

    Clustering p_micro_cluster;        // potential micro-clusters
    Clustering o_micro_cluster;        // outlier micro-clusters
    ArrayList<DenPoint> initBuffer;    // points collected before initialization
    boolean initialized;
    private long timestamp = 0;
    Timestamp currentTimestamp;
    long tp;                           // pruning period

    /* #point variables */
    protected int numInitPoints;
    protected int numProcessedPerUnit;
    protected int processingSpeed;
    // TODO Some variables to prevent duplicated processes

    /** A stream instance plus a flag marking whether initial DBSCAN covered it. */
    private class DenPoint extends DenseInstance {
        private static final long serialVersionUID = 1L;

        protected boolean covered;

        public DenPoint(Instance nextInstance, Long timestamp) {
            super(nextInstance);
            this.setDataset(nextInstance.dataset());
        }
    }

    @Override
    public void resetLearningImpl() {
        // init DenStream
        currentTimestamp = new Timestamp();
        // lambda = -Math.log(weightThreshold) / Math.log(2)
        // / (double) horizonOption.getValue();
        lambda = lambdaOption.getValue();
        epsilon = epsilonOption.getValue();
        minPoints = (int) muOption.getValue();// minPointsOption.getValue();
        mu = (int) muOption.getValue();
        beta = betaOption.getValue();
        initialized = false;
        p_micro_cluster = new Clustering();
        o_micro_cluster = new Clustering();
        initBuffer = new ArrayList<DenPoint>();
        // tp: time needed for a p-micro-cluster's decayed weight to fall
        // below beta*mu once insertions stop; used as the pruning period.
        tp = Math.round(1 / lambda * Math.log((beta * mu) / (beta * mu - 1))) + 1;
        numProcessedPerUnit = 0;
        processingSpeed = speedOption.getValue();
    }

    /**
     * Runs DBSCAN over the buffered initial points; every dense region
     * becomes a potential micro-cluster.
     */
    public void initialDBScan() {
        for (int p = 0; p < initBuffer.size(); p++) {
            DenPoint point = initBuffer.get(p);
            if (!point.covered) {
                point.covered = true;
                ArrayList<Integer> neighbourhood = getNeighbourhoodIDs(point,
                        initBuffer, epsilon);
                if (neighbourhood.size() > minPoints) {
                    MicroCluster mc = new MicroCluster(point,
                            point.numAttributes(), timestamp, lambda,
                            currentTimestamp);
                    expandCluster(mc, initBuffer, neighbourhood);
                    p_micro_cluster.add(mc);
                } else {
                    point.covered = false;
                }
            }
        }
    }

    /**
     * Online phase: buffer points until initialization, then merge each
     * point into the p/o micro-cluster structure and periodically prune
     * micro-clusters whose decayed weight fell below threshold.
     */
    @Override
    public void trainOnInstanceImpl(Instance inst) {
        DenPoint point = new DenPoint(inst, timestamp);
        numProcessedPerUnit++;

        /* Controlling the stream speed */
        if (numProcessedPerUnit % processingSpeed == 0) {
            timestamp++;
            currentTimestamp.setTimestamp(timestamp);
        }

        // ////////////////
        // Initialization//
        // ////////////////
        if (!initialized) {
            initBuffer.add(point);
            if (initBuffer.size() >= initPointsOption.getValue()) {
                initialDBScan();
                initialized = true;
            }
        } else {
            // ////////////
            // Merging(p)//
            // ////////////
            // Try the closest p-micro-cluster first; accept only if the
            // insertion keeps its radius within epsilon.
            boolean merged = false;
            if (p_micro_cluster.getClustering().size() != 0) {
                MicroCluster x = nearestCluster(point, p_micro_cluster);
                MicroCluster xCopy = x.copy();
                xCopy.insert(point, timestamp);
                if (xCopy.getRadius(timestamp) <= epsilon) {
                    x.insert(point, timestamp);
                    merged = true;
                }
            }
            if (!merged && (o_micro_cluster.getClustering().size() != 0)) {
                MicroCluster x = nearestCluster(point, o_micro_cluster);
                MicroCluster xCopy = x.copy();
                xCopy.insert(point, timestamp);

                if (xCopy.getRadius(timestamp) <= epsilon) {
                    x.insert(point, timestamp);
                    merged = true;
                    // Promote the outlier micro-cluster once its weight
                    // exceeds beta * mu.
                    if (x.getWeight() > beta * mu) {
                        o_micro_cluster.getClustering().remove(x);
                        p_micro_cluster.getClustering().add(x);
                    }
                }
            }
            if (!merged) {
                // Start a new outlier micro-cluster seeded with this point.
                o_micro_cluster.getClustering().add(
                        new MicroCluster(point.toDoubleArray(), point
                                .toDoubleArray().length, timestamp, lambda,
                                currentTimestamp));
            }

            // //////////////////////////
            // Periodic cluster removal//
            // //////////////////////////
            if (timestamp % tp == 0) {
                // Prune p-micro-clusters whose decayed weight fell below
                // the potential threshold beta * mu.
                ArrayList<MicroCluster> removalList = new ArrayList<MicroCluster>();
                for (Cluster c : p_micro_cluster.getClustering()) {
                    if (((MicroCluster) c).getWeight() < beta * mu) {
                        removalList.add((MicroCluster) c);
                    }
                }
                for (Cluster c : removalList) {
                    p_micro_cluster.getClustering().remove(c);
                }

                // FIX: use a fresh list for the outlier pass. The original
                // reused the list above, which still held the already-removed
                // p-micro-clusters (harmless no-op removals, but misleading).
                removalList = new ArrayList<MicroCluster>();
                for (Cluster c : o_micro_cluster.getClustering()) {
                    // xsi: lower weight limit an outlier micro-cluster of age
                    // (timestamp - t0) must exceed to survive.
                    long t0 = ((MicroCluster) c).getCreationTime();
                    double xsi1 = Math
                            .pow(2, (-lambda * (timestamp - t0 + tp))) - 1;
                    double xsi2 = Math.pow(2, -lambda * tp) - 1;
                    double xsi = xsi1 / xsi2;
                    if (((MicroCluster) c).getWeight() < xsi) {
                        removalList.add((MicroCluster) c);
                    }
                }
                for (Cluster c : removalList) {
                    o_micro_cluster.getClustering().remove(c);
                }
            }
        }
    }

    /**
     * Recursively grows mc with every density-reachable, not-yet-covered
     * point of the initial buffer.
     */
    private void expandCluster(MicroCluster mc, ArrayList<DenPoint> points,
            ArrayList<Integer> neighbourhood) {
        for (int p : neighbourhood) {
            DenPoint npoint = points.get(p);
            if (!npoint.covered) {
                npoint.covered = true;
                mc.insert(npoint, timestamp);
                ArrayList<Integer> neighbourhood2 = getNeighbourhoodIDs(npoint,
                        initBuffer, epsilon);
                // BUGFIX: the density check must apply to the NEW
                // neighbourhood (neighbourhood2). The original tested the
                // outer neighbourhood, which is always dense at this point,
                // so expansion recursed through border points as if every
                // point were a core point.
                if (neighbourhood2.size() > minPoints) {
                    expandCluster(mc, points, neighbourhood2);
                }
            }
        }
    }

    /**
     * IDs of all not-yet-covered points within eps of the given point.
     */
    private ArrayList<Integer> getNeighbourhoodIDs(DenPoint point,
            ArrayList<DenPoint> points, double eps) {
        ArrayList<Integer> neighbourIDs = new ArrayList<Integer>();
        for (int p = 0; p < points.size(); p++) {
            DenPoint npoint = points.get(p);
            if (!npoint.covered) {
                double dist = distance(point.toDoubleArray(), points.get(p)
                        .toDoubleArray());
                if (dist < eps) {
                    neighbourIDs.add(p);
                }
            }
        }
        return neighbourIDs;
    }

    /**
     * Returns the micro-cluster closest to p, where distance is measured
     * from p to the cluster center minus the cluster radius (negative when
     * p lies inside the cluster).
     */
    private MicroCluster nearestCluster(DenPoint p, Clustering cl) {
        MicroCluster min = null;
        // BUGFIX: start at +infinity. The original initialized minDist to 0,
        // so unless some cluster contained the point (adjusted distance < 0)
        // the FIRST cluster was always returned, not the nearest one.
        double minDist = Double.MAX_VALUE;

        for (int c = 0; c < cl.size(); c++) {
            MicroCluster x = (MicroCluster) cl.get(c);
            double dist = distance(p.toDoubleArray(), x.getCenter());
            dist -= x.getRadius(timestamp);
            if (min == null || dist < minDist) {
                minDist = dist;
                min = x;
            }
        }
        return min;
    }

    /** Euclidean distance between two equal-length vectors. */
    private double distance(double[] pointA, double[] pointB) {
        double distance = 0.0;
        for (int i = 0; i < pointA.length; i++) {
            double d = pointA[i] - pointB[i];
            distance += d * d;
        }
        return Math.sqrt(distance);
    }

    /** Offline phase: DBSCAN over the potential micro-clusters. */
    public Clustering getClusteringResult() {
        DBScan dbscan = new DBScan(p_micro_cluster, offlineOption.getValue() * epsilon, minPoints);
        return dbscan.getClustering(p_micro_cluster);
    }

    @Override
    public boolean implementsMicroClusterer() {
        return true;
    }

    @Override
    public Clustering getMicroClusteringResult() {
        return p_micro_cluster;
    }

    @Override
    protected Measurement[] getModelMeasurementsImpl() {
        throw new UnsupportedOperationException("Not supported yet.");
    }

    @Override
    public void getModelDescription(StringBuilder out, int indent) {
    }

    public boolean isRandomizable() {
        return true;
    }

    public double[] getVotesForInstance(Instance inst) {
        return null;
    }

    /** CLI-style summary of the configured options. */
    public String getParameterString() {
        StringBuffer sb = new StringBuffer();
        sb.append(this.getClass().getSimpleName() + " ");
        sb.append("-" + horizonOption.getCLIChar() + " ");
        sb.append(horizonOption.getValueAsCLIString() + " ");
        sb.append("-" + epsilonOption.getCLIChar() + " ");
        sb.append(epsilonOption.getValueAsCLIString() + " ");
        sb.append("-" + betaOption.getCLIChar() + " ");
        sb.append(betaOption.getValueAsCLIString() + " ");
        sb.append("-" + muOption.getCLIChar() + " ");
        sb.append(muOption.getValueAsCLIString() + " ");
        sb.append("-" + lambdaOption.getCLIChar() + " ");
        sb.append(lambdaOption.getValueAsCLIString() + " ");
        sb.append("-" + initPointsOption.getCLIChar() + " ");
        // NO " " at the end! results in errors on windows systems
        sb.append(initPointsOption.getValueAsCLIString());
        return sb.toString();
    }
}
| Java |
/*
* Timestamp.java
* Copyright (C) 2010 RWTH Aachen University, Germany
* @author Wels (moa@cs.rwth-aachen.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.denstream;
import moa.AbstractMOAObject;
/**
 * A mutable logical clock shared between collaborating stream components.
 */
public class Timestamp extends AbstractMOAObject{

    // Current logical time.
    private long timestamp;

    /**
     * Creates a timestamp initialized to the given value.
     *
     * @param timestamp the initial time value
     */
    public Timestamp(long timestamp) {
        this.timestamp = timestamp;
    }

    /** Creates a timestamp starting at zero. */
    public Timestamp() {
        this(0);
    }

    public long getTimestamp() {
        return timestamp;
    }

    /** Advances the clock by one unit. */
    public void increase() {
        timestamp = timestamp + 1;
    }

    public void setTimestamp(long timestamp) {
        this.timestamp = timestamp;
    }

    /** No textual description is provided. */
    public void getDescription(StringBuilder sb, int i) {
    }
}
| Java |
/**
* [CluStream_kMeans.java]
* CluStream with k-means as macroclusterer
*
* Appeared in seminar paper "Understanding of Internal Clustering Validation Measure in Streaming Environment" (Yunsu Kim)
* for the course "Seminar: Data Mining and Multimedia Retrival" in RWTH Aachen University, WS 12/13
*
* @author Yunsu Kim
* based on the code of Timm Jansen (moa@cs.rwth-aachen.de)
* Data Management and Data Exploration Group, RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.clustream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import moa.cluster.CFCluster;
import moa.cluster.Cluster;
import moa.cluster.Clustering;
import moa.cluster.SphereCluster;
import moa.clusterers.AbstractClusterer;
import moa.core.Measurement;
import moa.options.IntOption;
import weka.core.DenseInstance;
import weka.core.Instance;
public class WithKmeans extends AbstractClusterer {
private static final long serialVersionUID = 1L;
public IntOption timeWindowOption = new IntOption("horizon",
'h', "Rang of the window.", 1000);
public IntOption maxNumKernelsOption = new IntOption(
"maxNumKernels", 'm',
"Maximum number of micro kernels to use.", 100);
public IntOption kernelRadiFactorOption = new IntOption(
"kernelRadiFactor", 't',
"Multiplier for the kernel radius", 2);
public IntOption kOption = new IntOption(
"k", 'k',
"k of macro k-means (number of clusters)", 5);
private int timeWindow;
private long timestamp = -1;
private ClustreamKernel[] kernels;
private boolean initialized;
private List<ClustreamKernel> buffer; // Buffer for initialization with kNN
private int bufferSize;
private double t;
private int m;
/** Default constructor; all configuration happens via the exposed options. */
public WithKmeans() {
}
@Override
public void resetLearningImpl() {
    // Discard all learned state and re-read the user options.
    this.kernels = new ClustreamKernel[maxNumKernelsOption.getValue()];
    this.timeWindow = timeWindowOption.getValue();
    this.initialized = false;
    this.buffer = new LinkedList<ClustreamKernel>();
    this.bufferSize = maxNumKernelsOption.getValue();
    t = kernelRadiFactorOption.getValue(); // kernel radius multiplier
    m = maxNumKernelsOption.getValue();    // maximum number of kernels
}
/**
 * CluStream online phase: buffer the first bufferSize points, then maintain
 * at most m micro-cluster kernels by (1) absorbing the instance into the
 * closest kernel when it fits, (2) replacing an outdated kernel, or
 * (3) merging the closest kernel pair to free a slot.
 */
@Override
public void trainOnInstanceImpl(Instance instance) {
    int dim = instance.numValues();
    timestamp++;
    // 0. Initialize: collect points, then seed kernels from buffered centers.
    if (!initialized) {
        if (buffer.size() < bufferSize) {
            buffer.add(new ClustreamKernel(instance, dim, timestamp, t, m));
            return;
        } else {
            for (int i = 0; i < buffer.size(); i++) {
                kernels[i] = new ClustreamKernel(new DenseInstance(1.0, buffer.get(i).getCenter()), dim, timestamp, t, m);
            }
            buffer.clear();
            initialized = true;
            // NOTE(review): the instance that triggers this branch is not
            // inserted anywhere — it is dropped. Confirm this is intended.
            return;
        }
    }

    // 1. Determine closest kernel
    ClustreamKernel closestKernel = null;
    double minDistance = Double.MAX_VALUE;
    for (int i = 0; i < kernels.length; i++) {
        //System.out.println(i+" "+kernels[i].getWeight()+" "+kernels[i].getDeviation());
        double distance = distance(instance.toDoubleArray(), kernels[i].getCenter());
        if (distance < minDistance) {
            closestKernel = kernels[i];
            minDistance = distance;
        }
    }

    // 2. Check whether instance fits into closestKernel
    double radius = 0.0;
    if (closestKernel.getWeight() == 1) {
        // Special case: estimate radius by determining the distance to the
        // next closest cluster
        radius = Double.MAX_VALUE;
        double[] center = closestKernel.getCenter();
        for (int i = 0; i < kernels.length; i++) {
            if (kernels[i] == closestKernel) {
                continue;
            }
            double distance = distance(kernels[i].getCenter(), center);
            radius = Math.min(distance, radius);
        }
    } else {
        radius = closestKernel.getRadius();
    }

    if (minDistance < radius) {
        // Date fits, put into kernel and be happy
        closestKernel.insert(instance, timestamp);
        return;
    }

    // 3. Date does not fit, we need to free
    // some space to insert a new kernel
    long threshold = timestamp - timeWindow; // Kernels before this can be forgotten

    // 3.1 Try to forget old kernels (last update outside the window)
    for (int i = 0; i < kernels.length; i++) {
        if (kernels[i].getRelevanceStamp() < threshold) {
            kernels[i] = new ClustreamKernel(instance, dim, timestamp, t, m);
            return;
        }
    }

    // 3.2 Merge closest two kernels; the freed slot takes a new kernel
    // seeded with the incoming instance.
    int closestA = 0;
    int closestB = 0;
    minDistance = Double.MAX_VALUE;
    for (int i = 0; i < kernels.length; i++) {
        double[] centerA = kernels[i].getCenter();
        for (int j = i + 1; j < kernels.length; j++) {
            double dist = distance(centerA, kernels[j].getCenter());
            if (dist < minDistance) {
                minDistance = dist;
                closestA = i;
                closestB = j;
            }
        }
    }
    assert (closestA != closestB);

    kernels[closestA].add(kernels[closestB]);
    kernels[closestB] = new ClustreamKernel(instance, dim, timestamp, t, m);
}
/**
 * Returns a snapshot of the current micro-clustering, or an empty
 * clustering while still in the initialization phase.
 */
@Override
public Clustering getMicroClusteringResult() {
    // Nothing to report before the kernels are seeded.
    if (!initialized) {
        return new Clustering(new Cluster[0]);
    }

    // Hand out copies so callers cannot mutate the live kernels.
    ClustreamKernel[] copies = new ClustreamKernel[kernels.length];
    for (int i = 0; i < copies.length; i++) {
        copies[i] = new ClustreamKernel(kernels[i], t, m);
    }
    return new Clustering(copies);
}
/**
 * Offline phase: k-means with random initialization over the current
 * micro-clustering; empty before initialization.
 */
@Override
public Clustering getClusteringResult() {
    if (!initialized) {
        return new Clustering(new Cluster[0]);
    }
    Clustering micro = getMicroClusteringResult();
    return kMeans_rand(kOption.getValue(), micro);
}
// Macro clustering seeded with the ground-truth centers (best-case k-means).
public Clustering getClusteringResult(Clustering gtClustering) {
    return kMeans_gta(kOption.getValue(), getMicroClusteringResult(), gtClustering);
}
// Display name; includes the horizon (time window) for identification.
public String getName() {
    return "CluStreamWithKMeans " + timeWindow;
}
/**
 * Euclidean distance between two vectors.
 *
 * @param pointA first vector
 * @param pointB second vector (same length as pointA)
 * @return the Euclidean distance between the two vectors
 */
private static double distance(double[] pointA, double[] pointB) {
    double sumOfSquares = 0.0;
    for (int i = 0; i < pointA.length; i++) {
        double diff = pointA[i] - pointB[i];
        sumOfSquares += diff * diff;
    }
    return Math.sqrt(sumOfSquares);
}
/**
 * k-means of (micro)clusters, with ground-truth-aided initialization
 * (to produce best results).
 *
 * @param k number of macro clusters
 * @param clustering micro-clustering to group (entries must extend CFCluster)
 * @param gtClustering ground truth supplying the first initial centers
 * @return (macro)clustering - CFClusters
 */
public static Clustering kMeans_gta(int k, Clustering clustering, Clustering gtClustering) {
    // Keep only CFCluster entries; other cluster types are unsupported.
    ArrayList<CFCluster> microclusters = new ArrayList<CFCluster>();
    for (int i = 0; i < clustering.size(); i++) {
        if (clustering.get(i) instanceof CFCluster) {
            microclusters.add((CFCluster)clustering.get(i));
        } else {
            System.out.println("Unsupported Cluster Type:" + clustering.get(i).getClass() + ". Cluster needs to extend moa.cluster.CFCluster");
        }
    }

    int n = microclusters.size();
    assert (k <= n);

    /* k-means: the first K centers come from the ground truth; the rest are
       random micro-cluster centers (fixed seed for reproducibility). */
    Random random = new Random(0);
    Cluster[] centers = new Cluster[k];
    int K = gtClustering.size();
    for (int i = 0; i < k; i++) {
        if (i < K) { // GT-aided
            centers[i] = new SphereCluster(gtClustering.get(i).getCenter(), 0);
        } else { // Randomized
            int rid = random.nextInt(n);
            centers[i] = new SphereCluster(microclusters.get(rid).getCenter(), 0);
        }
    }

    return cleanUpKMeans(kMeans(k, centers, microclusters), microclusters);
}
/**
 * k-means over micro-clusters with randomized (but fixed-seed, hence
 * deterministic) center initialization. Entries that do not extend
 * CFCluster are reported and skipped.
 *
 * @param k number of macro clusters
 * @param clustering micro-clustering; entries must extend CFCluster
 * @return macro clustering of merged CFClusters
 */
public static Clustering kMeans_rand(int k, Clustering clustering) {
    ArrayList<CFCluster> microclusters = new ArrayList<CFCluster>();
    for (int idx = 0; idx < clustering.size(); idx++) {
        Cluster candidate = clustering.get(idx);
        if (candidate instanceof CFCluster) {
            microclusters.add((CFCluster) candidate);
        } else {
            System.out.println("Unsupported Cluster Type:" + candidate.getClass() + ". Cluster needs to extend moa.cluster.CFCluster");
        }
    }
    int n = microclusters.size();
    assert (k <= n);
    Random random = new Random(0); // fixed seed keeps runs reproducible
    Cluster[] centers = new Cluster[k];
    for (int idx = 0; idx < k; idx++) {
        Cluster seed = microclusters.get(random.nextInt(n));
        centers[idx] = new SphereCluster(seed.getCenter(), 0);
    }
    return cleanUpKMeans(kMeans(k, centers, microclusters), microclusters);
}
/**
 * Lloyd's k-means over (micro)clusters with caller-supplied initial centers.
 * Iterates assignment/update steps until the centers stop moving exactly
 * (bitwise comparison of coordinates).
 *
 * @param k number of clusters
 * @param centers initial centers; length must equal k
 * @param data points (clusters) to partition
 * @return macro clustering of k SphereClusters
 */
protected static Clustering kMeans(int k, Cluster[] centers, List<? extends Cluster> data) {
    assert (centers.length == k);
    assert (k > 0);
    int dimensions = centers[0].getCenter().length;
    // Reusable assignment buckets, one per cluster.
    ArrayList<ArrayList<Cluster>> assignment = new ArrayList<ArrayList<Cluster>>();
    for (int c = 0; c < k; c++) {
        assignment.add(new ArrayList<Cluster>());
    }
    boolean converged = false;
    while (!converged) {
        // Assignment step: each point joins its nearest center's bucket.
        for (Cluster point : data) {
            int best = 0;
            double bestDist = distance(point.getCenter(), centers[0].getCenter());
            for (int c = 1; c < k; c++) {
                double d = distance(point.getCenter(), centers[c].getCenter());
                if (d < bestDist) {
                    best = c;
                    bestDist = d;
                }
            }
            assignment.get(best).add(point);
        }
        // Update step: recompute centers, then empty the buckets for reuse.
        SphereCluster[] updated = new SphereCluster[centers.length];
        for (int c = 0; c < k; c++) {
            updated[c] = calculateCenter(assignment.get(c), dimensions);
            assignment.get(c).clear();
        }
        // Converged exactly when no center coordinate changed.
        converged = true;
        for (int c = 0; c < k && converged; c++) {
            converged = Arrays.equals(centers[c].getCenter(), updated[c].getCenter());
        }
        if (!converged) {
            centers = updated;
        }
    }
    return new Clustering(centers);
}
/**
 * Folds the micro-clusters into their nearest k-means macro cluster
 * (merging CF vectors) and drops macro clusters that attracted nothing.
 *
 * @param kMeansResult macro centers produced by k-means
 * @param microclusters the micro-clusters to redistribute
 * @return clustering containing only the non-empty merged CFClusters
 */
protected static Clustering cleanUpKMeans(Clustering kMeansResult, ArrayList<CFCluster> microclusters) {
    int k = kMeansResult.size();
    CFCluster[] merged = new CFCluster[k];
    for (CFCluster mc : microclusters) {
        // Locate the nearest macro center for this micro-cluster.
        int closest = 0;
        double best = Double.MAX_VALUE;
        for (int c = 0; c < k; c++) {
            double d = distance(kMeansResult.get(c).getCenter(), mc.getCenter());
            if (d < best) {
                best = d;
                closest = c;
            }
        }
        // First arrival seeds the slot with a copy; later arrivals merge in.
        if (merged[closest] == null) {
            merged[closest] = (CFCluster) mc.copy();
        } else {
            merged[closest].add(mc);
        }
    }
    // Keep only the slots that received at least one micro-cluster.
    ArrayList<CFCluster> nonEmpty = new ArrayList<CFCluster>();
    for (CFCluster cf : merged) {
        if (cf != null) {
            nonEmpty.add(cf);
        }
    }
    return new Clustering(nonEmpty.toArray(new CFCluster[nonEmpty.size()]));
}
/**
 * k-means helper: wrapping SphereCluster of the assigned points — mean as
 * center, largest point-to-mean distance as radius, point count as weight.
 * An empty assignment yields an origin-centered, zero-radius cluster.
 *
 * @param assigned points assigned to this cluster (may be empty)
 * @param dimensions dimensionality of the space
 * @return wrapping SphereCluster
 */
private static SphereCluster calculateCenter(ArrayList<Cluster> assigned, int dimensions) {
    double[] mean = new double[dimensions]; // Java zero-initializes
    if (assigned.isEmpty()) {
        return new SphereCluster(mean, 0.0);
    }
    // Accumulate, then divide: arithmetic mean of the centers.
    for (Cluster point : assigned) {
        double[] coords = point.getCenter();
        for (int d = 0; d < mean.length; d++) {
            mean[d] += coords[d];
        }
    }
    for (int d = 0; d < mean.length; d++) {
        mean[d] /= assigned.size();
    }
    // Radius: biggest wrapping distance from the mean.
    double radius = 0.0;
    for (Cluster point : assigned) {
        radius = Math.max(radius, distance(mean, point.getCenter()));
    }
    SphereCluster wrap = new SphereCluster(mean, radius);
    wrap.setWeight(assigned.size());
    return wrap;
}
/** Miscellaneous **/

// This clusterer maintains micro-clusters (kernels from getMicroClusteringResult).
@Override
public boolean implementsMicroClusterer() {
    return true;
}

// No seeded randomness exposed; the internal k-means uses a fixed seed (Random(0)).
public boolean isRandomizable() {
    return false;
}

// Clustering-only learner: classification votes are not provided.
public double[] getVotesForInstance(Instance inst) {
    throw new UnsupportedOperationException("Not supported yet.");
}

// No algorithm-specific measurements are reported.
@Override
protected Measurement[] getModelMeasurementsImpl() {
    throw new UnsupportedOperationException("Not supported yet.");
}

// No textual model description is produced.
@Override
public void getModelDescription(StringBuilder out, int indent) {
    throw new UnsupportedOperationException("Not supported yet.");
}
}
| Java |
/*
* Clustream.java
* Copyright (C) 2010 RWTH Aachen University, Germany
* @author Jansen (moa@cs.rwth-aachen.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.clustream;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import moa.cluster.Cluster;
import moa.cluster.Clustering;
import moa.cluster.SphereCluster;
import moa.clusterers.AbstractClusterer;
import moa.core.Measurement;
import moa.options.IntOption;
import weka.core.DenseInstance;
import weka.core.Instance;
/** Citation: CluStream: Charu C. Aggarwal, Jiawei Han, Jianyong Wang, Philip S. Yu:
* A Framework for Clustering Evolving Data Streams. VLDB 2003: 81-92
*/
public class Clustream extends AbstractClusterer{
private static final long serialVersionUID = 1L;
public IntOption timeWindowOption = new IntOption("horizon",
'h', "Rang of the window.", 1000);
public IntOption maxNumKernelsOption = new IntOption(
"maxNumKernels", 'k',
"Maximum number of micro kernels to use.", 100);
public IntOption kernelRadiFactorOption = new IntOption(
"kernelRadiFactor", 't',
"Multiplier for the kernel radius", 2);
private int timeWindow;
private long timestamp = -1;
private ClustreamKernel[] kernels;
private boolean initialized;
private List<ClustreamKernel> buffer; // Buffer for initialization with kNN
private int bufferSize;
private double t;
private int m;
public Clustream() {
}
@Override
public void resetLearningImpl() {
this.kernels = new ClustreamKernel[maxNumKernelsOption.getValue()];
this.timeWindow = timeWindowOption.getValue();
this.initialized = false;
this.buffer = new LinkedList<ClustreamKernel>();
this.bufferSize = maxNumKernelsOption.getValue();
t = kernelRadiFactorOption.getValue();
m = maxNumKernelsOption.getValue();
}
@Override
public void trainOnInstanceImpl(Instance instance) {
int dim = instance.numValues();
timestamp++;
// 0. Initialize
if ( !initialized ) {
if ( buffer.size() < bufferSize ) {
buffer.add( new ClustreamKernel(instance,dim, timestamp, t, m) );
return;
}
int k = kernels.length;
//System.err.println("k="+k+" bufferSize="+bufferSize);
assert (k <= bufferSize);
ClustreamKernel[] centers = new ClustreamKernel[k];
for ( int i = 0; i < k; i++ ) {
centers[i] = buffer.get( i ); // TODO: make random!
}
Clustering kmeans_clustering = kMeans(k, centers, buffer);
// Clustering kmeans_clustering = kMeans(k, buffer);
for ( int i = 0; i < kmeans_clustering.size(); i++ ) {
kernels[i] = new ClustreamKernel( new DenseInstance(1.0,centers[i].getCenter()), dim, timestamp, t, m );
}
buffer.clear();
initialized = true;
return;
}
// 1. Determine closest kernel
ClustreamKernel closestKernel = null;
double minDistance = Double.MAX_VALUE;
for ( int i = 0; i < kernels.length; i++ ) {
//System.out.println(i+" "+kernels[i].getWeight()+" "+kernels[i].getDeviation());
double distance = distance(instance.toDoubleArray(), kernels[i].getCenter() );
if ( distance < minDistance ) {
closestKernel = kernels[i];
minDistance = distance;
}
}
// 2. Check whether instance fits into closestKernel
double radius = 0.0;
if ( closestKernel.getWeight() == 1 ) {
// Special case: estimate radius by determining the distance to the
// next closest cluster
radius = Double.MAX_VALUE;
double[] center = closestKernel.getCenter();
for ( int i = 0; i < kernels.length; i++ ) {
if ( kernels[i] == closestKernel ) {
continue;
}
double distance = distance(kernels[i].getCenter(), center );
radius = Math.min( distance, radius );
}
} else {
radius = closestKernel.getRadius();
}
if ( minDistance < radius ) {
// Date fits, put into kernel and be happy
closestKernel.insert( instance, timestamp );
return;
}
// 3. Date does not fit, we need to free
// some space to insert a new kernel
long threshold = timestamp - timeWindow; // Kernels before this can be forgotten
// 3.1 Try to forget old kernels
for ( int i = 0; i < kernels.length; i++ ) {
if ( kernels[i].getRelevanceStamp() < threshold ) {
kernels[i] = new ClustreamKernel( instance, dim, timestamp, t, m );
return;
}
}
// 3.2 Merge closest two kernels
int closestA = 0;
int closestB = 0;
minDistance = Double.MAX_VALUE;
for ( int i = 0; i < kernels.length; i++ ) {
double[] centerA = kernels[i].getCenter();
for ( int j = i + 1; j < kernels.length; j++ ) {
double dist = distance( centerA, kernels[j].getCenter() );
if ( dist < minDistance ) {
minDistance = dist;
closestA = i;
closestB = j;
}
}
}
assert (closestA != closestB);
kernels[closestA].add( kernels[closestB] );
kernels[closestB] = new ClustreamKernel( instance, dim, timestamp, t, m );
}
@Override
public Clustering getMicroClusteringResult() {
if ( !initialized ) {
return new Clustering( new Cluster[0] );
}
ClustreamKernel[] res = new ClustreamKernel[kernels.length];
for ( int i = 0; i < res.length; i++ ) {
res[i] = new ClustreamKernel( kernels[i], t, m );
}
return new Clustering( res );
}
@Override
public boolean implementsMicroClusterer() {
return true;
}
@Override
public Clustering getClusteringResult() {
return null;
}
public String getName() {
return "Clustream " + timeWindow;
}
private static double distance(double[] pointA, double [] pointB){
double distance = 0.0;
for (int i = 0; i < pointA.length; i++) {
double d = pointA[i] - pointB[i];
distance += d * d;
}
return Math.sqrt(distance);
}
//wrapper... we need to rewrite kmeans to points, not clusters, doesnt make sense anymore
// public static Clustering kMeans( int k, ArrayList<Instance> points, int dim ) {
// ArrayList<ClustreamKernel> cl = new ArrayList<ClustreamKernel>();
// for(Instance inst : points){
// cl.add(new ClustreamKernel(inst, dim , 0, 0, 0));
// }
// Clustering clustering = kMeans(k, cl);
// return clustering;
// }
public static Clustering kMeans( int k, List<? extends Cluster> data ) {
Random random = new Random(0);
Cluster[] centers = new Cluster[k];
for (int i = 0; i < centers.length; i++) {
int rid = random.nextInt(k);
centers[i] = new SphereCluster(data.get(rid).getCenter(),0);
}
Clustering clustering = kMeans(k, centers, data);
return clustering;
}
public static Clustering kMeans( int k, Cluster[] centers, List<? extends Cluster> data ) {
assert (centers.length == k);
assert (k > 0);
int dimensions = centers[0].getCenter().length;
ArrayList<ArrayList<Cluster>> clustering = new ArrayList<ArrayList<Cluster>>();
for ( int i = 0; i < k; i++ ) {
clustering.add( new ArrayList<Cluster>() );
}
int repetitions = 100;
while ( repetitions-- >= 0 ) {
// Assign points to clusters
for ( Cluster point : data ) {
double minDistance = distance( point.getCenter(), centers[0].getCenter() );
int closestCluster = 0;
for ( int i = 1; i < k; i++ ) {
double distance = distance( point.getCenter(), centers[i].getCenter() );
if ( distance < minDistance ) {
closestCluster = i;
minDistance = distance;
}
}
clustering.get( closestCluster ).add( point );
}
// Calculate new centers and clear clustering lists
SphereCluster[] newCenters = new SphereCluster[centers.length];
for ( int i = 0; i < k; i++ ) {
newCenters[i] = calculateCenter( clustering.get( i ), dimensions );
clustering.get( i ).clear();
}
centers = newCenters;
}
return new Clustering( centers );
}
private static SphereCluster calculateCenter( ArrayList<Cluster> cluster, int dimensions ) {
double[] res = new double[dimensions];
for ( int i = 0; i < res.length; i++ ) {
res[i] = 0.0;
}
if ( cluster.size() == 0 ) {
return new SphereCluster( res, 0.0 );
}
for ( Cluster point : cluster ) {
double [] center = point.getCenter();
for (int i = 0; i < res.length; i++) {
res[i] += center[i];
}
}
// Normalize
for ( int i = 0; i < res.length; i++ ) {
res[i] /= cluster.size();
}
// Calculate radius
double radius = 0.0;
for ( Cluster point : cluster ) {
double dist = distance( res, point.getCenter() );
if ( dist > radius ) {
radius = dist;
}
}
SphereCluster sc = new SphereCluster( res, radius );
sc.setWeight(cluster.size());
return sc;
}
@Override
protected Measurement[] getModelMeasurementsImpl() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public void getModelDescription(StringBuilder out, int indent) {
throw new UnsupportedOperationException("Not supported yet.");
}
public boolean isRandomizable() {
return false;
}
public double[] getVotesForInstance(Instance inst) {
throw new UnsupportedOperationException("Not supported yet.");
}
}
| Java |
/*
* ClustreamKernel.java
* Copyright (C) 2010 RWTH Aachen University, Germany
* @author Jansen (moa@cs.rwth-aachen.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.clustream;
import java.util.ArrayList;
import java.util.Random;
import moa.cluster.CFCluster;
import weka.core.Instance;
/**
 * Micro-cluster (cluster feature vector) used by CluStream. Extends the CF
 * vector with the linear (LST) and squared (SST) sums of the timestamps of
 * absorbed points, which drive the temporal "relevance stamp" used to decide
 * when a kernel is outdated.
 */
public class ClustreamKernel extends CFCluster {

    private static final long serialVersionUID = 1L;

    /** Tolerance for "numerically zero" distances and variances. */
    private final static double EPSILON = 0.00005;
    /** Floor substituted for variances rounded slightly negative. */
    public static final double MIN_VARIANCE = 1e-50;

    /** Linear sum of the timestamps of all absorbed points. */
    protected double LST;
    /** Squared sum of the timestamps of all absorbed points. */
    protected double SST;

    /** Max number of kernels in the owning clusterer (used by the relevance stamp). */
    int m;
    /** Kernel radius multiplier. */
    double t;

    public ClustreamKernel(Instance instance, int dimensions, long timestamp, double t, int m) {
        super(instance, dimensions);
        this.t = t;
        this.m = m;
        this.LST = timestamp;
        this.SST = timestamp * timestamp;
    }

    /** Copy constructor. */
    public ClustreamKernel(ClustreamKernel cluster, double t, int m) {
        super(cluster);
        this.t = t;
        this.m = m;
        this.LST = cluster.LST;
        this.SST = cluster.SST;
    }

    /** Absorb one instance observed at the given timestamp. */
    public void insert(Instance instance, long timestamp) {
        N++;
        LST += timestamp;
        SST += timestamp * timestamp;
        for (int i = 0; i < instance.numValues(); i++) {
            LS[i] += instance.value(i);
            SS[i] += instance.value(i) * instance.value(i);
        }
    }

    /** Merge another kernel into this one (CF vectors are additive). */
    @Override
    public void add(CFCluster other2) {
        ClustreamKernel other = (ClustreamKernel) other2;
        assert (other.LS.length == this.LS.length);
        this.N += other.N;
        this.LST += other.LST;
        this.SST += other.SST;
        for (int i = 0; i < LS.length; i++) {
            this.LS[i] += other.LS[i];
            this.SS[i] += other.SS[i];
        }
    }

    /**
     * Approximate timestamp of the m/(2N) quantile of this kernel's arrival
     * times (mean time for small kernels), assuming normally distributed
     * timestamps; used to decide whether the kernel is outdated.
     */
    public double getRelevanceStamp() {
        if (N < 2 * m)
            return getMuTime();
        return getMuTime() + getSigmaTime() * getQuantile(((double) m) / (2 * N));
    }

    /** Mean of the absorbed timestamps. */
    private double getMuTime() {
        return LST / N;
    }

    /** Standard deviation of the absorbed timestamps. */
    private double getSigmaTime() {
        return Math.sqrt(SST / N - (LST / N) * (LST / N));
    }

    /** Standard-normal quantile via the inverse error function; z in [0, 1]. */
    private double getQuantile(double z) {
        assert (z >= 0 && z <= 1);
        return Math.sqrt(2) * inverseError(2 * z - 1);
    }

    /** Kernel radius: mean per-dimension deviation times the radius factor. */
    @Override
    public double getRadius() {
        // A singleton kernel has no spread.
        if (N == 1) return 0;
        // (Removed dead leftover debug statement "if (t==1) t=1;".)
        return getDeviation() * radiusFactor;
    }

    @Override
    public CFCluster getCF() {
        return this;
    }

    /** Mean of the per-dimension standard deviations. */
    private double getDeviation() {
        double[] variance = getVarianceVector();
        double sumOfDeviation = 0.0;
        for (int i = 0; i < variance.length; i++) {
            double d = Math.sqrt(variance[i]);
            sumOfDeviation += d;
        }
        return sumOfDeviation / variance.length;
    }

    /**
     * @return this kernel's center (mean of all absorbed points)
     */
    @Override
    public double[] getCenter() {
        assert (!this.isEmpty());
        double res[] = new double[this.LS.length];
        for (int i = 0; i < res.length; i++) {
            res[i] = this.LS[i] / N;
        }
        return res;
    }

    /**
     * Hard 0/1 inclusion: 1 if the instance lies within the kernel radius
     * (or, for a singleton kernel, within EPSILON of its only point).
     *
     * @param instance the point to test
     * @return 1.0 if included, 0.0 otherwise
     */
    @Override
    public double getInclusionProbability(Instance instance) {
        if (N == 1) {
            // Trivial cluster: LS holds the single point's coordinates.
            double distance = 0.0;
            for (int i = 0; i < LS.length; i++) {
                double d = LS[i] - instance.value(i);
                distance += d * d;
            }
            distance = Math.sqrt(distance);
            if (distance < EPSILON)
                return 1.0;
            return 0.0;
        } else {
            double dist = calcNormalizedDistance(instance.toDoubleArray());
            if (dist <= getRadius()) {
                return 1;
            } else {
                return 0;
            }
        }
    }

    /**
     * Per-dimension variance SS/N - (LS/N)^2. Tiny negative values caused by
     * numerical error are clamped to MIN_VARIANCE. Larger negative values are
     * left untouched, as in the original code, although they would make the
     * deviation NaN -- NOTE(review): presumably those indicate a bug upstream;
     * confirm before clamping them too.
     */
    private double[] getVarianceVector() {
        double[] res = new double[this.LS.length];
        for (int i = 0; i < this.LS.length; i++) {
            double ls = this.LS[i];
            double ss = this.SS[i];
            double lsDivN = ls / this.getWeight();
            double lsDivNSquared = lsDivN * lsDivN;
            double ssDivN = ss / this.getWeight();
            res[i] = ssDivN - lsDivNSquared;
            // Correct small negative values produced by numerical error.
            if (res[i] <= 0.0) {
                if (res[i] > -EPSILON) {
                    res[i] = MIN_VARIANCE;
                }
            }
        }
        return res;
    }

    /**
     * Check if this cluster is empty or not.
     *
     * @return <code>true</code> if the cluster has no data points,
     *         <code>false</code> otherwise.
     */
    public boolean isEmpty() {
        return this.N == 0;
    }

    /**
     * Distance from the given point to the cluster center. Despite the name,
     * the per-dimension variance normalization is disabled (it was commented
     * out in the original), so this is the plain Euclidean distance.
     *
     * @param point the point to which the distance is calculated
     * @return distance to the cluster center
     */
    private double calcNormalizedDistance(double[] point) {
        // Removed an unused getVarianceVector() call left over from the
        // disabled normalization.
        double[] center = getCenter();
        double res = 0.0;
        for (int i = 0; i < center.length; i++) {
            double diff = center[i] - point[i];
            res += (diff * diff); // variance[i] normalization intentionally disabled
        }
        return Math.sqrt(res);
    }

    /**
     * Approximates the inverse error function via a truncated Maclaurin
     * series. CluStream needs this for its timestamp quantiles.
     *
     * @param x argument in (-1, 1); accuracy degrades toward the boundaries
     * @return approximate value of erf^-1(x)
     */
    public static double inverseError(double x) {
        double z = Math.sqrt(Math.PI) * x;
        double res = (z) / 2;
        double z2 = z * z;
        double zProd = z * z2; // z^3
        res += (1.0 / 24) * zProd;
        zProd *= z2; // z^5
        res += (7.0 / 960) * zProd;
        zProd *= z2; // z^7
        res += (127 * zProd) / 80640;
        zProd *= z2; // z^9
        res += (4369 * zProd) / 11612160;
        zProd *= z2; // z^11
        res += (34807 * zProd) / 364953600;
        zProd *= z2; // z^13
        res += (20036983 * zProd) / 797058662400d;
        return res;
    }

    /** Adds the mean deviation to the GUI info shown for this cluster. */
    @Override
    protected void getClusterSpecificInfo(ArrayList<String> infoTitle, ArrayList<String> infoValue) {
        super.getClusterSpecificInfo(infoTitle, infoValue);
        infoTitle.add("Deviation");
        double[] variance = getVarianceVector();
        double sumOfDeviation = 0.0;
        for (int i = 0; i < variance.length; i++) {
            double d = Math.sqrt(variance[i]);
            sumOfDeviation += d;
        }
        sumOfDeviation /= variance.length;
        infoValue.add(Double.toString(sumOfDeviation));
    }
}
| Java |
/*
* AbstractClusterer.java
* Copyright (C) 2009 University of Waikato, Hamilton, New Zealand
* @author Albert Bifet (abifet@cs.waikato.ac.nz)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package moa.clusterers;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import moa.cluster.Clustering;
import moa.core.InstancesHeader;
import moa.core.Measurement;
import moa.core.ObjectRepository;
import moa.core.StringUtils;
import moa.gui.AWTRenderer;
import moa.options.AbstractOptionHandler;
import moa.options.FlagOption;
import moa.options.IntOption;
import moa.tasks.TaskMonitor;
import weka.core.Instance;
import weka.core.Instances;
/**
 * Base class for MOA clusterers. Provides the shared plumbing: optional
 * random-seed and micro-clustering-evaluation options, training-weight
 * bookkeeping, model-context (instance header) compatibility checking, and
 * measurement/description assembly. Concrete algorithms implement the
 * abstract *Impl hooks declared near the bottom of this class.
 */
public abstract class AbstractClusterer extends AbstractOptionHandler
        implements Clusterer {

    @Override
    public String getPurposeString() {
        return "MOA Clusterer: " + getClass().getCanonicalName();
    }

    // Header describing the instance format this model is trained with.
    protected InstancesHeader modelContext;

    // Sum of the weights of all instances trained on so far.
    protected double trainingWeightSeenByModel = 0.0;

    protected int randomSeed = 1;

    // Created in the constructor only when isRandomizable() returns true.
    protected IntOption randomSeedOption;

    // Created in the constructor only when implementsMicroClusterer() is true.
    public FlagOption evaluateMicroClusteringOption;

    protected Random clustererRandom;

    protected Clustering clustering;

    public AbstractClusterer() {
        // NOTE(review): calls overridable methods from the constructor;
        // subclasses must answer isRandomizable()/implementsMicroClusterer()
        // without depending on their own constructor state.
        if (isRandomizable()) {
            this.randomSeedOption = new IntOption("randomSeed", 'r',
                    "Seed for random behaviour of the Clusterer.", 1);
        }
        if (implementsMicroClusterer()) {
            this.evaluateMicroClusteringOption =
                    new FlagOption("evaluateMicroClustering", 'M',
                    "Evaluate the underlying microclustering instead of the macro clustering");
        }
    }

    // Applies the configured seed, resets learning if training has not begun,
    // and allocates a fresh (empty) clustering.
    @Override
    public void prepareForUseImpl(TaskMonitor monitor,
            ObjectRepository repository) {
        if (this.randomSeedOption != null) {
            this.randomSeed = this.randomSeedOption.getValue();
        }
        if (!trainingHasStarted()) {
            resetLearning();
        }
        clustering = new Clustering();
    }

    /**
     * Sets the expected instance header. Rejects headers without a class
     * attribute and, once training has started, headers that are not
     * compatible with the existing context (see contextIsCompatible).
     */
    public void setModelContext(InstancesHeader ih) {
        if ((ih != null) && (ih.classIndex() < 0)) {
            throw new IllegalArgumentException(
                    "Context for a Clusterer must include a class to learn");
        }
        if (trainingHasStarted()
                && (this.modelContext != null)
                && ((ih == null) || !contextIsCompatible(this.modelContext, ih))) {
            throw new IllegalArgumentException(
                    "New context is not compatible with existing model");
        }
        this.modelContext = ih;
    }

    public InstancesHeader getModelContext() {
        return this.modelContext;
    }

    public void setRandomSeed(int s) {
        this.randomSeed = s;
        if (this.randomSeedOption != null) {
            // keep option consistent
            this.randomSeedOption.setValue(s);
        }
    }

    // Training counts as started once any positive instance weight was seen.
    public boolean trainingHasStarted() {
        return this.trainingWeightSeenByModel > 0.0;
    }

    public double trainingWeightSeenByModel() {
        return this.trainingWeightSeenByModel;
    }

    // Resets the bookkeeping and the RNG (if randomizable), then delegates
    // to the subclass hook.
    public void resetLearning() {
        this.trainingWeightSeenByModel = 0.0;
        if (isRandomizable()) {
            this.clustererRandom = new Random(this.randomSeed);
        }
        resetLearningImpl();
    }

    // Zero-weight instances are skipped entirely (no bookkeeping, no training).
    public void trainOnInstance(Instance inst) {
        if (inst.weight() > 0.0) {
            this.trainingWeightSeenByModel += inst.weight();
            trainOnInstanceImpl(inst);
        }
    }

    /**
     * Collects the standard measurements (training weight and serialized
     * size; measureByteSize() is inherited -- presumably from
     * AbstractOptionHandler, verify), the subclass measurements, and the
     * averaged measurements of any sub-clusterers.
     */
    public Measurement[] getModelMeasurements() {
        List<Measurement> measurementList = new LinkedList<Measurement>();
        measurementList.add(new Measurement("model training instances",
                trainingWeightSeenByModel()));
        measurementList.add(new Measurement("model serialized size (bytes)",
                measureByteSize()));
        Measurement[] modelMeasurements = getModelMeasurementsImpl();
        if (modelMeasurements != null) {
            for (Measurement measurement : modelMeasurements) {
                measurementList.add(measurement);
            }
        }
        // add average of sub-model measurements
        Clusterer[] subModels = getSubClusterers();
        if ((subModels != null) && (subModels.length > 0)) {
            List<Measurement[]> subMeasurements = new LinkedList<Measurement[]>();
            for (Clusterer subModel : subModels) {
                if (subModel != null) {
                    subMeasurements.add(subModel.getModelMeasurements());
                }
            }
            Measurement[] avgMeasurements = Measurement
                    .averageMeasurements(subMeasurements
                    .toArray(new Measurement[subMeasurements.size()][]));
            for (Measurement measurement : avgMeasurements) {
                measurementList.add(measurement);
            }
        }
        return measurementList.toArray(new Measurement[measurementList.size()]);
    }

    // Textual report: type, measurements, and (if trained) the model itself.
    public void getDescription(StringBuilder out, int indent) {
        StringUtils.appendIndented(out, indent, "Model type: ");
        out.append(this.getClass().getName());
        StringUtils.appendNewline(out);
        Measurement.getMeasurementsDescription(getModelMeasurements(), out,
                indent);
        StringUtils.appendNewlineIndented(out, indent, "Model description:");
        StringUtils.appendNewline(out);
        if (trainingHasStarted()) {
            getModelDescription(out, indent);
        } else {
            StringUtils.appendIndented(out, indent,
                    "Model has not been trained.");
        }
    }

    // Subclasses composed of multiple clusterers override this; the default
    // reports no sub-models.
    public Clusterer[] getSubClusterers() {
        return null;
    }

    @Override
    public Clusterer copy() {
        return (Clusterer) super.copy();
    }

    // public boolean correctlyClassifies(Instance inst) {
    // return Utils.maxIndex(getVotesForInstance(inst)) == (int) inst
    // .classValue();
    // }

    public String getClassNameString() {
        return InstancesHeader.getClassNameString(this.modelContext);
    }

    public String getClassLabelString(int classLabelIndex) {
        return InstancesHeader.getClassLabelString(this.modelContext,
                classLabelIndex);
    }

    public String getAttributeNameString(int attIndex) {
        return InstancesHeader.getAttributeNameString(this.modelContext,
                attIndex);
    }

    public String getNominalValueString(int attIndex, int valIndex) {
        return InstancesHeader.getNominalValueString(this.modelContext,
                attIndex, valIndex);
    }

    // originalContext notnull
    // newContext notnull
    /**
     * Decides whether a new header can replace the one the model was trained
     * with, walking both attribute lists in parallel while skipping each
     * header's class attribute.
     */
    public static boolean contextIsCompatible(InstancesHeader originalContext,
            InstancesHeader newContext) {
        // rule 1: num classes can increase but never decrease
        // rule 2: num attributes can increase but never decrease
        // rule 3: num nominal attribute values can increase but never decrease
        // rule 4: attribute types must stay in the same order (although class
        // can
        // move; is always skipped over)
        // attribute names are free to change, but should always still represent
        // the original attributes
        if (newContext.numClasses() < originalContext.numClasses()) {
            return false; // rule 1
        }
        if (newContext.numAttributes() < originalContext.numAttributes()) {
            return false; // rule 2
        }
        int oPos = 0;
        int nPos = 0;
        while (oPos < originalContext.numAttributes()) {
            if (oPos == originalContext.classIndex()) {
                oPos++;
                if (!(oPos < originalContext.numAttributes())) {
                    break;
                }
            }
            if (nPos == newContext.classIndex()) {
                nPos++;
            }
            if (originalContext.attribute(oPos).isNominal()) {
                if (!newContext.attribute(nPos).isNominal()) {
                    return false; // rule 4
                }
                if (newContext.attribute(nPos).numValues() < originalContext
                        .attribute(oPos).numValues()) {
                    return false; // rule 3
                }
            } else {
                assert (originalContext.attribute(oPos).isNumeric());
                if (!newContext.attribute(nPos).isNumeric()) {
                    return false; // rule 4
                }
            }
            oPos++;
            nPos++;
        }
        return true; // all checks clear
    }

    public AWTRenderer getAWTRenderer() {
        // TODO should return a default renderer here
        // - or should null be interpreted as the default?
        return null;
    }

    // reason for ...Impl methods:
    // ease programmer burden by not requiring them to remember calls to super
    // in overridden methods & will produce compiler errors if not overridden

    /** Subclass hook: clear all learned state. */
    public abstract void resetLearningImpl();

    /** Subclass hook: incorporate one (positive-weight) training instance. */
    public abstract void trainOnInstanceImpl(Instance inst);

    /** Subclass hook: algorithm-specific measurements (may return null). */
    protected abstract Measurement[] getModelMeasurementsImpl();

    /** Subclass hook: append a textual description of the learned model. */
    public abstract void getModelDescription(StringBuilder out, int indent);

    // Maps a model attribute index to the instance attribute index, skipping
    // over the class attribute.
    protected static int modelAttIndexToInstanceAttIndex(int index,
            Instance inst) {
        return inst.classIndex() > index ? index : index + 1;
    }

    protected static int modelAttIndexToInstanceAttIndex(int index,
            Instances insts) {
        return insts.classIndex() > index ? index : index + 1;
    }

    // Default: no micro-clustering support; micro-clusterers override both.
    public boolean implementsMicroClusterer() {
        return false;
    }

    public boolean keepClassLabel() {
        return false;
    }

    public Clustering getMicroClusteringResult() {
        return null;
    };
}
| Java |
package moa.clusterers.streamkm;
/**
*
* @author Marcel R. Ackermann, Christiane Lammersen, Marcus Maertens, Christoph Raupach,
Christian Sohler, Kamil Swierkot
*/
public class TreeCoreset {
/**
 * Node of the coreset tree: owns a set of points, a representative centre,
 * and caches the clustering cost of its points with respect to that centre.
 */
protected class treeNode {

    /** Number of points stored in this node. */
    int n;
    /** The points assigned to this node. */
    Point[] points;
    /** Representative centre of this node. */
    Point centre;
    /** Left child (null for a leaf). */
    treeNode lc;
    /** Right child (null for a leaf). */
    treeNode rc;
    /** Parent node (null for the root). */
    treeNode parent;
    /** Cached cost of this node's points w.r.t. its centre. */
    double cost;

    /** Drops all references so the node (and its points) can be collected. */
    void free() {
        this.parent = null;
        this.lc = null;
        this.rc = null;
        this.points = null;
        this.centre = null;
    }

    public treeNode(int n, Point[] points, Point centre, treeNode parent) {
        this.n = n;
        this.points = points;
        this.centre = centre;
        this.lc = null;
        this.rc = null;
        this.parent = parent;
        this.cost = treeNodeTargetFunctionValue(); // (removed stray ';;')
    }

    /**
     * Initializes a root node whose point set is the union of setA and setB
     * and whose centre is the given centre; every point is tagged with
     * centreIndex.
     */
    public treeNode(Point[] setA, Point[] setB, int n_1, int n_2, Point centre, int centreIndex) {
        // The root has no parent and no children initially.
        this.parent = null;
        this.lc = null;
        this.rc = null;
        this.points = new Point[n_1 + n_2];
        this.n = n_1 + n_2;
        for (int i = 0; i < this.n; i++) {
            if (i < n_1) {
                this.points[i] = setA[i];
            } else {
                this.points[i] = setB[i - n_1];
            }
            this.points[i].centreIndex = centreIndex;
        }
        this.centre = centre;
        this.cost = treeNodeTargetFunctionValue();
    }

    /**
     * Cost of this node's n points w.r.t. the node's centre: sum of weighted
     * squared distances between de-weighted (centroid) coordinates. Differs
     * from a generic target function in that (1) only this node's centre is
     * used, (2) it operates on the node's point references, and (3) callers
     * cache the result in {@link #cost}.
     */
    double treeNodeTargetFunctionValue() {
        double sum = 0.0;
        for (int i = 0; i < this.n; i++) {
            double distance = 0.0;
            for (int l = 0; l < this.points[i].dimension; l++) {
                // De-weighted (centroid) coordinate of the point; coordinates
                // store weighted sums, so divide by weight when non-zero.
                double centroidCoordinatePoint;
                if (this.points[i].weight != 0.0) {
                    centroidCoordinatePoint = this.points[i].coordinates[l] / this.points[i].weight;
                } else {
                    centroidCoordinatePoint = this.points[i].coordinates[l];
                }
                // De-weighted (centroid) coordinate of the centre.
                double centroidCoordinateCentre;
                if (this.centre.weight != 0.0) {
                    centroidCoordinateCentre = this.centre.coordinates[l] / this.centre.weight;
                } else {
                    centroidCoordinateCentre = this.centre.coordinates[l];
                }
                double diff = centroidCoordinatePoint - centroidCoordinateCentre;
                distance += diff * diff;
            }
            sum += distance * this.points[i].weight;
        }
        return sum;
    }
}
/**
 * Hypothetical cost of the node if its points were split between the two
 * new centres: each point contributes its weighted squared centroid
 * distance to whichever of centreA / centreB is closer.
 */
double treeNodeSplitCost(treeNode node, Point centreA, Point centreB) {
    double total = 0.0;
    for (int i = 0; i < node.n; i++) {
        Point p = node.points[i];
        double distanceA = 0.0;
        double distanceB = 0.0;
        // Single pass over the dimensions accumulates both distances.
        for (int l = 0; l < p.dimension; l++) {
            // De-weighted (centroid) coordinates; coordinates hold weighted sums.
            double pointCoord = (p.weight != 0.0)
                    ? p.coordinates[l] / p.weight : p.coordinates[l];
            double aCoord = (centreA.weight != 0.0)
                    ? centreA.coordinates[l] / centreA.weight : centreA.coordinates[l];
            double bCoord = (centreB.weight != 0.0)
                    ? centreB.coordinates[l] / centreB.weight : centreB.coordinates[l];
            double diffA = pointCoord - aCoord;
            double diffB = pointCoord - bCoord;
            distanceA += diffA * diffA;
            distanceB += diffB * diffB;
        }
        // Charge the point to its closer centre.
        total += Math.min(distanceA, distanceB) * p.weight;
    }
    return total;
}
/**
computes the cost of point p with the centre of treenode node
**/
double treeNodeCostOfPoint(treeNode node, Point p){
if(p.weight == 0.0){
return 0.0;
}
//stores the distance between centre and p
double distance = 0.0;
//loop counter variable
int l;
for(l=0;l<p.dimension;l++){
//centroid coordinate of the point
double centroidCoordinatePoint;
if(p.weight != 0.0){
centroidCoordinatePoint = p.coordinates[l] / p.weight;
} else {
centroidCoordinatePoint = p.coordinates[l];
}
//centroid coordinate of the centre
double centroidCoordinateCentre;
if(node.centre.weight != 0.0){
centroidCoordinateCentre = node.centre.coordinates[l] / node.centre.weight;
} else {
centroidCoordinateCentre = node.centre.coordinates[l];
}
distance += (centroidCoordinatePoint-centroidCoordinateCentre) *
(centroidCoordinatePoint-centroidCoordinateCentre) ;
}
return distance * p.weight;
}
/**
tests if a node is a leaf
**/
boolean isLeaf(treeNode node){
if(node.lc == null && node.rc == null){
return true;
} else {
return false;
}
}
/**
selects a leaf node (using the kMeans++ distribution)
**/
	treeNode selectNode(treeNode root, MTRandom clustererRandom){
		//random number between 0 and 1
		double random = clustererRandom.nextDouble();
		// walk down from the root to a leaf; at each inner node descend into a
		// child chosen according to its share of the node's cost
		while(!isLeaf(root)){
			if(root.lc.cost == 0 && root.rc.cost == 0){
				// both subtrees have zero cost: prefer a non-empty child,
				// otherwise pick one uniformly and redraw for the next level
				if(root.lc.n == 0){
					root = root.rc;
				} else if(root.rc.n == 0){
					root = root.lc;
				}else if(random < 0.5){
					random = clustererRandom.nextDouble();
					root = root.lc;
				} else {
					random = clustererRandom.nextDouble();
					root = root.rc;
				}
			} else {
				// descend left when random falls below the left child's cost
				// fraction. NOTE(review): `random` is NOT redrawn on this path,
				// so the same draw is reused at every level of the descent —
				// presumably intended to sample a leaf with probability
				// proportional to its cost; confirm against the original C code.
				if(random < root.lc.cost/root.cost){
					root = root.lc;
				} else {
					root = root.rc;
				}
			}
		}
		return root;
	}
/**
selects a new centre from the treenode (using the kMeans++ distribution)
**/
	Point chooseCentre(treeNode node, MTRandom clustererRandom){
		// number of sampling attempts; each attempt draws one candidate centre
		// from the node's points with probability proportional to its cost
		int times = 3;
		// cost of the node if split with the best candidate found so far;
		// initialised to the unsplit cost so only improvements are kept
		double minCost = node.cost;
		Point bestCentre = null;
		//loop counter variable
		int i;
		int j;
		for(j=0;j<times;j++){
			// cumulative cost fraction of the points seen so far (in [0,1])
			double sum = 0.0;
			//random number between 0 and 1
			double random = clustererRandom.nextDouble();
			for(i=0;i<node.n;i++){
				sum += treeNodeCostOfPoint(node,node.points[i]) / node.cost;
				if(sum >= random){
					// a zero-weight point is a dummy and must not become a centre
					if(node.points[i].weight == 0.0){
						//printf("ERROR: CHOOSEN DUMMY NODE THOUGH OTHER AVAILABLE \n");
						return null;
					}
					// keep the candidate only if splitting with it lowers the cost
					double curCost = treeNodeSplitCost(node,node.centre,node.points[i]);
					if(curCost < minCost){
						bestCentre = node.points[i];
						minCost = curCost;
					}
					break;
				}
			}
		}
		// fall back to the first point if no candidate improved the cost
		if(bestCentre == null){
			return node.points[0];
		} else {
			return bestCentre;
		}
	}
/**
returns the next centre
**/
Point determineClosestCentre(Point p, Point centreA, Point centreB){
//loop counter variable
int l;
//stores the distance between p and centreA
double distanceA = 0.0;
for(l=0;l<p.dimension;l++){
//centroid coordinate of the point
double centroidCoordinatePoint;
if(p.weight != 0.0){
centroidCoordinatePoint = p.coordinates[l] / p.weight;
} else {
centroidCoordinatePoint = p.coordinates[l];
}
//centroid coordinate of the centre
double centroidCoordinateCentre;
if(centreA.weight != 0.0){
centroidCoordinateCentre = centreA.coordinates[l] / centreA.weight;
} else {
centroidCoordinateCentre = centreA.coordinates[l];
}
distanceA += (centroidCoordinatePoint-centroidCoordinateCentre) *
(centroidCoordinatePoint-centroidCoordinateCentre) ;
}
//stores the distance between p and centreB
double distanceB = 0.0;
for(l=0;l<p.dimension;l++){
//centroid coordinate of the point
double centroidCoordinatePoint;
if(p.weight != 0.0){
centroidCoordinatePoint = p.coordinates[l] / p.weight;
} else {
centroidCoordinatePoint = p.coordinates[l];
}
//centroid coordinate of the centre
double centroidCoordinateCentre;
if(centreB.weight != 0.0){
centroidCoordinateCentre = centreB.coordinates[l] / centreB.weight;
} else {
centroidCoordinateCentre = centreB.coordinates[l];
}
distanceB += (centroidCoordinatePoint-centroidCoordinateCentre) *
(centroidCoordinatePoint-centroidCoordinateCentre) ;
}
//return the nearest centre
if(distanceA < distanceB){
return centreA;
} else {
return centreB;
}
}
/**
splits the parent node and creates two child nodes (one with the old centre and one with the new one)
**/
void split(treeNode parent, Point newCentre, int newCentreIndex){
//loop counter variable
int i;
//1. Counts how many points belong to the new and how many points belong to the old centre
int nOld = 0;
int nNew = 0;
for(i=0;i<parent.n;i++){
Point centre = determineClosestCentre(parent.points[i], parent.centre, newCentre);
if(centre == newCentre){
nNew++;
} else {
nOld++;
}
}
//2. initalizes the arrays for the pointer
//array for pointer on the points belonging to the old centre
Point[] oldPoints = new Point[nOld];
//array for pointer on the points belonging to the new centre
Point[] newPoints = new Point[nNew];
int indexOld = 0;
int indexNew = 0;
for(i=0;i<parent.n;i++){
Point centre = determineClosestCentre(parent.points[i],parent.centre,newCentre);
if(centre == newCentre){
newPoints[indexNew] = parent.points[i];
newPoints[indexNew].centreIndex = newCentreIndex;
indexNew++;
} else if(centre == parent.centre){
oldPoints[indexOld] = parent.points[i];
indexOld++;
} else {
//printf("ERROR !!! NO CENTER NEAREST !! \n");
}
}
//left child: old centre
treeNode lc = new treeNode(nOld, oldPoints,
parent.centre, parent);
/*lc.centre = parent.centre;
lc.points = oldPoints;
lc.n = nOld;
lc.lc = null;
lc.rc = null;
lc.parent = parent;
treeNodeTargetFunctionValue(lc);*/
//right child: new centre
treeNode rc = new treeNode(nNew, newPoints, newCentre,
parent);
/*rc.centre = newCentre;
rc.points = newPoints;
rc.n = nNew;
rc.lc = null;
rc.rc = null;
rc.parent = parent;
treeNodeTargetFunctionValue(rc);*/
//set childs of the parent node
parent.lc = lc;
parent.rc = rc;
//propagate the cost changes to the parent nodes
while(parent != null){
parent.cost = parent.lc.cost + parent.rc.cost;
parent = parent.parent;
}
}
/**
Checks if the storage is completly freed
**/
boolean treeFinished(treeNode root){
return (root.parent == null && root.lc == null && root.rc == null);
}
/**
frees a tree of its storage
**/
	void freeTree(treeNode root){
		// iteratively release the tree bottom-up without recursion:
		// descend to a node whose child is a leaf, free that leaf, and climb
		// back up once both children of the current node are gone
		while(!treeFinished(root)){
			if(root.lc == null && root.rc == null){
				// both children already freed: move back to the parent
				root = root.parent;
			} else if(root.lc == null && root.rc != null){
				//check whether the right child is a leaf
				if(isLeaf(root.rc)){
					//free the right child
					root.rc.free();
					root.rc = null;
				} else {
					//continue with the right child
					root = root.rc;
				}
			} else if(root.lc != null) {
				//same handling for the left child
				if(isLeaf(root.lc)){
					root.lc.free();
					root.lc = null;
				} else {
					root = root.lc;
				}
			}
		}
		// finally release the root itself
		root.free();
	}
/**
Constructs a coreset of size k from the union of setA and setB
**/
	void unionTreeCoreset(int k,int n_1,int n_2,int d, Point[] setA,Point[] setB, Point[] centres, MTRandom clustererRandom) {
		// Builds a coreset of size k from the union of setA (n_1 points) and
		// setB (n_2 points), writing the chosen centres into centres[] and
		// updating each input point's centreIndex as a side effect.
		//total number of points
		int n = n_1+n_2;
		//choose the first centre (each point has the same probability of being choosen)
		//stores, how many centres have been choosen yet
		int choosenPoints = 0;
		//only choose from the n-i points not already choosen
		int j = clustererRandom.nextInt(n-choosenPoints);
		//copy the choosen point (index j addresses setA first, then setB)
		if(j < n_1){
			//copyPointWithoutInit(&setA[j],&centres[choosenPoints]);
			centres[choosenPoints] = setA[j].clone();
		} else {
			j = j - n_1;
			//copyPointWithoutInit(&setB[j],&centres[choosenPoints]);
			centres[choosenPoints] = setB[j].clone();
		}
		// root of the coreset tree; assigns every point to the first centre
		treeNode root = new treeNode(setA,setB,n_1,n_2, centres[choosenPoints],choosenPoints);
		choosenPoints = 1;
		//choose the remaining points
		while(choosenPoints < k){
			if(root.cost > 0.0){
				// sample a leaf and a new centre from it (kMeans++ style),
				// then split the leaf around the new centre
				treeNode leaf = selectNode(root, clustererRandom);
				Point centre = chooseCentre(leaf, clustererRandom);
				split(leaf,centre,choosenPoints);
				//copyPointWithoutInit(centre,&centres[choosenPoints]);
				centres[choosenPoints] = centre;
			} else {
				//create a dummy point (zero weight, off-range coordinates)
				//copyPointWithoutInit(root.centre,&centres[choosenPoints]);
				centres[choosenPoints] = root.centre;
				int l;
				for(l=0;l<root.centre.dimension;l++){
					centres[choosenPoints].coordinates[l] = -1 * 1000000;
				}
				centres[choosenPoints].id = -1;
				centres[choosenPoints].weight = 0.0;
				centres[choosenPoints].squareSum = 0.0;
			}
			choosenPoints++;
		}
		//free the tree
		freeTree(root);
		// recalculate clustering features: fold every non-centre point's
		// weight, squareSum and (weighted) linear sum into its assigned centre
		int i;
		for(i=0;i<n;i++){
			if(i < n_1) {
				int index = setA[i].centreIndex;
				// a centre must not absorb itself, hence the id check
				if(centres[index].id != setA[i].id){
					centres[index].weight += setA[i].weight;
					centres[index].squareSum += setA[i].squareSum;
					int l;
					for(l=0;l<centres[index].dimension;l++){
						if(setA[i].weight != 0.0){
							centres[index].coordinates[l] += setA[i].coordinates[l];
						}
					}
				}
			} else {
				// same aggregation for the points coming from setB
				int index = setB[i-n_1].centreIndex;
				if(centres[index].id != setB[i-n_1].id){
					centres[index].weight += setB[i-n_1].weight;
					centres[index].squareSum += setB[i-n_1].squareSum;
					int l;
					for(l=0;l<centres[index].dimension;l++){
						if(setB[i-n_1].weight != 0.0){
							centres[index].coordinates[l] += setB[i-n_1].coordinates[l];
						}
					}
				}
			}
		}
	}
}
| Java |
package moa.clusterers.streamkm;
import moa.cluster.Cluster;
import moa.cluster.SphereCluster;
import weka.core.Instance;
/**
*
* @author Marcel R. Ackermann, Christiane Lammersen, Marcus Maertens, Christoph Raupach,
Christian Sohler, Kamil Swierkot
*/
/**
 * A weighted point used by the StreamKM++ coreset machinery. The
 * {@code coordinates} array stores the WEIGHTED LINEAR SUM of the merged
 * points, so the actual centroid coordinate is {@code coordinates[l] / weight}
 * whenever {@code weight != 0}.
 */
public class Point {
	//dimension of the point
	int dimension;
	//Clustering Features: weight, sum of squared values, linear sum (coordinates)
	double weight;
	double squareSum;
	double[] coordinates;
	//cost and index of the centre, the point is currently assigned to
	double curCost;
	int centreIndex;
	//id and class (if there is class information in the file)
	int id;
	int cl;

	/**
	 * Creates a zero point of the given dimension with weight 1 and no
	 * assigned centre.
	 */
	public Point(int dimension){
		this.weight = 1.0;
		this.squareSum = 0.0;
		this.dimension = dimension;
		this.coordinates = new double[dimension];
		this.id = -1;
		this.cl = -1;
		this.curCost = 0;
		this.centreIndex = -1;
		for(int l=0;l<dimension;l++){
			this.coordinates[l] = 0.0;
		}
	}

	/**
	 * Wraps a weka instance as a point, copying its attribute values and
	 * weight and accumulating the sum of squared attribute values.
	 *
	 * @param inst the instance to convert
	 * @param id   unique id assigned to this point (stream position)
	 */
	public Point(Instance inst, int id){
		this.weight = inst.weight();
		this.squareSum = 0.0;
		this.dimension = inst.numAttributes();
		this.coordinates = new double[this.dimension];
		this.id = id;
		this.cl = 0;// NOT USED (int) inst.classValue();
		this.curCost = 0;
		this.centreIndex = -1;
		for(int l=0;l<this.dimension;l++){
			double value = inst.value(l);
			this.coordinates[l] = value;
			// BUGFIX: accumulate value^2. The previous code computed
			// nextNumber = value*value and then added nextNumber*nextNumber,
			// i.e. value^4, which is not a sum of squares.
			this.squareSum += value * value;
		}
	}

	/**
	 * Returns a deep copy of this point (the coordinate array is cloned).
	 */
	public Point clone(){
		Point res = new Point(this.dimension);
		res.weight = this.weight;
		res.squareSum = this.squareSum;
		res.dimension = this.dimension;
		res.coordinates = this.coordinates.clone();
		res.id = this.id;
		res.cl = this.cl;
		res.curCost = this.curCost;
		res.centreIndex = this.centreIndex;
		return res;
	}

	/**
	 * Converts this point into a SphereCluster with radius 1 and this point's
	 * weight. NOTE(review): the raw (weighted-sum) coordinates are used, not
	 * the centroid coordinates/weight — confirm this matches the consumers.
	 */
	public Cluster toCluster(){
		return (new SphereCluster(this.coordinates, 1, this.weight)); //Radius =1?
	}

	/**
	 * Computes the weighted cost of this point with respect to the nearest of
	 * the given centres.
	 *
	 * @param k       number of centres to consider
	 * @param centres candidate centres (at least k entries)
	 * @return weight * squared centroid distance to the nearest centre
	 */
	public double costOfPoint(int k, Point[] centres){
		double nearestCost = -1.0;
		for(int j=0; j<k; j++){
			double distance = 0.0;
			for(int l=0;l<this.dimension;l++){
				//centroid coordinate of this point
				double centroidCoordinatePoint;
				if(this.weight != 0.0){
					centroidCoordinatePoint = this.coordinates[l] / this.weight;
				} else {
					centroidCoordinatePoint = this.coordinates[l];
				}
				//centroid coordinate of the centre
				double centroidCoordinateCentre;
				if(centres[j].weight != 0.0){
					centroidCoordinateCentre = centres[j].coordinates[l] / centres[j].weight;
				} else {
					centroidCoordinateCentre = centres[j].coordinates[l];
				}
				distance += (centroidCoordinatePoint-centroidCoordinateCentre) *
							(centroidCoordinatePoint-centroidCoordinateCentre) ;
			}
			//nearestCost starts at -1 so the first distance is always taken
			if(nearestCost <0 || distance < nearestCost) {
				nearestCost = distance;
			}
		}
		return this.weight * nearestCost;
	}

	/**
	 * Computes the index of the centre nearest to this point.
	 *
	 * @param k       number of centres to consider
	 * @param centres candidate centres (at least k entries)
	 * @return index in [0, k) of the nearest centre
	 */
	public int determineClusterCentreKMeans(int k, Point[] centres){
		int centre = 0;
		double nearestCost = -1.0;
		for(int j=0; j < k; j++){
			double distance = 0.0;
			for(int l=0; l<this.dimension;l++){
				//centroid coordinate of this point
				double centroidCoordinatePoint;
				if(this.weight != 0.0){
					centroidCoordinatePoint = this.coordinates[l] / this.weight;
				} else {
					centroidCoordinatePoint = this.coordinates[l];
				}
				//centroid coordinate of the centre
				double centroidCoordinateCentre;
				if(centres[j].weight != 0.0){
					centroidCoordinateCentre = centres[j].coordinates[l] / centres[j].weight;
				} else {
					centroidCoordinateCentre = centres[j].coordinates[l];
				}
				distance += (centroidCoordinatePoint-centroidCoordinateCentre) *
							(centroidCoordinatePoint-centroidCoordinateCentre) ;
			}
			if(nearestCost <0 || distance < nearestCost) {
				nearestCost = distance;
				centre = j;
			}
		}
		return centre;
	}

	/**
	 * Computes the weighted cost of this point with respect to a single centre.
	 *
	 * @param centre the centre to measure against
	 * @return weight * squared centroid distance; 0 for dummy points (weight 0)
	 */
	public double costOfPointToCenter(Point centre){
		if(this.weight == 0.0){
			return 0.0;
		}
		//squared centroid distance between this point and the centre
		double distance = 0.0;
		for(int l=0; l<this.dimension; l++){
			//centroid coordinate of this point
			double centroidCoordinatePoint;
			if(this.weight != 0.0){
				centroidCoordinatePoint = this.coordinates[l] / this.weight;
			} else {
				centroidCoordinatePoint = this.coordinates[l];
			}
			//centroid coordinate of the centre
			double centroidCoordinateCentre;
			if(centre.weight != 0.0){
				centroidCoordinateCentre = centre.coordinates[l] / centre.weight;
			} else {
				centroidCoordinateCentre = centre.coordinates[l];
			}
			distance += (centroidCoordinatePoint-centroidCoordinateCentre) *
						(centroidCoordinatePoint-centroidCoordinateCentre) ;
		}
		return distance * this.weight;
	}
}
| Java |
package moa.clusterers.streamkm;
import moa.cluster.Cluster;
import moa.cluster.Clustering;
import moa.clusterers.AbstractClusterer;
import moa.core.Measurement;
import moa.options.IntOption;
import weka.core.Instance;
/**
*
* @author Marcel R. Ackermann, Christiane Lammersen, Marcus Maertens, Christoph Raupach,
Christian Sohler, Kamil Swierkot
Citation: Marcel R. Ackermann, Christiane Lammersen, Marcus Märtens,
Christoph Raupach, Christian Sohler, Kamil Swierkot: StreamKM++: A
Clustering Algorithms for Data Streams. ALENEX 2010: 173-187
*/
/**
 * StreamKM++ clusterer: maintains a coreset of the stream via a BucketManager
 * and periodically runs kMeans++ (with Lloyd refinement) on the coreset.
 */
public class StreamKM extends AbstractClusterer {

	public IntOption sizeCoresetOption = new IntOption("sizeCoreset",
			's', "Size of the coreset.", 10000);

	public IntOption numClustersOption = new IntOption(
			"numClusters", 'k',
			"Number of clusters to compute.", 5);

	public IntOption widthOption = new IntOption("width",
			'w', "Size of Window for training learner.", 100000, 0, Integer.MAX_VALUE);

	public IntOption randomSeedOption = new IntOption("randomSeed", 'r',
			"Seed for random behaviour of the classifier.", 1);

	protected MTRandom clustererRandom;
	//current best set of k cluster centres
	protected Point[] centresStreamingCoreset;

	//number of instances seen so far
	protected int numberInstances;
	protected int dimension;
	protected int length;
	protected int numberOfCentres;
	protected int coresetsize;
	protected BucketManager manager;
	protected boolean initialized = false;
	//Lloyd iterations stop once newCost >= THRESHOLD * cost (no improvement)
	private final static double THRESHOLD = 1.000;

	@Override
	public void resetLearningImpl() {
		this.initialized = false;
		this.coresetsize = sizeCoresetOption.getValue();
		this.numberOfCentres = numClustersOption.getValue();
		this.length = widthOption.getValue();
		this.centresStreamingCoreset = new Point[this.numberOfCentres];
		//initialize random generator with seed
		this.clustererRandom = new MTRandom(this.randomSeedOption.getValue());
	}

	@Override
	public void trainOnInstanceImpl(Instance inst) {
		//lazy initialization: the dimension is only known once the first instance arrives
		if (this.initialized == false) {
			this.dimension = inst.numAttributes();
			manager = new BucketManager(this.length, this.dimension, this.coresetsize, this.clustererRandom);
			this.initialized = true;
		}

		manager.insertPoint(new Point(inst, this.numberInstances));

		this.numberInstances++;
		if (this.numberInstances % widthOption.getValue() == 0) {
			Point[] streamingCoreset = manager.getCoresetFromManager(dimension);

			//compute 5 clusterings of the coreset with kMeans++ and keep the best
			double minCost = lloydPlusPlus(numberOfCentres, coresetsize, dimension,
					streamingCoreset, centresStreamingCoreset);
			for (int i = 1; i < 5; i++) {
				// BUGFIX: the scratch array must have room for all k centres;
				// it used to be new Point[0], so the extra restarts were
				// silently discarded
				Point[] tmpCentresStreamingCoreset = new Point[this.numberOfCentres];
				double curCost = lloydPlusPlus(numberOfCentres, coresetsize, dimension,
						streamingCoreset, tmpCentresStreamingCoreset);
				if (curCost < minCost) {
					minCost = curCost;
					centresStreamingCoreset = tmpCentresStreamingCoreset;
				}
			}
		}
	}

	@Override
	protected Measurement[] getModelMeasurementsImpl() {
		throw new UnsupportedOperationException("Not supported yet.");
	}

	@Override
	public void getModelDescription(StringBuilder out, int indent) {
		throw new UnsupportedOperationException("Not supported yet.");
	}

	public boolean isRandomizable() {
		return true;
	}

	public double[] getVotesForInstance(Instance inst) {
		throw new UnsupportedOperationException("Not supported yet.");
	}

	/**
	 * Returns the current clustering: one SphereCluster per computed centre,
	 * or an empty Clustering before the first instance has been seen.
	 */
	@Override
	public Clustering getClusteringResult() {
		if ( !this.initialized ) {
			return new Clustering();
		}
		Clustering clustering = new Clustering();
		for ( int i = 0; i < centresStreamingCoreset.length; i++ ) {
			if(centresStreamingCoreset[i] != null){
				clustering.add(centresStreamingCoreset[i].toCluster());
			}
		}
		return clustering;
	}

	/**
	 * Runs kMeans++ seeding followed by Lloyd iterations on points[0..n) and
	 * stores the resulting k centres in the caller-supplied centres[] array.
	 *
	 * @param k       number of centres
	 * @param n       number of points
	 * @param d       dimension
	 * @param points  input points (weighted)
	 * @param centres output array of length >= k, filled with the final centres
	 * @return the final clustering cost
	 */
	public double lloydPlusPlus(int k, int n, int d, Point points[], Point centres[]){
		// BUGFIX: Java is pass-by-value, so reassigning the `centres` parameter
		// (as the original code did) never reached the caller; copy the seeded
		// centres into the supplied array instead.
		Point[] seeded = chooseRandomCentres(k, n, d, points);
		for(int c = 0; c < k; c++){
			centres[c] = seeded[c];
		}
		double cost = targetFunctionValue(k, n, centres, points);
		double newCost = cost;

		Point[] massCentres = new Point[k];
		double[] numberOfPoints = new double[k];

		do{
			cost = newCost;
			//reset centres of mass
			for(int i = 0; i < k; i++){
				massCentres[i] = new Point(d);
				numberOfPoints[i] = 0.0;
			}
			//accumulate the weighted linear sums per nearest centre
			for(int i = 0; i < n; i++){
				int centre = points[i].determineClusterCentreKMeans(k,centres);
				for(int l = 0; l < massCentres[centre].dimension; l++){
					if(points[i].weight != 0.0)
						massCentres[centre].coordinates[l] += points[i].coordinates[l];
				}
				numberOfPoints[centre] += points[i].weight;
			}
			//move the centres to their centres of mass
			for(int i=0; i<k; i++){
				for(int l=0; l<centres[i].dimension; l++){
					centres[i].coordinates[l] = massCentres[i].coordinates[l];
				}
				//weight assignment hoisted out of the dimension loop
				centres[i].weight = numberOfPoints[i];
			}
			//recalculate the cost
			newCost = targetFunctionValue(k, n, centres, points);
		} while (newCost < THRESHOLD * cost);

		return newCost;
	}

	/**
	 * Chooses k initial centres from points[0..n) with the kMeans++
	 * distribution, skipping dummy points (weight < 1). As a side effect every
	 * point's curCost/centreIndex is set to its nearest chosen centre.
	 */
	private Point[] chooseRandomCentres(int k, int n, int d, Point points[]){
		//array to store the chosen centres
		Point[] centres = new Point[k];

		//choose the first centre uniformly among non-dummy points
		int next;
		do{
			// BUGFIX: bound was nextInt(n-1), which could never select the
			// last point; nextInt(n) draws uniformly from [0, n)
			next = this.clustererRandom.nextInt(n);
		} while( points[next].weight < 1);

		centres[0] = points[next].clone();
		//assign every point to the first centre
		for(int i = 0; i < n; i++){
			points[i].centreIndex = 0;
			points[i].curCost = points[i].costOfPointToCenter(centres[0]);
		}

		//choose centres 1 to k-1 with the kMeans++ distribution
		for(int i = 1; i < k; i++){
			//total current cost, used to normalise the sampling
			double cost = 0.0;
			for(int j = 0; j < n; j++){
				cost += points[j].curCost;
			}

			int pos;
			do{
				double random = this.clustererRandom.nextDouble();
				double sum = 0.0;
				pos = -1;
				for(int j = 0; j < n; j++){
					sum = sum + points[j].curCost;
					if(random <= sum/cost){
						pos = j;
						break;
					}
				}
				//guard against floating-point rounding leaving pos unset
				if(pos < 0){
					pos = n - 1;
				}
			} while (points[pos].weight < 1);

			//copy the chosen centre
			centres[i] = points[pos].clone();
			//reassign points that are closer to the new centre
			for(int j = 0; j < n; j++){
				double newCost = points[j].costOfPointToCenter(centres[i]);
				if(points[j].curCost > newCost){
					points[j].curCost = newCost;
					points[j].centreIndex = i;
				}
			}
		}
		return centres;
	}

	/**
	 * Computes the k-means target function for points[0..n) against
	 * centres[0..k): the weighted sum of squared centroid distances of each
	 * point to its nearest centre.
	 */
	public double targetFunctionValue(int k, int n, Point[] centres, Point[] points){
		double sum = 0.0;
		for(int i=0;i<n;i++){
			double nearestCost = -1.0;
			for(int j=0;j<k;j++){
				double distance = 0.0;
				for(int l=0;l<points[i].dimension;l++){
					//centroid coordinate of the point
					double centroidCoordinatePoint;
					if(points[i].weight != 0.0){
						centroidCoordinatePoint = points[i].coordinates[l] / points[i].weight;
					} else {
						centroidCoordinatePoint = points[i].coordinates[l];
					}
					//centroid coordinate of the centre
					double centroidCoordinateCentre;
					if(centres[j].weight != 0.0){
						centroidCoordinateCentre = centres[j].coordinates[l] / centres[j].weight;
					} else {
						centroidCoordinateCentre = centres[j].coordinates[l];
					}
					distance += (centroidCoordinatePoint-centroidCoordinateCentre) *
								(centroidCoordinatePoint-centroidCoordinateCentre) ;
				}
				if(nearestCost <0 || distance < nearestCost) {
					nearestCost = distance;
				}
			}
			sum += nearestCost * points[i].weight;
		}
		return sum;
	}
}
| Java |
package moa.clusterers.streamkm;
/**
*
* @author Marcel R. Ackermann, Christiane Lammersen, Marcus Maertens, Christoph Raupach,
Christian Sohler, Kamil Swierkot
*/
public class BucketManager {

	// One bucket per level of the merge hierarchy. `points` holds the level's
	// current contents; `spillover` is a scratch buffer that receives coresets
	// while a merge cascade is in progress.
	protected class Bucket {
		int cursize;
		Point[] points;
		Point[] spillover;

		public Bucket(int d, int maxsize){
			this.cursize = 0;
			this.points = new Point[maxsize];
			this.spillover = new Point[maxsize];
			for(int i=0; i<maxsize; i++){
				this.points[i] = new Point(d);
				this.spillover[i] = new Point(d);
			}
		}
	};

	protected int numberOfBuckets;
	protected int maxBucketsize;
	protected Bucket[] buckets;
	protected MTRandom clustererRandom;
	protected TreeCoreset treeCoreset;

	/**
	initializes a bucketmanager for n points with bucketsize maxsize and dimension d
	**/
	public BucketManager(int n,int d,int maxsize, MTRandom random){
		this.clustererRandom = random;
		// one bucket per level: ceil(log2(n / maxsize)) + 2 levels in total
		this.numberOfBuckets = (int) Math.ceil(Math.log((double)n/(double)maxsize) / Math.log(2) )+2;
		this.maxBucketsize = maxsize;
		this.buckets = new Bucket[this.numberOfBuckets];
		for(int i=0; i<this.numberOfBuckets; i++){
			this.buckets[i] = new Bucket(d,maxsize);
		}
		this.treeCoreset = new TreeCoreset();
		//printf("Created manager with %d buckets of dimension %d \n",this.numberOfBuckets,d);
	}

	/**
	inserts a single point into the bucketmanager
	**/
	void insertPoint(Point p){

		//check if there is enough space in the first bucket
		int cursize = this.buckets[0].cursize;
		if(cursize >= this.maxBucketsize) {
			//bucket 0 is full: start the spillover/merge cascade
			int curbucket  = 0;
			int nextbucket = 1;

			//case 1: the next bucket is empty, simply move bucket 0 up a level
			if(this.buckets[nextbucket].cursize == 0){
				//copy the bucket
				int i;
				for(i=0; i<this.maxBucketsize; i++){
					this.buckets[nextbucket].points[i] = this.buckets[curbucket].points[i].clone();
					//copyPointWithoutInit: we should not copy coordinates?
				}
				//bucket is now full
				this.buckets[nextbucket].cursize = this.maxBucketsize;
				//first bucket is now empty
				this.buckets[curbucket].cursize = 0;
				cursize = 0;
			} else {
				//case 2: the next bucket is occupied; park bucket 0 in its
				//spillover buffer, then merge full buckets upwards
				int i;
				for(i=0;i<this.maxBucketsize;i++){
					this.buckets[nextbucket].spillover[i] = this.buckets[curbucket].points[i].clone();
					//copyPointWithoutInit: we should not copy coordinates?
				}
				this.buckets[0].cursize=0;
				cursize = 0;
				curbucket++;
				nextbucket++;
				/*
				as long as the next bucket is full output the coreset to the spillover of the next bucket
				*/
				while(this.buckets[nextbucket].cursize == this.maxBucketsize){
					//merge points+spillover of the current level into the
					//spillover of the next (full) level
					this.treeCoreset.unionTreeCoreset(this.maxBucketsize,this.maxBucketsize,
							this.maxBucketsize,p.dimension,
							this.buckets[curbucket].points,this.buckets[curbucket].spillover,
							this.buckets[nextbucket].spillover, this.clustererRandom);
					//bucket now empty
					this.buckets[curbucket].cursize = 0;
					curbucket++;
					nextbucket++;
				}
				//final merge into the first non-full level's points array
				this.treeCoreset.unionTreeCoreset(this.maxBucketsize,this.maxBucketsize,
						this.maxBucketsize,p.dimension,
						this.buckets[curbucket].points,this.buckets[curbucket].spillover,
						this.buckets[nextbucket].points, this.clustererRandom);
				this.buckets[curbucket].cursize = 0;
				this.buckets[nextbucket].cursize = this.maxBucketsize;
			}
		}
		//insert point into the first bucket
		this.buckets[0].points[cursize] = p.clone();
		//copyPointWithoutInit: we should not copy coordinates?
		this.buckets[0].cursize++;
	}

	/**
	It may happen that the manager is not full (since n is not always a power of 2). In this case we extract the coreset
	from the manager by computing a coreset of all nonempty buckets

	Case 1: the last bucket is full
	=> n is a power of 2 and we return the contents of the last bucket

	Case2: the last bucket is not full
	=> we compute a coreset of all nonempty buckets

	this operation should only be called after the streaming process is finished
	**/
	Point[] getCoresetFromManager(int d){
		// NOTE(review): the initial array is sized by the dimension d, not the
		// coreset size; it is normally replaced by a bucket's array below, but
		// if every bucket is empty an array of nulls of length d is returned —
		// confirm callers tolerate this.
		Point[] coreset = new Point[d];
		int i = 0;
		if(this.buckets[this.numberOfBuckets-1].cursize == this.maxBucketsize){
			coreset = this.buckets[this.numberOfBuckets-1].points;

		} else {
			//find the first nonempty bucket
			for(i=0; i < this.numberOfBuckets; i++){
				if(this.buckets[i].cursize != 0){
					coreset = this.buckets[i].points;
					break;
				}
			}
			//as long as there is a nonempty bucket compute a coreset
			int j;
			for(j=i+1; j < this.numberOfBuckets; j++){
				if(this.buckets[j].cursize != 0){
					//output the coreset into the spillover of bucket j
					this.treeCoreset.unionTreeCoreset(this.maxBucketsize,this.maxBucketsize,
							this.maxBucketsize,d,
							this.buckets[j].points,coreset,
							this.buckets[j].spillover, this.clustererRandom);
					coreset = this.buckets[j].spillover;
				}
			}
		}
		return coreset;
	}
}
| Java |
/*
* MTRandom : A Java implementation of the MT19937 (Mersenne Twister)
* pseudo random number generator algorithm based upon the
* original C code by Makoto Matsumoto and Takuji Nishimura.
* Author : David Beaumont
* Email : mersenne-at-www.goui.net
*
* For the original C code, see:
* http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
*
* This version, Copyright (C) 2005, David Beaumont.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library. If not, see <http://www.gnu.org/licenses/>.
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
package moa.clusterers.streamkm;
import java.util.Random;
/**
* @version 1.0
* @author David Beaumont, Copyright 2005
* <p>
* A Java implementation of the MT19937 (Mersenne Twister) pseudo
* random number generator algorithm based upon the original C code
* by Makoto Matsumoto and Takuji Nishimura (see
* <a href="http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html">
* http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html</a> for
* more information.
* <p>
* As a subclass of java.util.Random this class provides a single
* canonical method next() for generating bits in the pseudo random
* number sequence. Anyone using this class should invoke the public
* inherited methods (nextInt(), nextFloat etc.) to obtain values as
* normal. This class should provide a drop-in replacement for the
* standard implementation of java.util.Random with the additional
* advantage of having a far longer period and the ability to use a
* far larger seed value.
* <p>
* This is <b>not</b> a cryptographically strong source of randomness
* and should <b>not</b> be used for cryptographic systems or in any
* other situation where true random numbers are required.
* <p>
* <!-- Creative Commons License -->
* <a href="http://creativecommons.org/licenses/LGPL/2.1/"><img alt="CC-GNU LGPL" border="0" src="http://creativecommons.org/images/public/cc-LGPL-a.png" /></a><br />
* This software is licensed under the <a href="http://creativecommons.org/licenses/LGPL/2.1/">CC-GNU LGPL</a>.
* <!-- /Creative Commons License -->
*
* <!--
* <rdf:RDF xmlns="http://web.resource.org/cc/"
* xmlns:dc="http://purl.org/dc/elements/1.1/"
* xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
*
* <Work rdf:about="">
* <license rdf:resource="http://creativecommons.org/licenses/LGPL/2.1/" />
* <dc:type rdf:resource="http://purl.org/dc/dcmitype/Software" />
* </Work>
*
* <License rdf:about="http://creativecommons.org/licenses/LGPL/2.1/">
* <permits rdf:resource="http://web.resource.org/cc/Reproduction" />
* <permits rdf:resource="http://web.resource.org/cc/Distribution" />
* <requires rdf:resource="http://web.resource.org/cc/Notice" />
* <permits rdf:resource="http://web.resource.org/cc/DerivativeWorks" />
* <requires rdf:resource="http://web.resource.org/cc/ShareAlike" />
* <requires rdf:resource="http://web.resource.org/cc/SourceCode" />
* </License>
*
* </rdf:RDF>
* -->
*
*/
public class MTRandom extends Random {
/**
* Auto-generated serial version UID. Note that MTRandom does NOT
* support serialisation of its internal state and it may even be
* necessary to implement read/write methods to re-seed it properly.
* This is only here to make Eclipse shut up about it being missing.
*/
private static final long serialVersionUID = -515082678588212038L;
// Constants used in the original C implementation
private final static int UPPER_MASK = 0x80000000;
private final static int LOWER_MASK = 0x7fffffff;
private final static int N = 624;
private final static int M = 397;
private final static int MAGIC[] = { 0x0, 0x9908b0df };
private final static int MAGIC_FACTOR1 = 1812433253;
private final static int MAGIC_FACTOR2 = 1664525;
private final static int MAGIC_FACTOR3 = 1566083941;
private final static int MAGIC_MASK1 = 0x9d2c5680;
private final static int MAGIC_MASK2 = 0xefc60000;
private final static int MAGIC_SEED = 19650218;
private final static long DEFAULT_SEED = 5489L;
// Internal state
private transient int[] mt;
private transient int mti;
private transient boolean compat = false;
// Temporary buffer used during setSeed(long)
private transient int[] ibuf;
/**
* The default constructor for an instance of MTRandom. This invokes
* the no-argument constructor for java.util.Random which will result
* in the class being initialised with a seed value obtained by calling
* System.currentTimeMillis().
*/
public MTRandom() { }
/**
* This version of the constructor can be used to implement identical
* behaviour to the original C code version of this algorithm including
* exactly replicating the case where the seed value had not been set
* prior to calling genrand_int32.
* <p>
* If the compatibility flag is set to true, then the algorithm will be
* seeded with the same default value as was used in the original C
* code. Furthermore the setSeed() method, which must take a 64 bit
* long value, will be limited to using only the lower 32 bits of the
* seed to facilitate seamless migration of existing C code into Java
* where identical behaviour is required.
* <p>
* Whilst useful for ensuring backwards compatibility, it is advised
* that this feature not be used unless specifically required, due to
* the reduction in strength of the seed value.
*
* @param compatible Compatibility flag for replicating original
* behaviour.
*/
public MTRandom(boolean compatible) {
super(0L);
compat = compatible;
setSeed(compat?DEFAULT_SEED:System.currentTimeMillis());
}
/**
* This version of the constructor simply initialises the class with
* the given 64 bit seed value. For a better random number sequence
* this seed value should contain as much entropy as possible.
*
* @param seed The seed value with which to initialise this class.
*/
public MTRandom(long seed) {
super(seed);
}
/**
* This version of the constructor initialises the class with the
* given byte array. All the data will be used to initialise this
* instance.
*
* @param buf The non-empty byte array of seed information.
* @throws NullPointerException if the buffer is null.
* @throws IllegalArgumentException if the buffer has zero length.
*/
public MTRandom(byte[] buf) {
super(0L);
setSeed(buf);
}
/**
* This version of the constructor initialises the class with the
* given integer array. All the data will be used to initialise
* this instance.
*
* @param buf The non-empty integer array of seed information.
* @throws NullPointerException if the buffer is null.
* @throws IllegalArgumentException if the buffer has zero length.
*/
public MTRandom(int[] buf) {
super(0L);
setSeed(buf);
}
// Initializes mt[N] with a simple integer seed. This method is
// required as part of the Mersenne Twister algorithm but need
// not be made public.
private final void setSeed(int seed) {
// Annoying runtime check for initialisation of internal data
// caused by java.util.Random invoking setSeed() during init.
// This is unavoidable because no fields in our instance will
// have been initialised at this point, not even if the code
// were placed at the declaration of the member variable.
if (mt == null) mt = new int[N];
// ---- Begin Mersenne Twister Algorithm ----
mt[0] = seed;
for (mti = 1; mti < N; mti++) {
mt[mti] = (MAGIC_FACTOR1 * (mt[mti-1] ^ (mt[mti-1] >>> 30)) + mti);
}
// ---- End Mersenne Twister Algorithm ----
}
/**
* This method resets the state of this instance using the 64
* bits of seed data provided. Note that if the same seed data
* is passed to two different instances of MTRandom (both of
* which share the same compatibility state) then the sequence
* of numbers generated by both instances will be identical.
* <p>
* If this instance was initialised in 'compatibility' mode then
* this method will only use the lower 32 bits of any seed value
* passed in and will match the behaviour of the original C code
* exactly with respect to state initialisation.
*
* @param seed The 64 bit value used to initialise the random
* number generator state.
*/
public final synchronized void setSeed(long seed) {
if (compat) {
setSeed((int)seed);
} else {
// Annoying runtime check for initialisation of internal data
// caused by java.util.Random invoking setSeed() during init.
// This is unavoidable because no fields in our instance will
// have been initialised at this point, not even if the code
// were placed at the declaration of the member variable.
if (ibuf == null) ibuf = new int[2];
ibuf[0] = (int)seed;
ibuf[1] = (int)(seed >>> 32);
setSeed(ibuf);
}
}
/**
* This method resets the state of this instance using the byte
* array of seed data provided. Note that calling this method
* is equivalent to calling "setSeed(pack(buf))" and in particular
* will result in a new integer array being generated during the
* call. If you wish to retain this seed data to allow the pseudo
* random sequence to be restarted then it would be more efficient
* to use the "pack()" method to convert it into an integer array
* first and then use that to re-seed the instance. The behaviour
* of the class will be the same in both cases but it will be more
* efficient.
*
* @param buf The non-empty byte array of seed information.
* @throws NullPointerException if the buffer is null.
* @throws IllegalArgumentException if the buffer has zero length.
*/
public final void setSeed(byte[] buf) {
setSeed(pack(buf));
}
/**
* This method resets the state of this instance using the integer
* array of seed data provided. This is the canonical way of
* resetting the pseudo random number sequence.
*
* @param buf The non-empty integer array of seed information.
* @throws NullPointerException if the buffer is null.
* @throws IllegalArgumentException if the buffer has zero length.
*/
public final synchronized void setSeed(int[] buf) {
int length = buf.length;
if (length == 0) throw new IllegalArgumentException("Seed buffer may not be empty");
// ---- Begin Mersenne Twister Algorithm ----
int i = 1, j = 0, k = (N > length ? N : length);
setSeed(MAGIC_SEED);
for (; k > 0; k--) {
mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >>> 30)) * MAGIC_FACTOR2)) + buf[j] + j;
i++; j++;
if (i >= N) { mt[0] = mt[N-1]; i = 1; }
if (j >= length) j = 0;
}
for (k = N-1; k > 0; k--) {
mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >>> 30)) * MAGIC_FACTOR3)) - i;
i++;
if (i >= N) { mt[0] = mt[N-1]; i = 1; }
}
mt[0] = UPPER_MASK; // MSB is 1; assuring non-zero initial array
// ---- End Mersenne Twister Algorithm ----
}
/**
* This method forms the basis for generating a pseudo random number
* sequence from this class. If given a value of 32, this method
* behaves identically to the genrand_int32 function in the original
* C code and ensures that using the standard nextInt() function
* (inherited from Random) we are able to replicate behaviour exactly.
* <p>
* Note that where the number of bits requested is not equal to 32
* then bits will simply be masked out from the top of the returned
* integer value. That is to say that:
* <pre>
* mt.setSeed(12345);
* int foo = mt.nextInt(16) + (mt.nextInt(16) << 16);</pre>
* will not give the same result as
* <pre>
* mt.setSeed(12345);
* int foo = mt.nextInt(32);</pre>
*
* @param bits The number of significant bits desired in the output.
* @return The next value in the pseudo random sequence with the
* specified number of bits in the lower part of the integer.
*/
protected final synchronized int next(int bits) {
// ---- Begin Mersenne Twister Algorithm ----
int y, kk;
if (mti >= N) { // generate N words at one time
// In the original C implementation, mti is checked here
// to determine if initialisation has occurred; if not
// it initialises this instance with DEFAULT_SEED (5489).
// This is no longer necessary as initialisation of the
// Java instance must result in initialisation occurring
// Use the constructor MTRandom(true) to enable backwards
// compatible behaviour.
for (kk = 0; kk < N-M; kk++) {
y = (mt[kk] & UPPER_MASK) | (mt[kk+1] & LOWER_MASK);
mt[kk] = mt[kk+M] ^ (y >>> 1) ^ MAGIC[y & 0x1];
}
for (;kk < N-1; kk++) {
y = (mt[kk] & UPPER_MASK) | (mt[kk+1] & LOWER_MASK);
mt[kk] = mt[kk+(M-N)] ^ (y >>> 1) ^ MAGIC[y & 0x1];
}
y = (mt[N-1] & UPPER_MASK) | (mt[0] & LOWER_MASK);
mt[N-1] = mt[M-1] ^ (y >>> 1) ^ MAGIC[y & 0x1];
mti = 0;
}
y = mt[mti++];
// Tempering
y ^= (y >>> 11);
y ^= (y << 7) & MAGIC_MASK1;
y ^= (y << 15) & MAGIC_MASK2;
y ^= (y >>> 18);
// ---- End Mersenne Twister Algorithm ----
return (y >>> (32-bits));
}
// This is a fairly obscure little code section to pack a
// byte[] into an int[] in little endian ordering.
/**
* This simply utility method can be used in cases where a byte
* array of seed data is to be used to repeatedly re-seed the
* random number sequence. By packing the byte array into an
* integer array first, using this method, and then invoking
* setSeed() with that; it removes the need to re-pack the byte
* array each time setSeed() is called.
* <p>
* If the length of the byte array is not a multiple of 4 then
* it is implicitly padded with zeros as necessary. For example:
* <pre> byte[] { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 }</pre>
* becomes
* <pre> int[] { 0x04030201, 0x00000605 }</pre>
* <p>
* Note that this method will not complain if the given byte array
* is empty and will produce an empty integer array, but the
* setSeed() method will throw an exception if the empty integer
* array is passed to it.
*
* @param buf The non-null byte array to be packed.
* @return A non-null integer array of the packed bytes.
* @throws NullPointerException if the given byte array is null.
*/
public static int[] pack(byte[] buf) {
int k, blen = buf.length, ilen = ((buf.length+3) >>> 2);
int[] ibuf = new int[ilen];
for (int n = 0; n < ilen; n++) {
int m = (n+1) << 2;
if (m > blen) m = blen;
for (k = buf[--m]&0xff; (m & 0x3) != 0; k = (k << 8) | buf[--m]&0xff);
ibuf[n] = k;
}
return ibuf;
}
}
| Java |
/*
* ClusterGenerator.java
* Copyright (C) 2010 RWTH Aachen University, Germany
* @author Jansen (moa@cs.rwth-aachen.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Random;
import moa.cluster.Clustering;
import moa.cluster.SphereCluster;
import moa.core.Measurement;
import moa.gui.visualization.DataPoint;
import moa.options.FloatOption;
import moa.options.IntOption;
import weka.core.Instance;
public class ClusterGenerator extends AbstractClusterer{

    private static final long serialVersionUID = 1L;

    /** Number of instances kept in the sliding point window. */
    public IntOption timeWindowOption = new IntOption("timeWindow",
            't', "Range of the window.", 1000);

    /** 0: radii unchanged, 1: radii shrunk to zero. */
    public FloatOption radiusDecreaseOption = new FloatOption("radiusDecrease", 'r',
            "Percentage the cluster radii are decreased by (0: unchanged, 1: radius of 0).", 0, 0, 1);

    /** 0: radii unchanged, 1: radii doubled. */
    public FloatOption radiusIncreaseOption = new FloatOption("radiusIncrease", 'R',
            "Percentage the cluster radii are increased by (0: unchanged, 1: radius doubled).", 0, 0, 1);

    /** 0: centers unchanged, 1: centers moved by one diameter. */
    public FloatOption positionOffsetOption = new FloatOption("positionOffset", 'p',
            "Offset of the cluster centers (0: unchanged, 1: centers moved by 2 * radius).", 0, 0, 1);

    /** Fraction of clusters removed from the source clustering. */
    public FloatOption clusterRemoveOption = new FloatOption("clusterRemove", 'D',
            "Deletes complete clusters from the clustering.", 0, 0, 1);

    /** Hull-distance threshold factor for joining two clusters. */
    public FloatOption joinClustersOption = new FloatOption("joinClusters", 'j',
            "Join two clusters if their hull distance is less minRadius times this factor.", 0, 0, 1);

    /** Fraction of additional (copied) clusters to add. */
    public FloatOption clusterAddOption = new FloatOption("clusterAdd", 'A',
            "Adds additional clusters.", 0, 0, 1);

    // Width of the random interval around the configured error levels.
    // Currently always 0, so the configured levels are applied exactly.
    private static double err_intervall_width = 0.0;

    private ArrayList<DataPoint> points;        // points of the current window
    private int instanceCounter;                // total number of seen instances
    private int windowCounter;                  // instances in the current window
    private Random random;                      // fixed seed for reproducible noise
    private Clustering sourceClustering = null; // ground truth to be distorted

    @Override
    public void resetLearningImpl() {
        points = new ArrayList<DataPoint>();
        instanceCounter = 0;
        windowCounter = 0;
        random = new Random(227);
    }

    @Override
    public void trainOnInstanceImpl(Instance inst) {
        // Start a fresh window once the configured horizon is reached.
        if (windowCounter >= timeWindowOption.getValue()) {
            points.clear();
            windowCounter = 0;
        }
        windowCounter++;
        instanceCounter++;
        points.add(new DataPoint(inst, instanceCounter));
    }

    @Override
    public boolean implementsMicroClusterer() {
        return true;
    }

    /**
     * Sets the ground-truth clustering that subsequent calls to
     * {@link #getMicroClusteringResult()} will distort.
     *
     * @param source the clustering to use as the source
     */
    public void setSourceClustering(Clustering source) {
        sourceClustering = source;
    }

    @Override
    public Clustering getMicroClusteringResult() {
        if (sourceClustering == null) {
            System.out.println("You need to set a source clustering for the ClusterGenerator to work");
            return null;
        }
        return alterClustering(sourceClustering);
    }

    @Override
    public Clustering getClusteringResult() {
        // Build the source clustering from the current point window.
        sourceClustering = new Clustering(points);
        return alterClustering(sourceClustering);
    }

    /**
     * Produces a distorted copy of the given clustering by applying the
     * configured errors: cluster removal, center offset, radius change,
     * joining of close clusters and addition of copied clusters.
     *
     * @param scclustering the source clustering (clusters may be removed
     *                     from it in place)
     * @return the distorted clustering
     */
    private Clustering alterClustering(Clustering scclustering) {
        // 0: no change to radius; 1: radius of 0
        double errLevelRadiusDecrease = radiusDecreaseOption.getValue();
        // 0: no change to radius; 1: radius 100% bigger
        double errLevelRadiusIncrease = radiusIncreaseOption.getValue();
        // 0: no change; 1: distance between centers is 2 * original radius
        double errLevelPosition = positionOffsetOption.getValue();

        int numRemoveCluster = (int) (clusterRemoveOption.getValue() * scclustering.size());
        int numAddCluster = (int) (clusterAddOption.getValue() * scclustering.size());

        // Delete randomly chosen clusters.
        for (int c = 0; c < numRemoveCluster; c++) {
            int delId = random.nextInt(scclustering.size());
            scclustering.remove(delId);
        }

        int numCluster = scclustering.size();

        // NOTE(review): err_seeds is never read, but the loop draws from
        // the RNG; it is kept so the random sequence (and therefore the
        // generated clusterings) stays identical to earlier versions.
        double[] err_seeds = new double[numCluster];
        double err_seed_sum = 0.0;
        double tmp_seed;
        for (int i = 0; i < numCluster; i++) {
            tmp_seed = random.nextDouble();
            err_seeds[i] = err_seed_sum + tmp_seed;
            err_seed_sum += tmp_seed;
        }

        Clustering clustering = new Clustering();
        for (int i = 0; i < numCluster; i++) {
            if (!(scclustering.get(i) instanceof SphereCluster)) {
                System.out.println("Not a Sphere Cluster");
                continue;
            }
            SphereCluster sourceCluster = (SphereCluster) scclustering.get(i);
            double[] center = Arrays.copyOf(sourceCluster.getCenter(), sourceCluster.getCenter().length);
            double weight = sourceCluster.getWeight();
            double radius = sourceCluster.getRadius();

            // Move the cluster center along a random direction.
            if (errLevelPosition > 0) {
                double errOffset = random.nextDouble() * err_intervall_width / 2.0;
                double errOffsetDirection = ((random.nextBoolean()) ? 1 : -1);
                double level = errLevelPosition + errOffsetDirection * errOffset;
                double[] vector = new double[center.length];
                double vectorLength = 0;
                for (int d = 0; d < center.length; d++) {
                    vector[d] = (random.nextBoolean() ? 1 : -1) * random.nextDouble();
                    vectorLength += Math.pow(vector[d], 2);
                }
                vectorLength = Math.sqrt(vectorLength);
                // Maximum offset (level == 1) is when clusters are next to
                // each other, i.e. one diameter apart.
                double length = 2 * radius * level;
                for (int d = 0; d < center.length; d++) {
                    // Normalize the direction, then stretch to the error position.
                    vector[d] = vector[d] / vectorLength * length;
                }
                // Keep the new center within [0,1]; otherwise flip the vector.
                double[] newCenter = new double[center.length];
                for (int d = 0; d < center.length; d++) {
                    if (center[d] + vector[d] >= 0 && center[d] + vector[d] <= 1) {
                        newCenter[d] = center[d] + vector[d];
                    } else {
                        newCenter[d] = center[d] + (-1) * vector[d];
                    }
                }
                center = newCenter;
                for (int d = 0; d < center.length; d++) {
                    if (!(newCenter[d] >= 0 && newCenter[d] <= 1)) {
                        System.out.println("This shouldn't have happened, cluster center out of bounds: "
                                + Arrays.toString(newCenter));
                    }
                }
            }

            // Alter the radius: shrink or grow, chosen at random when both
            // error levels are configured.
            if (errLevelRadiusDecrease > 0 || errLevelRadiusIncrease > 0) {
                double errOffset = random.nextDouble() * err_intervall_width / 2.0;
                int errOffsetDirection = ((random.nextBoolean()) ? 1 : -1);

                if (errLevelRadiusDecrease > 0 && (errLevelRadiusIncrease == 0 || random.nextBoolean())) {
                    double level = (errLevelRadiusDecrease + errOffsetDirection * errOffset);
                    level = (level < 0) ? 0 : level;
                    level = (level > 1) ? 1 : level;
                    radius *= (1 - level);
                } else {
                    double level = errLevelRadiusIncrease + errOffsetDirection * errOffset;
                    level = (level < 0) ? 0 : level;
                    level = (level > 1) ? 1 : level;
                    radius += radius * level;
                }
            }

            SphereCluster newCluster = new SphereCluster(center, radius, weight);
            newCluster.setMeasureValue("Source Cluster", "C" + sourceCluster.getId());
            clustering.add(newCluster);
        }

        if (joinClustersOption.getValue() > 0) {
            clustering = joinClusters(clustering);
        }

        // Add new clusters by copying existing ones to random positions.
        for (int c = 0; c < numAddCluster; c++) {
            int copyId = random.nextInt(clustering.size());
            SphereCluster scorg = (SphereCluster) clustering.get(copyId);
            int dim = scorg.getCenter().length;
            double[] center = new double[dim];
            double radius = scorg.getRadius();

            // Try at most 20 random positions that fit completely into [0,1].
            boolean outofbounds = true;
            int tryCounter = 0;
            while (outofbounds && tryCounter < 20) {
                tryCounter++;
                outofbounds = false;
                for (int j = 0; j < center.length; j++) {
                    center[j] = random.nextDouble();
                    if (center[j] - radius < 0 || center[j] + radius > 1) {
                        outofbounds = true;
                        break;
                    }
                }
            }
            if (outofbounds) {
                System.out.println("Couldn't place additional cluster");
            } else {
                // Split the weight between the copy and the original.
                SphereCluster scnew = new SphereCluster(center, radius, scorg.getWeight() / 2);
                scorg.setWeight(scorg.getWeight() - scnew.getWeight());
                clustering.add(scnew);
            }
        }

        return clustering;
    }

    /**
     * Greedily joins pairs of clusters whose hull distance is below
     * min(radius1, radius2) times the joinClusters factor. Clusters that
     * are not merged are passed through unchanged.
     *
     * @param clustering the clustering to process
     * @return a new clustering with close clusters merged
     */
    private Clustering joinClusters(Clustering clustering) {
        double radiusFactor = joinClustersOption.getValue();
        boolean[] merged = new boolean[clustering.size()];

        Clustering mclustering = new Clustering();

        if (radiusFactor > 0) {
            for (int c1 = 0; c1 < clustering.size(); c1++) {
                SphereCluster sc1 = (SphereCluster) clustering.get(c1);
                double minDist = Double.MAX_VALUE;
                int maxindexCon = -1;
                int maxindexOver = -1; // overlap-based joining is currently disabled
                for (int c2 = 0; c2 < clustering.size(); c2++) {
                    SphereCluster sc2 = (SphereCluster) clustering.get(c2);
                    double dist = sc1.getHullDistance(sc2);
                    double threshold = Math.min(sc1.getRadius(), sc2.getRadius()) * radiusFactor;
                    // dist > 0 also excludes the cluster itself.
                    if (dist > 0 && dist < minDist && dist < threshold) {
                        minDist = dist;
                        maxindexCon = c2;
                    }
                }
                int maxindex = -1;
                if (maxindexOver != -1) {
                    maxindex = maxindexOver;
                } else {
                    maxindex = maxindexCon;
                }
                if (maxindex != -1 && !merged[c1]) {
                    merged[c1] = true;
                    merged[maxindex] = true;
                    SphereCluster scnew = new SphereCluster(sc1.getCenter(), sc1.getRadius(), sc1.getWeight());
                    SphereCluster sc2 = (SphereCluster) clustering.get(maxindex);
                    scnew.merge(sc2);
                    mclustering.add(scnew);
                }
            }
        }

        // Pass through everything that was not merged.
        for (int i = 0; i < merged.length; i++) {
            if (!merged[i]) {
                mclustering.add(clustering.get(i));
            }
        }

        return mclustering;
    }

    @Override
    protected Measurement[] getModelMeasurementsImpl() {
        throw new UnsupportedOperationException("Not supported yet.");
    }

    @Override
    public void getModelDescription(StringBuilder out, int indent) {
        throw new UnsupportedOperationException("Not supported yet.");
    }

    @Override
    public boolean isRandomizable() {
        return false;
    }

    @Override
    public boolean keepClassLabel() {
        return true;
    }

    @Override
    public double[] getVotesForInstance(Instance inst) {
        return null;
    }
}
| Java |
/*
* CobWeb.java
* Copyright (C) 2009 University of Waikato, Hamilton, New Zealand
* @author Mark Hall (mhall@cs.waikato.ac.nz)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package moa.clusterers;
import java.io.Serializable;
import moa.cluster.Clustering;
import moa.cluster.SphereCluster;
import moa.core.Measurement;
import moa.core.StringUtils;
import moa.options.FloatOption;
import moa.options.IntOption;
import weka.core.AttributeStats;
import weka.core.FastVector;
import weka.core.Instance;
import weka.core.Instances;
import weka.experiment.Stats;
import weka.filters.unsupervised.attribute.Add;
/**
* Class implementing the Cobweb and Classit clustering algorithms.
* See: http://en.wikipedia.org/wiki/Cobweb_%28clustering%29
*
* Citation: D. Fisher (1987).
* Knowledge acquisition via incremental conceptual clustering.
* Machine Learning. 2(2):139-172.
**/
public class CobWeb extends AbstractClusterer {

    private static final long serialVersionUID = 1L;

    /** Minimum standard deviation assumed for numeric attributes (Classit). */
    public FloatOption acuityOption = new FloatOption("acuity",
            'a', "Acuity (minimum standard deviation)", 1.0, 0.0, 90.0);

    /** Minimum category utility below which a partition is collapsed. */
    public FloatOption cutoffOption = new FloatOption("cutoff",
            'c', "Cutoff (minimum category utility)", 0.002, 0.0, 90.0); //0.01 * Cobweb.m_normal

    /** Seed used when adding random noise to instances. */
    public IntOption randomSeedOption = new IntOption("randomSeed", 'r',
            "Seed for random noise.", 1); //42
/**
* Inner class handling node operations for Cobweb.
*
* @see Serializable
*/
// NOTE: non-static inner class — every CNode keeps an implicit reference to
// its enclosing CobWeb instance and reads its m_acuity, m_cutoff, m_normal,
// m_numberSplits and m_numberMerges members.
private class CNode implements Serializable {

    /** for serialization */
    static final long serialVersionUID = 3452097436933325631L;

    /**
     * Within cluster attribute statistics
     */
    private AttributeStats[] m_attStats;

    /**
     * Number of attributes
     */
    private int m_numAttributes;

    /**
     * Instances at this node (null until the first instance arrives)
     */
    protected Instances m_clusterInstances = null;

    /**
     * Children of this node (null for a leaf)
     */
    private FastVector m_children = null;

    /**
     * Total instances at this node
     */
    private double m_totalInstances = 0.0;

    /**
     * Cluster number of this node
     */
    private int m_clusterNum = -1;
/**
* Creates an empty <code>CNode</code> instance.
*
* @param numAttributes the number of attributes in the data
*/
public CNode(int numAttributes) {
    // Bare node: statistics, instances and children are created lazily.
    this.m_numAttributes = numAttributes;
}
/**
* Creates a new leaf <code>CNode</code> instance.
*
* @param numAttributes the number of attributes in the data
* @param leafInstance the instance to store at this leaf
*/
public CNode(int numAttributes, Instance leafInstance) {
    this(numAttributes);
    // m_clusterInstances is always null at this point (the delegated
    // constructor never initialises it), so the previous null check was
    // redundant and has been removed.
    m_clusterInstances = new Instances(leafInstance.dataset(), 1);
    m_clusterInstances.add(leafInstance);
    updateStats(leafInstance, false);
}
/**
* Adds an instance to this cluster.
*
* @param newInstance the instance to add
*/
protected void addInstance(Instance newInstance) {
    // Add the instance to this cluster
    if (m_clusterInstances == null) {
        // Empty node: simply hold the instance.
        m_clusterInstances = new Instances(newInstance.dataset(), 1);
        m_clusterInstances.add(newInstance);
        updateStats(newInstance, false);
        return;
    } else if (m_children == null) {
        /* We are a leaf, so demote our existing instance(s) into a child
           and then add the new instance as a sibling child. */
        CNode tempSubCluster = new CNode(m_numAttributes,
                m_clusterInstances.instance(0));
        for (int i = 1; i < m_clusterInstances.numInstances(); i++) {
            tempSubCluster.m_clusterInstances.add(m_clusterInstances.instance(i));
            tempSubCluster.updateStats(m_clusterInstances.instance(i), false);
        }
        // (The original code allocated m_children twice here; the first
        // allocation was discarded unused — one is enough.)
        m_children = new FastVector();
        m_children.addElement(tempSubCluster);
        m_children.addElement(new CNode(m_numAttributes, newInstance));

        m_clusterInstances.add(newInstance);
        updateStats(newInstance, false);

        // Undo the split if the resulting partition is not worth keeping;
        // the cutoff is also checked in findHost.
        if (categoryUtility() < m_cutoff) {
            m_children = null;
        }
        return;
    }

    // Inner node: recurse into the best host for this instance.
    // findHost returns null when this node should absorb the instance.
    CNode bestHost = findHost(newInstance, false);
    if (bestHost != null) {
        bestHost.addInstance(newInstance);
    }
}
/**
* Temporarily adds a new instance to each of this nodes children
* in turn and computes the category utility.
*
* @param newInstance the new instance to evaluate
* @return an array of category utility values---the result of considering
* each child in turn as a host for the new instance
* @throws Exception if an error occurs
*/
private double[] cuScoresForChildren(Instance newInstance) {
    // Tentatively place the instance in each child in turn and record the
    // category utility of the resulting partition; every tentative add is
    // undone before moving on, so this node is left unchanged.
    final int numChildren = m_children.size();
    double[] scores = new double[numChildren];
    for (int c = 0; c < numChildren; c++) {
        CNode candidate = (CNode) m_children.elementAt(c);
        candidate.updateStats(newInstance, false); // tentative add
        scores[c] = categoryUtility();             // CU of this partition
        candidate.updateStats(newInstance, true);  // undo the add
    }
    return scores;
}
// Computes the category utility that would result from merging children
// a and b into the supplied (empty) "merged" node and hosting the new
// instance there. The child list is mutated to evaluate the merge and
// then restored, so the statement order below is significant: m_children
// must contain the merged node (and not a or b) when categoryUtility()
// is called, and the reverse afterwards.
private double cuScoreForBestTwoMerged(CNode merged,
        CNode a, CNode b,
        Instance newInstance) {//throws Exception {
    double mergedCU = -Double.MAX_VALUE;
    // Consider merging the best and second best: fold both children's
    // instances and statistics into the candidate merged node.
    merged.m_clusterInstances = new Instances(m_clusterInstances, 1);
    merged.addChildNode(a);
    merged.addChildNode(b);
    merged.updateStats(newInstance, false); // add new instance to stats
    // Temporarily replace the best and second best nodes by the merge.
    m_children.removeElementAt(m_children.indexOf(a));
    m_children.removeElementAt(m_children.indexOf(b));
    m_children.addElement(merged);
    mergedCU = categoryUtility();
    // Restore the status quo: undo the tentative stat update and put the
    // original children back in place of the merged node.
    merged.updateStats(newInstance, true);
    m_children.removeElementAt(m_children.indexOf(merged));
    m_children.addElement(a);
    m_children.addElement(b);
    return mergedCU;
}
/**
* Finds a host for the new instance in this nodes children. Also
* considers merging the two best hosts and splitting the best host.
*
* @param newInstance the instance to find a host for
* @param structureFrozen true if the instance is not to be added to
* the tree and instead the best potential host is to be returned
* @return the best host
* @throws Exception if an error occurs
*/
// Finds a host for the new instance among this node's children, comparing
// four alternatives by category utility: (1) an existing child, (2) a new
// leaf, (3) merging the two best children, (4) splitting the best child.
// Returns the chosen host, or null when this node itself should keep the
// instance (also when the cutoff collapses the children). The method
// mutates m_children in place while evaluating candidates, so statement
// order is significant throughout.
private CNode findHost(Instance newInstance,
        boolean structureFrozen) {//throws Exception {
    if (!structureFrozen) {
        updateStats(newInstance, false);
    }

    // look for a host in existing children and also consider as a new leaf
    double[] categoryUtils = cuScoresForChildren(newInstance);

    // make a temporary new leaf for this instance and get CU
    CNode newLeaf = new CNode(m_numAttributes, newInstance);
    m_children.addElement(newLeaf);
    double bestHostCU = categoryUtility();
    CNode finalBestHost = newLeaf;

    // remove new leaf when searching for best and second best nodes to
    // consider for merging and splitting
    m_children.removeElementAt(m_children.size() - 1);

    // now determine the best host (and the second best)
    int best = 0;
    int secondBest = 0;
    for (int i = 0; i < categoryUtils.length; i++) {
        if (categoryUtils[i] > categoryUtils[secondBest]) {
            if (categoryUtils[i] > categoryUtils[best]) {
                secondBest = best;
                best = i;
            } else {
                secondBest = i;
            }
        }
    }

    CNode a = (CNode) m_children.elementAt(best);
    CNode b = (CNode) m_children.elementAt(secondBest);
    if (categoryUtils[best] > bestHostCU) {
        bestHostCU = categoryUtils[best];
        finalBestHost = a;
    }

    if (structureFrozen) {
        // Query-only mode: report the best existing child, or null when
        // this node itself is the best host.
        if (finalBestHost == newLeaf) {
            return null; // *this* node is the best host
        } else {
            return finalBestHost;
        }
    }

    // Alternative 3: merge the two best children (only if they differ).
    double mergedCU = -Double.MAX_VALUE;
    CNode merged = new CNode(m_numAttributes);
    if (a != b) {
        mergedCU = cuScoreForBestTwoMerged(merged, a, b, newInstance);
        if (mergedCU > bestHostCU) {
            bestHostCU = mergedCU;
            finalBestHost = merged;
        }
    }

    // Alternative 4: split the best child by promoting its children to
    // this level; evaluated only if the best child is not a leaf.
    double splitCU = -Double.MAX_VALUE;
    double splitBestChildCU = -Double.MAX_VALUE;
    double splitPlusNewLeafCU = -Double.MAX_VALUE;
    double splitPlusMergeBestTwoCU = -Double.MAX_VALUE;
    if (a.m_children != null) {
        // Build a child list without a but with a's children promoted.
        FastVector tempChildren = new FastVector();
        for (int i = 0; i < m_children.size(); i++) {
            CNode existingChild = (CNode) m_children.elementAt(i);
            if (existingChild != a) {
                tempChildren.addElement(existingChild);
            }
        }
        for (int i = 0; i < a.m_children.size(); i++) {
            CNode promotedChild = (CNode) a.m_children.elementAt(i);
            tempChildren.addElement(promotedChild);
        }
        // also add the new leaf
        tempChildren.addElement(newLeaf);
        // Swap the split child list in; keep the original for rollback.
        FastVector saveStatusQuo = m_children;
        m_children = tempChildren;
        splitPlusNewLeafCU = categoryUtility(); // split + new leaf
        // remove the new leaf
        tempChildren.removeElementAt(tempChildren.size() - 1);
        // re-score hosts against the split child list
        categoryUtils = cuScoresForChildren(newInstance);
        // now determine the best host (and the second best)
        best = 0;
        secondBest = 0;
        for (int i = 0; i < categoryUtils.length; i++) {
            if (categoryUtils[i] > categoryUtils[secondBest]) {
                if (categoryUtils[i] > categoryUtils[best]) {
                    secondBest = best;
                    best = i;
                } else {
                    secondBest = i;
                }
            }
        }
        CNode sa = (CNode) m_children.elementAt(best);
        CNode sb = (CNode) m_children.elementAt(secondBest);
        splitBestChildCU = categoryUtils[best];

        // now merge best and second best (within the split)
        CNode mergedSplitChildren = new CNode(m_numAttributes);
        if (sa != sb) {
            splitPlusMergeBestTwoCU =
                cuScoreForBestTwoMerged(mergedSplitChildren, sa, sb, newInstance);
        }
        // best score achievable with the split in place
        splitCU = (splitBestChildCU > splitPlusNewLeafCU)
            ? splitBestChildCU : splitPlusNewLeafCU;
        splitCU = (splitCU > splitPlusMergeBestTwoCU)
            ? splitCU : splitPlusMergeBestTwoCU;

        if (splitCU > bestHostCU) {
            bestHostCU = splitCU;
            finalBestHost = this; // keep the split; recurse on this node
        } else {
            // restore the status quo
            m_children = saveStatusQuo;
        }
    }

    if (finalBestHost != this) {
        // can commit the instance to the set of instances at this node
        m_clusterInstances.add(newInstance);
    } else {
        m_numberSplits++;
    }

    // Commit the merge permanently if it won.
    if (finalBestHost == merged) {
        m_numberMerges++;
        m_children.removeElementAt(m_children.indexOf(a));
        m_children.removeElementAt(m_children.indexOf(b));
        m_children.addElement(merged);
    }

    // A winning new leaf is replaced by a fresh empty node; the caller's
    // recursive addInstance will fill it.
    if (finalBestHost == newLeaf) {
        finalBestHost = new CNode(m_numAttributes);
        m_children.addElement(finalBestHost);
    }

    if (bestHostCU < m_cutoff) {
        if (finalBestHost == this) {
            // splitting was the best, but since we are cutting all children
            // recursion is aborted and we still need to add the instance
            // to the set of instances at this node
            m_clusterInstances.add(newInstance);
        }
        m_children = null;
        finalBestHost = null;
    }

    if (finalBestHost == this) {
        // splitting is still the best, so downdate the stats as
        // we'll be recursively calling on this node
        updateStats(newInstance, true);
    }

    return finalBestHost;
}
/**
* Adds the supplied node as a child of this node. All of the child's
* instances are added to this nodes instances
*
* @param child the child to add
*/
protected void addChildNode(CNode child) {
    // Fold every instance of the child into this node's instance set and
    // statistics first...
    final int count = child.m_clusterInstances.numInstances();
    for (int i = 0; i < count; i++) {
        Instance inst = child.m_clusterInstances.instance(i);
        m_clusterInstances.add(inst);
        updateStats(inst, false);
    }
    // ...then attach the child, creating the child list lazily.
    if (m_children == null) {
        m_children = new FastVector();
    }
    m_children.addElement(child);
}
/**
* Computes the utility of all children with respect to this node
*
* @return the category utility of the children with respect to this node.
* @throws Exception if there are no children
*/
protected double categoryUtility() {
    // Mean of the per-child utilities. Callers must ensure m_children is
    // non-null and non-empty (the original threw an Exception for this).
    double sum = 0;
    final int numChildren = m_children.size();
    for (int c = 0; c < numChildren; c++) {
        sum += categoryUtilityChild((CNode) m_children.elementAt(c));
    }
    return sum / (double) numChildren;
}
/**
 * Computes the category utility contribution of a single child with
 * respect to this node.
 *
 * @param child the child to evaluate
 * @return the utility of the child with respect to this node
 */
protected double categoryUtilityChild(CNode child) {
    double utility = 0;
    for (int att = 0; att < m_numAttributes; att++) {
        if (m_clusterInstances.attribute(att).isNominal()) {
            int numVals = m_clusterInstances.attribute(att).numValues();
            for (int v = 0; v < numVals; v++) {
                double childProb = child.getProbability(att, v);
                double parentProb = getProbability(att, v);
                utility += childProb * childProb - parentProb * parentProb;
            }
        } else {
            // Numeric attribute: CLASSIT-style term based on the inverse
            // standard deviations.
            utility += m_normal / child.getStandardDev(att)
                    - m_normal / getStandardDev(att);
        }
    }
    return (child.m_totalInstances / m_totalInstances) * utility;
}
/**
 * Returns the estimated probability of a value of a nominal attribute
 * at this node.
 *
 * @param attIndex   index of the (nominal) attribute
 * @param valueIndex index of the attribute value
 * @return the relative frequency of the value, or 0 when no counts exist
 */
protected double getProbability(int attIndex, int valueIndex) {
    AttributeStats stats = m_attStats[attIndex];
    if (stats.totalCount <= 0) {
        return 0;
    }
    return (double) stats.nominalCounts[valueIndex] / (double) stats.totalCount;
}
/**
 * Returns the standard deviation of a numeric attribute at this node,
 * bounded below by the acuity.
 *
 * @param attIndex index of the (numeric) attribute
 * @return the standard deviation, never smaller than m_acuity
 */
protected double getStandardDev(int attIndex) {
    m_attStats[attIndex].numericStats.calculateDerived();
    double sd = m_attStats[attIndex].numericStats.stdDev;
    // A NaN/infinite deviation (e.g. too few observations) falls back to
    // the acuity floor.
    if (Double.isNaN(sd) || Double.isInfinite(sd)) {
        return m_acuity;
    }
    return Math.max(m_acuity, sd);
}
/**
 * Update attribute stats using the supplied instance.
 *
 * Note: nominalCounts is an int[] (allocated below), so the possibly
 * fractional instance weight is truncated by the int += double compound
 * assignment when folded into the nominal counters.
 *
 * @param updateInstance the instance for updating
 * @param delete true if the values of the supplied instance are
 * to be removed from the statistics
 */
protected void updateStats(Instance updateInstance,
boolean delete) {
// Lazily allocate per-attribute statistics on first use.
if (m_attStats == null) {
m_attStats = new AttributeStats[m_numAttributes];
for (int i = 0; i < m_numAttributes; i++) {
m_attStats[i] = new AttributeStats();
if (m_clusterInstances.attribute(i).isNominal()) {
m_attStats[i].nominalCounts =
new int[m_clusterInstances.attribute(i).numValues()];
} else {
m_attStats[i].numericStats = new Stats();
}
}
}
// Missing values contribute nothing to the statistics.
for (int i = 0; i < m_numAttributes; i++) {
if (!updateInstance.isMissing(i)) {
double value = updateInstance.value(i);
if (m_clusterInstances.attribute(i).isNominal()) {
// Negated weight on delete so the same code path both adds and
// removes counts (weight truncated into the int counters).
m_attStats[i].nominalCounts[(int) value] += (delete)
? (-1.0 * updateInstance.weight())
: updateInstance.weight();
m_attStats[i].totalCount += (delete)
? (-1.0 * updateInstance.weight())
: updateInstance.weight();
} else {
if (delete) {
m_attStats[i].numericStats.subtract(value,
updateInstance.weight());
} else {
m_attStats[i].numericStats.add(value, updateInstance.weight());
}
}
}
}
m_totalInstances += (delete)
? (-1.0 * updateInstance.weight())
: (updateInstance.weight());
}
/**
 * Recursively numbers the nodes of the tree in pre-order, using the
 * single-element array as a mutable counter.
 *
 * @param cl_num one-element array holding the next free cluster number
 */
private void assignClusterNums(int[] cl_num) {
    m_clusterNum = cl_num[0]++;
    if (m_children == null) {
        return;
    }
    for (int i = 0; i < m_children.size(); i++) {
        ((CNode) m_children.elementAt(i)).assignClusterNums(cl_num);
    }
}
/**
 * Recursively appends a textual representation of the subtree rooted at
 * this node.
 *
 * @param depth depth of this node in the tree
 * @param text  buffer that accumulates the representation
 */
protected void dumpTree(int depth, StringBuffer text) {
    if (depth == 0) {
        determineNumberOfClusters();
    }
    boolean isLeaf = (m_children == null);
    if (isLeaf) {
        text.append("\n");
        for (int d = 0; d < depth; d++) {
            text.append("| ");
        }
        text.append("leaf " + m_clusterNum + " ["
                + m_clusterInstances.numInstances() + "]");
    } else {
        int numChildren = m_children.size();
        for (int c = 0; c < numChildren; c++) {
            text.append("\n");
            for (int d = 0; d < depth; d++) {
                text.append("| ");
            }
            text.append("node " + m_clusterNum + " ["
                    + m_clusterInstances.numInstances() + "]");
            ((CNode) m_children.elementAt(c)).dumpTree(depth + 1, text);
        }
    }
}
/**
 * Returns the instances at this node as a string. Appends the cluster
 * number of the child that each instance belongs to.
 *
 * @return the instance data as a <code>String</code>
 */
protected String dumpData() {
    if (m_children == null) {
        return m_clusterInstances.toString();
    }
    // Collect the instances of all children into a temporary node so that
    // they appear grouped per child cluster.
    CNode tempNode = new CNode(m_numAttributes);
    tempNode.m_clusterInstances = new Instances(m_clusterInstances, 1);
    for (int i = 0; i < m_children.size(); i++) {
        tempNode.addChildNode((CNode) m_children.elementAt(i));
    }
    Instances tempInst = tempNode.m_clusterInstances;
    // Configure a filter that would add a "Cluster" attribute. Note it is
    // currently never applied (the useFilter call below is disabled).
    Add af = new Add();
    af.setAttributeName("Cluster");
    // Build the label list with a StringBuilder instead of repeated String
    // concatenation (the original was O(n^2) in the number of children).
    StringBuilder labels = new StringBuilder();
    for (int i = 0; i < m_children.size(); i++) {
        CNode temp = (CNode) m_children.elementAt(i);
        labels.append("C").append(temp.m_clusterNum);
        if (i < m_children.size() - 1) {
            labels.append(",");
        }
    }
    af.setNominalLabels(labels.toString());
    //af.setInputFormat(tempInst);
    //tempInst = Filter.useFilter(tempInst, af);
    tempInst.setRelationName("Cluster " + m_clusterNum);
    // Tag each instance with the index of the child it came from.
    int z = 0;
    for (int i = 0; i < m_children.size(); i++) {
        CNode temp = (CNode) m_children.elementAt(i);
        for (int j = 0; j < temp.m_clusterInstances.numInstances(); j++) {
            tempInst.instance(z).setValue(m_numAttributes, (double) i);
            z++;
        }
    }
    return tempInst.toString();
}
/**
 * Recursively emits the dot-graph description of the subtree rooted at
 * this node.
 *
 * @param text buffer that accumulates the graph description
 */
protected void graphTree(StringBuffer text) {
    boolean leaf = (m_children == null);
    text.append("N" + m_clusterNum
            + " [label=\"" + (leaf ? "leaf " : "node ")
            + m_clusterNum + " "
            + " (" + m_clusterInstances.numInstances()
            + ")\" "
            + (leaf ? "shape=box style=filled " : "")
            + (m_saveInstances
            ? "data =\n" + dumpData() + "\n,\n"
            : "")
            + "]\n");
    if (!leaf) {
        // Emit all edges first, then recurse into the children.
        for (int i = 0; i < m_children.size(); i++) {
            CNode kid = (CNode) m_children.elementAt(i);
            text.append("N" + m_clusterNum
                    + "->"
                    + "N" + kid.m_clusterNum
                    + "\n");
        }
        for (int i = 0; i < m_children.size(); i++) {
            ((CNode) m_children.elementAt(i)).graphTree(text);
        }
    }
}
/**
 * Recursively build a clustering representation of the Cobweb tree.
 * Each internal node contributes one sphere cluster centered at the
 * means/modes of its instances; a leaf contributes only when it is the
 * root (preserving the original leaf handling).
 *
 * Fix: the original added the SAME parent cluster once per child inside
 * the loop, producing duplicate identical clusters; it is now added once.
 *
 * @param depth      depth of this node in the tree
 * @param clustering holds the Clustering representation
 */
protected void computeTreeClustering(int depth, Clustering clustering) {
    if (depth == 0) {
        determineNumberOfClusters();
    }
    if (m_children == null) {
        // Leaf: only the root-as-leaf case produces a cluster.
        if (depth == 0) {
            clustering.add(makeSphereCluster());
        }
    } else {
        // Add this node's cluster once, then recurse into the children.
        clustering.add(makeSphereCluster());
        for (int i = 0; i < m_children.size(); i++) {
            ((CNode) m_children.elementAt(i)).computeTreeClustering(depth + 1, clustering);
        }
    }
}
/**
 * Builds a small sphere cluster centered at this node's attribute
 * means/modes. The last attribute's coordinate is deliberately left at 0
 * (the original loop stopped at numAttributes() - 1).
 */
private SphereCluster makeSphereCluster() {
    double[] centroidCoordinates = new double[m_clusterInstances.numAttributes()];
    for (int j = 0; j < m_clusterInstances.numAttributes() - 1; j++) {
        centroidCoordinates[j] = m_clusterInstances.meanOrMode(j);
    }
    return new SphereCluster(centroidCoordinates, .05, m_clusterInstances.numInstances());
}
}
/**
 * Normal constant 1 / (2 * sqrt(PI)), used in the CLASSIT numeric term of
 * the category utility (see categoryUtilityChild).
 */
protected static final double m_normal = 1.0 / (2 * Math.sqrt(Math.PI));
/**
 * Acuity: the minimum standard deviation assumed for numeric attributes
 * (floor applied in getStandardDev).
 */
protected double m_acuity = 1.0;
/**
 * Cutoff: the minimum category utility; below it the children of the best
 * host are discarded.
 */
protected double m_cutoff = 0.002;//0.01 * Cobweb.m_normal;
/**
 * Holds the root of the Cobweb tree; null until the first training
 * instance arrives.
 */
protected CNode m_cobwebTree = null;
/**
 * Number of clusters (nodes in the tree). Must never be queried directly,
 * only via the method numberOfClusters(). Otherwise it's not guaranteed that
 * it contains the correct value.
 *
 * @see #numberOfClusters()
 * @see #m_numberOfClustersDetermined
 */
protected int m_numberOfClusters = -1;
/** whether the number of clusters was already determined */
protected boolean m_numberOfClustersDetermined = false;
/** the number of splits that happened */
protected int m_numberSplits;
/** the number of merges that happened */
protected int m_numberMerges;
/**
 * Output instances in graph representation of Cobweb tree (Allows
 * instances at nodes in the tree to be visualized in the Explorer).
 */
protected boolean m_saveInstances = false;
@SuppressWarnings("hiding")
public static final String classifierPurposeString = "Cobweb and Classit clustering algorithms: it always compares the best host, adding a new leaf, merging the two best hosts, and splitting the best host when considering where to place a new instance..";
@Override
public void resetLearningImpl() {
    // Pull the current option values, then discard all learned state.
    setAcuity(this.acuityOption.getValue());
    setCutoff(this.cutoffOption.getValue());
    m_cobwebTree = null;
    m_numberOfClusters = -1;
    m_numberSplits = 0;
    m_numberMerges = 0;
}
/**
 * Adds an instance to the clusterer, creating the tree root on the
 * first call.
 *
 * @param newInstance the instance to be added
 */
@Override
public void trainOnInstanceImpl(Instance newInstance) {
    m_numberOfClustersDetermined = false;
    if (m_cobwebTree != null) {
        m_cobwebTree.addInstance(newInstance);
    } else {
        m_cobwebTree = new CNode(newInstance.numAttributes(), newInstance);
    }
}
/**
 * Classifies a given instance by walking it down the tree.
 *
 * At each internal node the instance is temporarily folded into the
 * node's statistics (updateStats(..., false)) so that findHost can
 * evaluate it, and removed again (updateStats(..., true)) before
 * descending into the chosen child.
 *
 * @param instance the instance to be assigned to a cluster
 * @return a vote vector with 1.0 at the index of the assigned cluster,
 * or an empty array when no tree has been built yet
 */
public double[] getVotesForInstance(Instance instance) {
CNode host = m_cobwebTree;
CNode temp = null;
determineNumberOfClusters();
if (this.m_numberOfClusters < 1) {
return (new double[0]);
}
double[] ret = new double[this.m_numberOfClusters];
do {
// A leaf cannot be descended further; stop at this host.
if (host.m_children == null) {
temp = null;
break;
}
host.updateStats(instance, false);
temp = host.findHost(instance, true);
host.updateStats(instance, true);
if (temp != null) {
host = temp;
}
} while (temp != null);
ret[host.m_clusterNum] = 1.0;
return ret;
}
/**
 * Lazily (re)computes the number of clusters by renumbering the tree.
 *
 * @see #m_numberOfClusters
 * @see #m_numberOfClustersDetermined
 */
protected void determineNumberOfClusters() {
    if (m_numberOfClustersDetermined || m_cobwebTree == null) {
        return;
    }
    int[] counter = new int[1];
    counter[0] = 0;
    m_cobwebTree.assignClusterNums(counter);
    m_numberOfClusters = counter[0];
    m_numberOfClustersDetermined = true;
}
/**
* Returns the number of clusters.
*
* @return the number of clusters
*/
public int numberOfClusters() {
determineNumberOfClusters();
return m_numberOfClusters;
}
{
// return this.observedClassDistribution.getArrayCopy();
}
@Override
protected Measurement[] getModelMeasurementsImpl() {
// No model-specific measurements are reported for this clusterer.
return null;
}
@Override
public void getModelDescription(StringBuilder out, int indent) {
    // Guard clause: nothing to describe before the first training instance.
    if (m_cobwebTree == null) {
        StringUtils.appendIndented(out, indent, "Cobweb hasn't been built yet!");
        StringUtils.appendNewline(out);
        return;
    }
    StringBuffer text = new StringBuffer();
    m_cobwebTree.dumpTree(0, text);
    StringUtils.appendIndented(out, indent, "CobWeb - ");
    out.append("Number of merges: "
            + m_numberMerges + "\nNumber of splits: "
            + m_numberSplits + "\nNumber of clusters: "
            + numberOfClusters() + "\n" + text.toString());
    StringUtils.appendNewline(out);
}
/**
 * Cobweb is deterministic and does not use a random seed.
 * (Added the @Override annotation for consistency with the other
 * overridden methods in this class.)
 *
 * @return false, always
 */
@Override
public boolean isRandomizable() {
    return false;
}
/**
 * Generates the dot-graph string of the Cobweb tree.
 *
 * @return a <code>String</code> holding the graph description; the graph
 * body is empty when no tree has been built yet (the original threw a
 * NullPointerException in that case)
 */
public String graph() {
    StringBuffer text = new StringBuffer();
    text.append("digraph CobwebTree {\n");
    if (m_cobwebTree != null) {
        m_cobwebTree.graphTree(text);
    }
    text.append("}\n");
    return text.toString();
}
/**
 * Sets the acuity (the minimum standard deviation assumed for numeric
 * attributes).
 *
 * @param a the acuity value
 */
public void setAcuity(double a) {
m_acuity = a;
}
/**
 * Returns the acuity value.
 *
 * @return the acuity
 */
public double getAcuity() {
return m_acuity;
}
/**
 * Sets the cutoff (the minimum category utility; below it the best
 * host's children are discarded).
 *
 * @param c the cutoff value
 */
public void setCutoff(double c) {
m_cutoff = c;
}
/**
 * Returns the cutoff value.
 *
 * @return the cutoff
 */
public double getCutoff() {
return m_cutoff;
}
/**
 * Get the value of saveInstances (whether instance data is embedded in
 * the graph output).
 *
 * @return Value of saveInstances.
 */
public boolean getSaveInstanceData() {
return m_saveInstances;
}
/**
 * Set the value of saveInstances (whether instance data is embedded in
 * the graph output).
 *
 * @param newsaveInstances Value to assign to saveInstances.
 */
public void setSaveInstanceData(boolean newsaveInstances) {
m_saveInstances = newsaveInstances;
}
/**
 * Builds the macro-clustering representation of the current Cobweb tree.
 * (Removed leftover System.out debugging output; a library class should
 * not write to the console.)
 *
 * @return the clustering; empty when no tree has been built yet
 */
public Clustering getClusteringResult() {
    Clustering result = new Clustering();
    if (m_cobwebTree != null) {
        m_cobwebTree.computeTreeClustering(0, result);
    }
    return result;
}
}
| Java |
/**
* [IMacroClusterer.java] for Subspace MOA
*
* @author Stephen Wels
* Data Management and Data Exploration Group, RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.macro;
import moa.cluster.Clustering;
public interface IMacroClusterer {
/**
 * Derives a macro-level clustering from the supplied micro-clusters.
 *
 * @param microClusters the micro-cluster input
 * @return the resulting macro clustering
 */
public Clustering getClustering(Clustering microClusters);
}
| Java |
/**
* [AbstractMacroClusterer.java] for Subspace MOA
*
* @author Stephen Wels
* Data Management and Data Exploration Group, RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.macro;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Vector;
import moa.cluster.Cluster;
import moa.cluster.Clustering;
/**
 * Base class for macro-clusterers that group micro-clusters into
 * arbitrarily shaped macro clusters.
 */
public abstract class AbstractMacroClusterer {

    /**
     * Derives a macro clustering from the supplied micro-clusters.
     *
     * @param microClusters the micro-cluster input
     * @return the resulting macro clustering
     */
    public abstract Clustering getClustering(Clustering microClusters);

    /**
     * Assigns an ID to every macro cluster, preferring the ID that occurs
     * most often among its member micro-clusters, then resolves duplicate
     * IDs (which can appear after a split) by handing out unused ones.
     * Finally every micro-cluster inherits the ID of its macro cluster.
     * (Replaced the deprecated {@code new Integer(1)} boxing constructor
     * with {@code Integer.valueOf}.)
     *
     * @param clustering the macro clustering whose IDs are fixed up in place
     */
    protected void setClusterIDs(Clustering clustering) {
        HashMap<Double, Integer> countIDs = new HashMap<Double, Integer>();
        for (Cluster c : clustering.getClustering()) {
            // Count how often each micro-cluster ID occurs inside this
            // macro cluster.
            HashMap<Double, Integer> ids = new HashMap<Double, Integer>();
            NonConvexCluster ncc = (NonConvexCluster) c;
            for (Cluster mc : ncc.getMicroClusters()) {
                if (!ids.containsKey(mc.getId()))
                    ids.put(mc.getId(), Integer.valueOf(1));
                else {
                    int i = ids.get(mc.getId());
                    i++;
                    ids.put(mc.getId(), i);
                }
            }
            // Pick the most frequent ID (ties resolved in favor of the
            // entry visited last).
            double maxID = -1d;
            int max = -1;
            for (Map.Entry<Double, Integer> entry : ids.entrySet()) {
                if (entry.getValue() >= max) {
                    max = entry.getValue();
                    maxID = entry.getKey();
                }
            }
            c.setId(maxID);
            if (!countIDs.containsKey(maxID))
                countIDs.put(maxID, Integer.valueOf(1));
            else {
                int i = countIDs.get(maxID);
                i++;
                countIDs.put(maxID, i);
            }
        }
        // Check if there are 2 clusters with the same color (same ID, which
        // could appear after a split): reassign all but one to free IDs.
        double freeID = 0;
        List<Double> reservedIDs = new Vector<Double>();
        reservedIDs.addAll(countIDs.keySet());
        for (Map.Entry<Double, Integer> entry : countIDs.entrySet()) {
            if (entry.getValue() > 1 || entry.getKey() == -1) {
                // Find the first free ID, search all clusters carrying the
                // same ID and replace their IDs with free ones. One cluster
                // may keep its ID (unless the ID is the "unset" marker -1).
                int to = entry.getValue();
                if (entry.getKey() != -1)
                    to--;
                for (int i = 0; i < to; i++) {
                    while (reservedIDs.contains(freeID)
                            && freeID < ColorArray.getNumColors())
                        freeID += 1.0;
                    for (int c = clustering.size() - 1; c >= 0; c--)
                        if (clustering.get(c).getId() == entry.getKey()) {
                            clustering.get(c).setId(freeID);
                            reservedIDs.add(freeID);
                            break;
                        }
                }
            }
        }
        // Propagate the final macro IDs down to the member micro-clusters.
        for (Cluster c : clustering.getClustering()) {
            NonConvexCluster ncc = (NonConvexCluster) c;
            for (Cluster mc : ncc.getMicroClusters()) {
                mc.setId(c.getId());
            }
        }
    }
}
| Java |
/**
* [NonConvexCluster.java] for Subspace MOA
*
* A set of [CFCluster]s, grouped as a non-convex cluster.
*
* @author Stephen Wels
* @editor Yunsu Kim
* Data Management and Data Exploration Group, RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.macro;
import java.util.List;
import java.util.Vector;
import moa.cluster.CFCluster;
import moa.cluster.Clustering;
import weka.core.Instance;
/**
 * A set of CFClusters grouped into one non-convex (arbitrarily shaped)
 * macro cluster.
 */
public class NonConvexCluster extends CFCluster implements IDenseMacroCluster {

    private static final long serialVersionUID = 1L;

    /** The member micro-clusters this cluster is composed of. */
    List<CFCluster> mMicroClusters;

    /**
     * Builds a non-convex cluster from a seed CF cluster and the list of
     * member micro-clusters.
     *
     * @param cluster       the seed micro-cluster
     * @param microclusters all member micro-clusters (including the seed)
     */
    public NonConvexCluster(CFCluster cluster, List<CFCluster> microclusters) {
        super(cluster); // required
        mMicroClusters = new Vector<CFCluster>();
        mMicroClusters.addAll(microclusters);
        // Fold all other members into this CF so that the (visualization
        // only) center reflects the whole group.
        for (CFCluster member : microclusters) {
            if (!member.equals(cluster)) {
                this.add(member);
            }
        }
    }

    @Override
    public CFCluster getCF() {
        return this;
    }

    /** Adds a micro-cluster to this macro cluster. */
    public void insert(CFCluster cf) {
        mMicroClusters.add(cf);
    }

    /** Removes a micro-cluster from this macro cluster. */
    public void remove(CFCluster cf) {
        mMicroClusters.remove(cf);
    }

    /**
     * Returns the first positive inclusion probability reported by any
     * member micro-cluster, or the last computed value (0 when there are
     * no members) otherwise.
     */
    @Override
    public double getInclusionProbability(Instance instance) {
        double p = 0;
        for (CFCluster member : mMicroClusters) {
            p = member.getInclusionProbability(instance);
            if (p > 0d) {
                break;
            }
        }
        return p;
    }

    @Override
    public double getRadius() {
        // Zero, since this is an arbitrarily shaped cluster.
        return 0;
    }

    /** @return the member micro-clusters wrapped in a Clustering */
    public Clustering getClustering() {
        Clustering wrapped = new Clustering();
        for (CFCluster member : mMicroClusters) {
            wrapped.add(member);
        }
        return wrapped;
    }

    /** @return the member micro-clusters */
    public List<CFCluster> getMicroClusters() {
        return mMicroClusters;
    }
}
| Java |
/**
* [ColorObject.java] for Subspace MOA
*
* @author Stephen Wels
* Data Management and Data Exploration Group, RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.macro;
import java.awt.Color;
/**
 * An immutable pairing of a display color with its human-readable name.
 */
public class ColorObject {

    private final Color mColor;
    private final String mName;

    /**
     * @param name the color's name
     * @param c    the AWT color value
     */
    public ColorObject(String name, Color c) {
        mName = name;
        mColor = c;
    }

    /** @return the AWT color value */
    public Color getColor() {
        return mColor;
    }

    /** @return the color's name */
    public String getName() {
        return mName;
    }
}
| Java |
/**
* [ColorArray.java] for Subspace MOA
*
* @author Stephen Wels
* Data Management and Data Exploration Group, RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.macro;
import java.awt.Color;
/**
 * A fixed palette of clearly distinguishable colors used to paint
 * clusters. Some similar-looking colors are deliberately commented out.
 */
public class ColorArray {
    public static ColorObject[] mVisibleColors = {
            new ColorObject("blue", new Color(0x0000ff)),
            new ColorObject("blueviolet", new Color(0x8a2be2)),
            new ColorObject("brown", new Color(0xa52a2a)),
            new ColorObject("burlywood", new Color(0xdeb887)),
            new ColorObject("cadetblue", new Color(0x5f9ea0)),
            //new ColorObject("chartreuse", new Color(0x7fff00)),
            new ColorObject("chocolate", new Color(0xd2691e)),
            new ColorObject("coral", new Color(0xff7f50)),
            new ColorObject("cornflowerblue", new Color(0x6495ed)),
            new ColorObject("crimson", new Color(0xdc143c)),
            new ColorObject("cyan", new Color(0x00ffff)),
            new ColorObject("darkblue", new Color(0x00008b)),
            new ColorObject("darkcyan", new Color(0x008b8b)),
            new ColorObject("darkgoldenrod", new Color(0xb8860b)),
            new ColorObject("darkgreen", new Color(0x006400)),
            new ColorObject("darkkhaki", new Color(0xbdb76b)),
            new ColorObject("darkmagenta", new Color(0x8b008b)),
            new ColorObject("darkolivegreen", new Color(0x556b2f)),
            new ColorObject("darkorange", new Color(0xff8c00)),
            // new ColorObject("darkorchid", new Color(0x9932cc)),
            new ColorObject("darkred", new Color(0x8b0000)),
            new ColorObject("darksalmon", new Color(0xe9967a)),
            new ColorObject("darkseagreen", new Color(0x8fbc8f)),
            new ColorObject("darkslateblue", new Color(0x483d8b)),
            new ColorObject("darkslategray", new Color(0x2f4f4f)),
            // new ColorObject("darkturquoise", new Color(0x00ced1)),
            new ColorObject("darkviolet", new Color(0x9400d3)),
            new ColorObject("deeppink", new Color(0xff1493)),
            new ColorObject("deepskyblue", new Color(0x00bfff)),
            // new ColorObject("dodgerblue", new Color(0x1e90ff)),
            new ColorObject("firebrick", new Color(0xb22222)),
            new ColorObject("forestgreen", new Color(0x228b22)),
            new ColorObject("fuchsia", new Color(0xff00ff)),
            new ColorObject("gold", new Color(0xffd700)),
            new ColorObject("goldenrod", new Color(0xdaa520)),
            //new ColorObject("green", new Color(0x008000)),
            new ColorObject("greenyellow", new Color(0xadff2f)),
            new ColorObject("hotpink", new Color(0xff69b4)),
            new ColorObject("indianred", new Color(0xcd5c5c)),
            new ColorObject("indigo", new Color(0x4b0082)),
            //new ColorObject("lawngreen", new Color(0x7cfc00)),
            // new ColorObject("lime", new Color(0x00ff00)),
            // new ColorObject("limegreen", new Color(0x32cd32)),
            new ColorObject("magenta", new Color(0xff00ff)),
            new ColorObject("maroon", new Color(0x800000)),
            new ColorObject("olive", new Color(0x808000)),
            new ColorObject("orange", new Color(0xffa500)),
            new ColorObject("orangered", new Color(0xff4500)),
            new ColorObject("pink", new Color(0xffc0cb)),
            new ColorObject("powderblue", new Color(0xb0e0e6)),
            new ColorObject("purple", new Color(0x800080)),
            new ColorObject("red", new Color(0xff0000)),
            new ColorObject("royalblue", new Color(0x4169e1)),
            new ColorObject("saddlebrown", new Color(0x8b4513)),
            new ColorObject("salmon", new Color(0xfa8072)),
            new ColorObject("seagreen", new Color(0x2e8b57)),
            new ColorObject("skyblue", new Color(0x87ceeb)),
            new ColorObject("slateblue", new Color(0x6a5acd)),
            new ColorObject("tomato", new Color(0xff6347)),
            new ColorObject("violet", new Color(0xee82ee)) };

    /**
     * Returns the i-th palette color, or black when the index is out of
     * range. (Replaced exception-based control flow with a bounds check.)
     *
     * @param i palette index
     * @return the color, or {@code Color.BLACK} for an invalid index
     */
    public static Color getColor(int i) {
        if (i < 0 || i >= mVisibleColors.length) {
            return Color.BLACK;
        }
        return mVisibleColors[i].getColor();
    }

    /**
     * Returns the name of the i-th palette color. (The original caught
     * ArrayIndexOutOfBoundsException only to rethrow it; the exception is
     * now simply allowed to propagate.)
     *
     * @param i palette index
     * @return the color's name
     * @throws ArrayIndexOutOfBoundsException if the index is out of range
     */
    public static String getName(int i) {
        return mVisibleColors[i].getName();
    }

    /** @return the number of colors in the palette */
    public static double getNumColors() {
        return mVisibleColors.length;
    }
}
| Java |
/**
* [DBScan.java] for Subspace MOA
*
* An implementation of DBSCAN.
*
* @editor Yunsu Kim
* Data Management and Data Exploration Group, RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.macro.dbscan;
import java.util.ArrayList;
import java.util.List;
import java.util.Vector;
import moa.cluster.CFCluster;
import moa.cluster.Cluster;
import moa.cluster.Clustering;
import moa.clusterers.macro.AbstractMacroClusterer;
import moa.clusterers.macro.NonConvexCluster;
/**
 * DBSCAN over micro-cluster centers: groups density-reachable
 * micro-clusters into non-convex macro clusters.
 */
public class DBScan extends AbstractMacroClusterer {

    /** The micro-clusters this macro-clusterer was constructed with. */
    Clustering datasource;

    /** Neighborhood radius (epsilon). */
    private double mEps;

    /** Minimum neighborhood size for a core point. */
    private int mMinPts;

    /**
     * @param microClusters the micro-clusters to group
     * @param eps           neighborhood radius
     * @param MinPts        minimum neighborhood size for a core point
     */
    public DBScan(Clustering microClusters, double eps, int MinPts) {
        datasource = microClusters;
        mEps = eps;
        mMinPts = MinPts;
    }

    /**
     * Grows a cluster from a core micro-cluster by repeatedly absorbing
     * density-reachable neighbors. (A no-op {@code addAll} of the already
     * drained neighbour list was removed.)
     *
     * @param dmc        the core micro-cluster to start from
     * @param neighbours its current neighborhood; consumed by this method
     * @param cluster    the cluster under construction (also returned)
     * @param dbmc       all micro-clusters
     * @return the expanded cluster
     */
    private ArrayList<DenseMicroCluster> expandCluster(DenseMicroCluster dmc,
            List<DenseMicroCluster> neighbours,
            ArrayList<DenseMicroCluster> cluster,
            Vector<DenseMicroCluster> dbmc) {
        if (!dmc.isClustered()) {
            dmc.setClustered();
            cluster.add(dmc);
        }
        while (!neighbours.isEmpty()) {
            DenseMicroCluster mc = neighbours.remove(0);
            if (!mc.isVisited()) {
                mc.setVisited();
                List<DenseMicroCluster> expansion = getNeighbourhood(mc, dbmc);
                if (expansion.size() >= mMinPts) {
                    // mc is itself a core point: queue its unvisited
                    // neighbors for processing as well.
                    while (!expansion.isEmpty()) {
                        DenseMicroCluster candidate = expansion.remove(0);
                        if (!candidate.isVisited()) {
                            neighbours.add(candidate);
                        }
                    }
                    if (!mc.isClustered()) {
                        mc.setClustered();
                        cluster.add(mc);
                    }
                }
            }
        }
        return cluster;
    }

    /**
     * Returns all micro-clusters whose centers lie within {@code mEps} of
     * the given micro-cluster's center (including the cluster itself).
     */
    private List<DenseMicroCluster> getNeighbourhood(DenseMicroCluster mc,
            Vector<DenseMicroCluster> dbmc) {
        List<DenseMicroCluster> res = new Vector<DenseMicroCluster>();
        for (DenseMicroCluster dmc : dbmc) {
            if (distance(dmc.getCFCluster().getCenter(),
                    mc.getCFCluster().getCenter()) < mEps) {
                res.add(dmc);
            }
        }
        return res;
    }

    /**
     * Euclidean distance between two points.
     *
     * @param center  first point
     * @param center2 second point
     * @return the Euclidean distance
     */
    private double distance(double[] center, double[] center2) {
        double d = 0D;
        for (int i = 0; i < center.length; i++) {
            double diff = center[i] - center2[i];
            d += diff * diff;
        }
        return Math.sqrt(d);
    }

    /**
     * Runs DBSCAN on the supplied micro-clusters and returns the macro
     * clustering. (Removed leftover System.out debugging output and added
     * a message to the previously bare RuntimeException.)
     *
     * @param microClusters the micro-clusters to group
     * @return the macro clustering; empty when the input is null or empty
     */
    @Override
    public Clustering getClustering(Clustering microClusters) {
        if (microClusters == null || microClusters.size() == 0) {
            return new Clustering();
        }
        // Wrap every micro-cluster; only CFClusters are supported.
        Vector<DenseMicroCluster> dbmc = new Vector<DenseMicroCluster>();
        for (Cluster c : microClusters.getClustering()) {
            if (c instanceof CFCluster) {
                dbmc.add(new DenseMicroCluster((CFCluster) c));
            } else {
                throw new RuntimeException(
                        "DBScan expects CFCluster micro-clusters");
            }
        }
        // Standard DBSCAN pass over the micro-cluster centers.
        ArrayList<ArrayList<DenseMicroCluster>> clusters =
                new ArrayList<ArrayList<DenseMicroCluster>>();
        for (DenseMicroCluster dmc : dbmc) {
            if (!dmc.isVisited()) {
                dmc.setVisited();
                List<DenseMicroCluster> neighbours = getNeighbourhood(dmc, dbmc);
                if (neighbours.size() >= mMinPts) {
                    clusters.add(expandCluster(dmc, neighbours,
                            new ArrayList<DenseMicroCluster>(), dbmc));
                }
            }
        }
        // Build one non-convex macro cluster per DBSCAN cluster.
        CFCluster[] res = new CFCluster[clusters.size()];
        int clusterPos = 0;
        for (ArrayList<DenseMicroCluster> cluster : clusters) {
            if (cluster.size() != 0) {
                CFCluster temp = new NonConvexCluster(
                        cluster.get(0).getCFCluster(),
                        Convert2microclusterList(cluster));
                res[clusterPos] = temp;
                for (int i = 1; i < cluster.size(); i++) {
                    res[clusterPos].add(cluster.get(i).getCFCluster());
                }
                clusterPos++;
            }
        }
        Clustering result = new Clustering(res);
        setClusterIDs(result);
        return result;
    }

    /** Extracts the wrapped CFClusters from a list of DenseMicroClusters. */
    private List<CFCluster> Convert2microclusterList(
            ArrayList<DenseMicroCluster> cluster) {
        List<CFCluster> cfCluster = new Vector<CFCluster>();
        for (DenseMicroCluster d : cluster) {
            cfCluster.add(d.getCFCluster());
        }
        return cfCluster;
    }
}
| Java |
/**
* [DenseMicroCluster.java] for Subspace MOA
*
* A microcluster class for DBSCAN.
*
* @editor Yunsu Kim
* Data Management and Data Exploration Group, RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.macro.dbscan;
import moa.cluster.CFCluster;
/**
 * Wraps a micro-cluster with the bookkeeping flags (visited / clustered)
 * needed by the DBSCAN pass.
 */
public class DenseMicroCluster {

    private final CFCluster mCluster;
    private boolean mVisited;
    private boolean mIsClustered;

    /** @param mc the micro-cluster to wrap */
    public DenseMicroCluster(CFCluster mc) {
        mCluster = mc;
        mVisited = false;
        mIsClustered = false;
    }

    /** Marks this micro-cluster as visited by the DBSCAN scan. */
    public void setVisited() {
        mVisited = true;
    }

    /** @return whether the DBSCAN scan has visited this micro-cluster */
    public boolean isVisited() {
        return mVisited;
    }

    /** Marks this micro-cluster as assigned to a cluster. */
    public void setClustered() {
        mIsClustered = true;
    }

    /** @return whether this micro-cluster has been assigned to a cluster */
    public boolean isClustered() {
        return mIsClustered;
    }

    /** @return the wrapped micro-cluster */
    public CFCluster getCFCluster() {
        return mCluster;
    }
}
| Java |
/**
* [IDenseMacroCluster.java] for Subspace MOA
*
* @author Stephen Wels
* Data Management and Data Exploration Group, RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package moa.clusterers.macro;
import java.util.List;
import moa.cluster.CFCluster;
import moa.cluster.Clustering;
/**
 * Contract for macro-clusterers that assemble macro-clusters out of a set of
 * dense micro-clusters (e.g. the DBSCAN-based macro clustering in this package).
 */
public interface IDenseMacroCluster {

    /** @return the macro-level clustering built from the micro-clusters */
    public Clustering getClustering();

    /** @return the underlying CF micro-clusters the macro-clusters are built from */
    public List<CFCluster> getMicroClusters();
}
| Java |
/*
* ChangeDetectorLearner.java
* Copyright (C) 2011 University of Waikato, Hamilton, New Zealand
* @author Albert Bifet (abifet at cs dot waikato dot ac dot nz)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package moa.learners;
import moa.classifiers.AbstractClassifier;
import moa.core.Measurement;
import moa.classifiers.core.driftdetection.ChangeDetector;
import moa.options.ClassOption;
import weka.core.Instance;
/**
 * Class for detecting concept drift and to be used as a learner.<p>
*
* Valid options are:<p>
*
* -l classname <br> Specify the full class name of a classifier as the basis
* for the concept drift classifier.<p>
*
*
* @author Albert Bifet (abifet at cs dot waikato dot ac dot nz)
* @version $Revision: 7 $
*/
public class ChangeDetectorLearner extends AbstractClassifier {

    private static final long serialVersionUID = 1L;

    /** Option selecting which drift detection method to wrap (-d). */
    public ClassOption driftDetectionMethodOption = new ClassOption("driftDetectionMethod", 'd',
            "Drift detection method to use.", ChangeDetector.class, "DDM");

    /** The configured change detector; fed one value per training instance. */
    protected ChangeDetector driftDetectionMethod;

    @Override
    public void resetLearningImpl() {
        // Copy so that repeated resets each start from a fresh detector state.
        this.driftDetectionMethod = ((ChangeDetector) getPreparedClassOption(this.driftDetectionMethodOption)).copy();
    }

    @Override
    public void trainOnInstanceImpl(Instance inst) {
        // The detector monitors the first attribute value of each instance.
        this.driftDetectionMethod.input(inst.value(0));
    }

    /**
     * Returns the detector's current output signals in place of class votes.
     *
     * @param inst the instance to "classify" (unused; output reflects detector state)
     * @return the change detector's output array
     */
    @Override
    public double[] getVotesForInstance(Instance inst) {
        return this.driftDetectionMethod.getOutput();
    }

    @Override
    public boolean isRandomizable() {
        return false;
    }

    @Override
    public void getModelDescription(StringBuilder out, int indent) {
        //((AbstractClassifier) this.classifier).getModelDescription(out, indent);
    }

    @Override
    protected Measurement[] getModelMeasurementsImpl() {
        //return ((AbstractClassifier) this.classifier).getModelMeasurementsImpl();
        return new Measurement[0];
    }
}
| Java |
/*
* DoTask.java
* Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
* @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package moa;
import moa.core.Globals;
import moa.core.Measurement;
import moa.core.StringUtils;
import moa.core.TimingUtils;
import moa.options.ClassOption;
import moa.options.FlagOption;
import moa.options.IntOption;
import moa.options.Option;
import moa.tasks.FailedTaskReport;
import moa.tasks.Task;
import moa.tasks.TaskThread;
import weka.core.Version;
/**
* Class for running a MOA task from the command line.
*
* @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
* @version $Revision: 7 $
*/
public class DoTask {

    /** Array of characters to use to animate the progress of tasks running. */
    public static final char[] progressAnimSequence = new char[]{'-', '\\',
        '|', '/'};

    /** Maximum length of the status string that shows the progress of tasks running. */
    public static final int MAX_STATUS_STRING_LENGTH = 79;

    /**
     * Checks if the Java version is recent enough to run MOA (JDK 1.6+).
     *
     * Handles both legacy version strings ("1.6.0_45") and the JEP 223 scheme
     * used from Java 9 onward ("9", "11.0.2", "17-ea"). The previous
     * implementation read fixed character positions (charAt(2)/charAt(4)),
     * which threw StringIndexOutOfBoundsException on short strings like "9"
     * and mis-rejected versions such as "1.10".
     *
     * @return true if the Java version is recent.
     */
    public static boolean isJavaVersionOK() {
        String version = System.getProperty("java.version", "");
        int major;
        try {
            // Split on '.', '_' and '-' so "1.6.0_45", "11.0.2" and "17-ea" all parse.
            String[] parts = version.split("[._-]");
            major = Integer.parseInt(parts[0]);
            if (major == 1 && parts.length > 1) {
                // Legacy scheme: "1.6.0" means Java 6.
                major = Integer.parseInt(parts[1]);
            }
        } catch (NumberFormatException nfe) {
            // Unrecognized version string: assume a modern JVM rather than
            // refusing to run on a format this check does not know about.
            return true;
        }
        if (major >= 6) {
            return true;
        }
        System.err.println();
        System.err.println(Globals.getWorkbenchInfoString());
        System.err.println();
        System.err.print("JDK 1.6.0 or higher is required to run MOA. ");
        System.err.println("JDK version " + version + " found");
        return false;
    }

    /**
     * Checks if the Weka version is recent enough to run MOA.
     * For example, if the Weka version is not recent, there may be problems
     * due to the fact that <code>Instance</code> was a class before 3.7.1 and
     * now is an interface.
     *
     * @return true if the Weka version is recent.
     */
    public static boolean isWekaVersionOK() {
        Version version = new Version();
        if (version.isOlder("3.7.1")) {
            System.err.println();
            System.err.println(Globals.getWorkbenchInfoString());
            System.err.println();
            System.err.print("Weka 3.7.1 or higher is required to run MOA. ");
            System.err.println("Weka version " + Version.VERSION + " found");
            return false;
        } else {
            return true;
        }
    }

    /**
     * Main method for running tasks from the command line.
     *
     * The first argument names the task; remaining arguments are parsed as
     * task options plus the standard -S/-R/-F flags defined below. Unless
     * status output is suppressed, the task runs on a background thread while
     * this thread prints an animated progress line to stderr.
     *
     * @param args the options
     */
    public static void main(String[] args) {
        try {
            if (args.length < 1) {
                System.err.println();
                System.err.println(Globals.getWorkbenchInfoString());
                System.err.println();
                System.err.println("No task specified.");
            } else {
                if (isJavaVersionOK() == false || isWekaVersionOK() == false) {
                    return;
                }
                // create standard options
                FlagOption suppressStatusOutputOption = new FlagOption(
                        "suppressStatusOutput", 'S',
                        "Suppress the task status output that is normally send to stderr.");
                FlagOption suppressResultOutputOption = new FlagOption(
                        "suppressResultOutput", 'R',
                        "Suppress the task result output that is normally send to stdout.");
                IntOption statusUpdateFrequencyOption = new IntOption(
                        "statusUpdateFrequency",
                        'F',
                        "How many milliseconds to wait between status updates.",
                        1000, 0, Integer.MAX_VALUE);
                Option[] extraOptions = new Option[]{
                    suppressStatusOutputOption, suppressResultOutputOption,
                    statusUpdateFrequencyOption};
                // build a single string by concatenating cli options
                StringBuilder cliString = new StringBuilder();
                for (int i = 0; i < args.length; i++) {
                    cliString.append(" ").append(args[i]);
                }
                // parse options
                Task task = (Task) ClassOption.cliStringToObject(cliString.toString(), Task.class, extraOptions);
                Object result = null;
                if (suppressStatusOutputOption.isSet()) {
                    // Run synchronously on this thread; no progress display.
                    result = task.doTask();
                } else {
                    System.err.println();
                    System.err.println(Globals.getWorkbenchInfoString());
                    System.err.println();
                    boolean preciseTiming = TimingUtils.enablePreciseTiming();
                    // start the task thread
                    TaskThread taskThread = new TaskThread(task);
                    taskThread.start();
                    int progressAnimIndex = 0;
                    // inform user of progress
                    while (!taskThread.isComplete()) {
                        // Compose one fixed-width status line: spinner, elapsed
                        // time, status, optional percentage, current activity.
                        StringBuilder progressLine = new StringBuilder();
                        progressLine.append(progressAnimSequence[progressAnimIndex]);
                        progressLine.append(' ');
                        progressLine.append(StringUtils.secondsToDHMSString(taskThread.getCPUSecondsElapsed()));
                        progressLine.append(" [");
                        progressLine.append(taskThread.getCurrentStatusString());
                        progressLine.append("] ");
                        double fracComplete = taskThread.getCurrentActivityFracComplete();
                        if (fracComplete >= 0.0) {
                            progressLine.append(StringUtils.doubleToString(
                                    fracComplete * 100.0, 2, 2));
                            progressLine.append("% ");
                        }
                        progressLine.append(taskThread.getCurrentActivityString());
                        // Pad to the full width so shorter lines overwrite longer ones.
                        while (progressLine.length() < MAX_STATUS_STRING_LENGTH) {
                            progressLine.append(" ");
                        }
                        // Truncate over-long lines, marking the cut with '~'.
                        if (progressLine.length() > MAX_STATUS_STRING_LENGTH) {
                            progressLine.setLength(MAX_STATUS_STRING_LENGTH);
                            progressLine.setCharAt(
                                    MAX_STATUS_STRING_LENGTH - 1, '~');
                        }
                        System.err.print(progressLine.toString());
                        // Carriage return (no newline) so the next update redraws in place.
                        System.err.print('\r');
                        if (++progressAnimIndex >= progressAnimSequence.length) {
                            progressAnimIndex = 0;
                        }
                        try {
                            Thread.sleep(statusUpdateFrequencyOption.getValue());
                        } catch (InterruptedException ignored) {
                            // wake up
                        }
                    }
                    // Blank out the progress line before printing final output.
                    StringBuilder cleanupString = new StringBuilder();
                    for (int i = 0; i < MAX_STATUS_STRING_LENGTH; i++) {
                        cleanupString.append(' ');
                    }
                    System.err.println(cleanupString);
                    result = taskThread.getFinalResult();
                    if (!(result instanceof FailedTaskReport)) {
                        System.err.print("Task completed in "
                                + StringUtils.secondsToDHMSString(taskThread.getCPUSecondsElapsed()));
                        if (preciseTiming) {
                            System.err.print(" (CPU time)");
                        }
                        System.err.println();
                        System.err.println();
                    }
                }
                if (result instanceof FailedTaskReport) {
                    System.err.println("Task failed. Reason: ");
                    ((FailedTaskReport) result).getFailureReason().printStackTrace();
                } else {
                    if (!suppressResultOutputOption.isSet()) {
                        if (result instanceof Measurement[]) {
                            StringBuilder sb = new StringBuilder();
                            Measurement.getMeasurementsDescription(
                                    (Measurement[]) result, sb, 0);
                            System.out.println(sb.toString());
                        } else {
                            System.out.println(result);
                        }
                        System.out.flush();
                    }
                }
            }
        } catch (Exception ex) {
            ex.printStackTrace();
        }
    }
}
| Java |
/*
* MakeObject.java
* Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
* @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package moa;
import java.io.File;
import java.io.Serializable;
import moa.core.Globals;
import moa.core.SerializeUtils;
import moa.options.ClassOption;
/**
* Class for writing a MOA object to a file from the command line.
*
* @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
* @version $Revision: 7 $
*/
public class MakeObject {

    /**
     * Main method for writing an object to a file from the command line.
     *
     * Expects the output filename as the first argument; all remaining
     * arguments are joined into a single CLI option string, materialized into
     * an object, and serialized to the given file.
     *
     * @param args the options
     */
    public static void main(String[] args) {
        try {
            System.err.println();
            System.err.println(Globals.getWorkbenchInfoString());
            System.err.println();
            if (args.length < 2) {
                // Not enough arguments: print usage and stop.
                System.err.println("usage: java " + MakeObject.class.getName()
                        + " outputfile.moa \"<object name> <options>\"");
                System.err.println();
                return;
            }
            String filename = args[0];
            // Concatenate the remaining CLI arguments into one option string.
            StringBuilder optionText = new StringBuilder();
            for (int i = 1; i < args.length; i++) {
                optionText.append(' ').append(args[i]);
            }
            // Parse the option string into an object and serialize it.
            System.err.println("Making object...");
            Object result = ClassOption.cliStringToObject(optionText.toString(), Object.class, null);
            System.err.println("Writing object to file: " + filename);
            SerializeUtils.writeToFile(new File(filename),
                    (Serializable) result);
            System.err.println("Done.");
        } catch (Exception ex) {
            ex.printStackTrace();
        }
    }
}
| Java |
/*
* AbstractMOAObject.java
* Copyright (C) 2007 University of Waikato, Hamilton, New Zealand
* @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package moa;
import moa.core.SerializeUtils;
import moa.core.SizeOf;
/**
* Abstract MOA Object. All classes that are serializable, copiable,
* can measure its size, and can give a description, extend this class.
*
* @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
* @version $Revision: 7 $
*/
public abstract class AbstractMOAObject implements MOAObject {

    @Override
    public MOAObject copy() {
        // Delegate to the static serialization-based copy helper.
        return copy(this);
    }

    @Override
    public int measureByteSize() {
        // Delegate to the static measurement helper.
        return measureByteSize(this);
    }

    /**
     * Returns a description of the object.
     *
     * @return a description of the object
     */
    @Override
    public String toString() {
        final StringBuilder description = new StringBuilder();
        getDescription(description, 0);
        return description.toString();
    }

    /**
     * This method produces a copy of an object.
     *
     * @param obj object to copy
     * @return a copy of the object
     */
    public static MOAObject copy(MOAObject obj) {
        try {
            return (MOAObject) SerializeUtils.copyObject(obj);
        } catch (Exception ex) {
            // Wrap checked serialization failures, preserving the cause.
            throw new RuntimeException("Object copy failed.", ex);
        }
    }

    /**
     * Gets the memory size of an object.
     *
     * @param obj object to measure the memory size
     * @return the memory size of this object
     */
    public static int measureByteSize(MOAObject obj) {
        return (int) SizeOf.fullSizeOf(obj);
    }
}
| Java |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.