// JSAT-master/JSAT/src/jsat/classifiers/svm/extended/OnlineAMM.java
package jsat.classifiers.svm.extended;
import java.util.*;
import jsat.DataSet;
import jsat.classifiers.BaseUpdateableClassifier;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.CategoricalResults;
import jsat.classifiers.DataPoint;
import jsat.distributions.Distribution;
import jsat.distributions.Gamma;
import jsat.distributions.LogUniform;
import jsat.exceptions.FailedToFitException;
import jsat.linear.DenseVector;
import jsat.linear.ScaledVector;
import jsat.linear.Vec;
import jsat.linear.VecWithNorm;
import jsat.parameters.Parameter;
import jsat.parameters.Parameterized;
import jsat.utils.DoubleList;
import jsat.utils.IndexTable;
import jsat.utils.IntList;
/**
* This is the Online variant of the Adaptive Multi-Hyperplane Machine (AMM)
 * algorithm. It is related to linear SVMs: instead of having only a single
 * weight vector, the model is extended to multi-class problems by giving each
 * class its own weight vector. It is further extended by allowing each class
 * to dynamically add new weight vectors to increase the non-linearity of the
 * solution. <br>
* This algorithm works best for problems with a very large number of data
* points where traditional kernelized SVMs are prohibitively expensive to train
* due to computational cost. <br>
* <br>
* Unlike the batch variant, the online AMM algorithm has no convergence
 * guarantees. However, it still produces good results.
* <br>
* See:
* <ul>
* <li>Wang, Z., Djuric, N., Crammer, K., & Vucetic, S. (2011). <i>Trading
 * representability for scalability: Adaptive Multi-Hyperplane Machine for
* nonlinear Classification</i>. In Proceedings of the 17th ACM SIGKDD
* international conference on Knowledge discovery and data mining - KDD ’11
* (p. 24). New York, New York, USA: ACM Press. doi:10.1145/2020408.2020420</li>
* <li>Djuric, N., Lan, L., Vucetic, S., & Wang, Z. (2014). <i>BudgetedSVM: A
* Toolbox for Scalable SVM Approximations</i>. Journal of Machine Learning
* Research, 14, 3813–3817. Retrieved from
* <a href="http://jmlr.org/papers/v14/djuric13a.html">here</a></li>
* </ul>
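 * <br>
 * A minimal usage sketch (the {@code trainData} ClassificationDataSet and the
 * {@code testPoint} DataPoint below are assumed to already exist; they are not
 * part of this class):
 * <pre>{@code
 * OnlineAMM amm = new OnlineAMM(1e-3, 50); // regularization lambda and per-class budget
 * amm.setEpochs(5);                        // passes over the data for batch-style training
 * amm.train(trainData);                    // trainData is a ClassificationDataSet
 * CategoricalResults guess = amm.classify(testPoint);
 * }</pre>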
*
* @author Edward Raff
*/
public class OnlineAMM extends BaseUpdateableClassifier implements Parameterized
{
private static final long serialVersionUID = 8291068484917637037L;
/*
 * Because the batch learner reuses this class, we use a map so that we don't
 * have to think about how to handle re-assignment of data points to weight
 * vectors. It also lets us handle removed vectors by checking whether a
 * point's owner is still in the map. nextID ensures every new weight vector
 * gets an ID that is unique with respect to its class label.
 */
protected List<Map<Integer, Vec>> weightMatrix;
protected int[] nextID;
protected double lambda;
protected int k;
protected double c;
protected int time;
protected int classBudget;
/**
* The default {@link #setPruneFrequency(int) frequency for pruning} is
* {@value #DEFAULT_PRUNE_FREQUENCY}.
*/
public static final int DEFAULT_PRUNE_FREQUENCY = 10000;
/**
* The default {@link #setC(double) pruning constant } is
* {@value #DEFAULT_PRUNE_CONSTANT}.
*/
public static final double DEFAULT_PRUNE_CONSTANT = 10.0;
/**
* The default {@link #setClassBudget(int) class budget} is
* {@value #DEFAULT_CLASS_BUDGET}.
*/
public static final int DEFAULT_CLASS_BUDGET = 50;
/**
* The default {@link #setLambda(double) regularization value} is
* {@value #DEFAULT_REGULARIZER}.
*/
public static final double DEFAULT_REGULARIZER = 1e-2;
/**
* Creates a new online AMM learner
*/
public OnlineAMM()
{
this(DEFAULT_REGULARIZER);
}
/**
* Creates a new online AMM learner
* @param lambda the regularization value to use
*/
public OnlineAMM(double lambda)
{
this(lambda, DEFAULT_CLASS_BUDGET);
}
/**
* Creates a new online AMM learner
* @param lambda the regularization value to use
* @param classBudget the maximum number of weight vectors for each class
*/
public OnlineAMM(double lambda, int classBudget)
{
setLambda(lambda);
setClassBudget(classBudget);
setPruneFrequency(DEFAULT_PRUNE_FREQUENCY);
setC(DEFAULT_PRUNE_CONSTANT);
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public OnlineAMM(OnlineAMM toCopy)
{
if(toCopy.weightMatrix != null)
{
this.weightMatrix = new ArrayList<Map<Integer, Vec>>(toCopy.weightMatrix.size());
for(Map<Integer, Vec> oldW : toCopy.weightMatrix)
{
Map<Integer, Vec> newW = new LinkedHashMap<Integer, Vec>(oldW.size());
for(Map.Entry<Integer, Vec> entry : oldW.entrySet())
newW.put(entry.getKey(), entry.getValue().clone());
this.weightMatrix.add(newW);
}
this.nextID = Arrays.copyOf(toCopy.nextID, toCopy.nextID.length);
}
this.time = toCopy.time;
this.lambda = toCopy.lambda;
this.k = toCopy.k;
this.c = toCopy.c;
this.classBudget = toCopy.classBudget;
this.setEpochs(toCopy.getEpochs());
}
@Override
public OnlineAMM clone()
{
return new OnlineAMM(this);
}
/**
* Sets the regularization parameter for this algorithm. The original paper
* suggests trying values 10<sup>-2</sup>, 10<sup>-3</sup>, ...,
* 10<sup>-6</sup>, 10<sup>-7</sup>.
*
* @param lambda the positive regularization parameter in (0, ∞)
*/
public void setLambda(double lambda)
{
if(lambda <= 0 || Double.isNaN(lambda) || Double.isInfinite(lambda))
throw new IllegalArgumentException("Lambda must be positive, not " + lambda);
this.lambda = lambda;
}
/**
* Returns the regularization parameter
* @return the regularization parameter
*/
public double getLambda()
{
return lambda;
}
/**
 * Sets how often the weight vectors are pruned, given as the number of
 * updates between prunings. Pruning more often (a smaller value) makes the
 * pruning more aggressive overall.
*
* @param frequency the number of iterations between each pruning
*/
public void setPruneFrequency(int frequency )
{
if(frequency < 1)
throw new IllegalArgumentException("Pruning frequency must be positive, not " + frequency);
this.k = frequency;
}
/**
* Returns the number of iterations between each pruning
* @return the number of iterations between each pruning
*/
public int getPruneFrequency()
{
return k;
}
/**
* Sets the pruning constant which controls how powerful pruning is when
* pruning occurs. Increasing C increases how many weights will be pruned.
* Changes to the scaling of feature vectors may require a change in the
* value of C
* <br>
* <b>NOTE:</b> This parameter <i>is not the same</i> as the standard C
* parameter associated with SVMs.
* @param c the positive pruning constant to use in (0, ∞)
*/
public void setC(double c)
{
if(c <= 0 || Double.isNaN(c) || Double.isInfinite(c))
throw new IllegalArgumentException("C must be positive, not " + c);
this.c = c;
}
/**
* Returns the pruning constant
* @return the pruning constant
*/
public double getC()
{
return c;
}
/**
 * When given bad parameters there is the possibility of unbounded growth in
 * the number of hyperplanes used. Setting this value to a reasonable upper
 * bound avoids catastrophic memory and CPU use.
* @param classBudget the maximum number of hyperplanes allowed per class
*/
public void setClassBudget(int classBudget)
{
if(classBudget < 1)
throw new IllegalArgumentException("Number of hyperplanes must be positive, not " + classBudget);
this.classBudget = classBudget;
}
/**
* Returns the maximum number of hyperplanes allowed per class
* @return the maximum number of hyperplanes allowed per class
*/
public int getClassBudget()
{
return classBudget;
}
@Override
public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes, CategoricalData predicting)
{
if(numericAttributes < 1)
throw new FailedToFitException("OnlineAMM requires numeric features to perform classification");
weightMatrix = new ArrayList<Map<Integer, Vec>>(predicting.getNumOfCategories());
for(int i = 0; i < predicting.getNumOfCategories(); i++)
weightMatrix.add(new LinkedHashMap<Integer, Vec>());
nextID = new int[weightMatrix.size()];
time = 1;
}
@Override
public void update(DataPoint dataPoint, double weight, final int y_t)
{
update(dataPoint, y_t, Integer.MIN_VALUE);
}
/**
* Performs the work for an update. It can be used for regular online
* learning, or in the batch scenario where the assignments should not be
* updated with each online update. <br>
 * The returned index may change when a new hyperplane is created for the true
 * class during the update. <br>
 * NOTE: this method may change in the future, don't rely on it
*
* @param dataPoint the data point to use in the update
* @param y_t the true label of the data point
* @param z_t the hyperplane of the true class with the maximum response, or {@link Integer#MIN_VALUE} if it should be calculated
* @return the index of the hyperplane of the true class with that maximum response
*/
protected int update(DataPoint dataPoint, final int y_t, int z_t)
{
//2: (x_t, y_t) ← t-th example from S;
final Vec x_t = dataPoint.getNumericalValues();
//3: calculate z_t by (10);
/*
 * Note, we use the same code for both online and batch AMM. If the
 * input is the minimum value, we are doing a normal online update.
 * If not, we use the given input and need to find the response for
 * the specified z_t instead.
 *
 * The paper does not make clear how to handle the case where z_t was
 * assigned to a weight vector that has since been removed. Let's just
 * treat it as unknown. It should be rare, so we won't worry about values
 * changing and being reassigned, especially since weight vectors near the
 * front should be stable (they survived longer than those formerly in
 * front of them, after all).
 */
double z_t_val;
if(z_t == Integer.MIN_VALUE || z_t > nextID[y_t])//z_t is not known, so we will "update" it ourselves
{
z_t_val = 0.0;//infinite implicit zero weight vectors, so max is always at least 0
z_t = -1;//negative value used to indicate the implicit was largest
Map<Integer, Vec> w_yt = weightMatrix.get(y_t);
for(Map.Entry<Integer, Vec> entry_yt : w_yt.entrySet())
{
Vec v = entry_yt.getValue();
double tmp = x_t.dot(v);
if(tmp >= z_t_val)
{
z_t = entry_yt.getKey();
z_t_val = tmp;
}
}
}
else//z_t is given, we just need z_t_val
{
if(!weightMatrix.get(y_t).containsKey(z_t))
{
//happens if we were owned by a vec that has been removed
return update(dataPoint, y_t, Integer.MIN_VALUE);//restart and have a new assignment given
}
if(z_t == -1)
z_t_val = 0.0;//again, implicit
else
z_t_val = weightMatrix.get(y_t).get(z_t).dot(x_t);
}
//4: update W(++t) by (11)
final double eta = 1.0/(lambda*time++);
//computing i_t and j_t from equation (13)
int i_t = (y_t > 0 ? 0 : 1);//j_t may be implicit, but i_t needs to belong to someone in the event of a tie. So just give it to the first class that isn't y_t
double i_t_val = 0.0;
int j_t = -1;
for(int k = 0; k < weightMatrix.size(); k++)
{
if(k == y_t)
continue;
Map<Integer, Vec> w_k = weightMatrix.get(k);
for(Map.Entry<Integer, Vec> entry_kj : w_k.entrySet())
{
Vec w_kj = entry_kj.getValue();
double tmp = x_t.dot(w_kj);
if(tmp > i_t_val)
{
i_t = k;
j_t = entry_kj.getKey();
i_t_val = tmp;
}
}
}
//We need to check if the loss was greater than 0
boolean nonZeroLoss = 0 < 1+i_t_val-z_t_val;
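//Stochastic gradient step: when the hinge loss 1 + i_t_val - z_t_val is positive, the
//most-offending wrong-class vector w_(i_t, j_t) takes a -eta*x_t step and the true-class
//vector w_(y_t, z_t) takes a +eta*x_t step (a new vector is created if it was implicit).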
//Now shrink all weights
for(int i = 0; i < weightMatrix.size(); i++)
{
Map<Integer, Vec> w_i = weightMatrix.get(i);
for(Map.Entry<Integer, Vec> w_entry_ij : w_i.entrySet())
{
int j = w_entry_ij.getKey();
Vec w_ij = w_entry_ij.getValue();
w_ij.mutableMultiply(-(eta*lambda-1));
if(i == i_t && j == j_t && nonZeroLoss)
w_ij.mutableSubtract(eta, x_t);
else if(i == y_t && j == z_t && nonZeroLoss)
w_ij.mutableAdd(eta, x_t);
}
//Also must check for implicit weight vectors needing an update (making them non-implicit)
if (i == i_t && j_t == -1 && nonZeroLoss && w_i.size() < classBudget)
{
double norm = x_t.pNorm(2);
Vec v = new DenseVector(x_t);
v = new VecWithNorm(v, norm);
v = new ScaledVector(v);
v.mutableMultiply(-eta);
w_i.put(nextID[i]++, v);
}
else if (i == y_t && z_t == -1 && nonZeroLoss && w_i.size() < classBudget)
{
double norm = x_t.pNorm(2);
Vec v = new DenseVector(x_t);
v = new VecWithNorm(v, norm);
v = new ScaledVector(v);
v.mutableMultiply(eta);
w_i.put(nextID[i]++, v);
//update z_t to point to the key of the newly added vector so we can return it correctly
z_t = nextID[i]-1;
}
}
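//Periodic pruning: every k updates, weight vectors are removed greedily in order of
//increasing squared norm, for as long as their total squared norm stays within the
//budget c/((time-1)*lambda).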
if(time % k == 0)//Pruning time!
{
double threshold = c/((time-1)*lambda);
IntList classOwner = new IntList(weightMatrix.size());
IntList vecID = new IntList(weightMatrix.size());
DoubleList normVal = new DoubleList(weightMatrix.size());
for(int i = 0; i < weightMatrix.size(); i++)
{
for(Map.Entry<Integer, Vec> entry : weightMatrix.get(i).entrySet())
{
Vec v = entry.getValue();
classOwner.add(i);
vecID.add(entry.getKey());
normVal.add(v.dot(v));
}
}
IndexTable it = new IndexTable(normVal);
for(int orderIndx = 0; orderIndx < normVal.size(); orderIndx++)
{
int i = it.index(orderIndx);
double norm = normVal.get(i);
if(norm >= threshold)
break;
threshold -= norm;
int classOf = classOwner.getI(i);
weightMatrix.get(classOf).remove(vecID.getI(i));
}
}
return z_t;
}
@Override
public CategoricalResults classify(DataPoint data)
{
Vec x = data.getNumericalValues();
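//Winner-take-all prediction: report the class that owns the weight vector with the
//largest dot product against x, taken across every class's set of hyperplanes.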
int k_indx = 0;
double maxVal = Double.NEGATIVE_INFINITY;
for(int k = 0; k < weightMatrix.size(); k++)
{
for(Vec w_kj : weightMatrix.get(k).values())
{
double tmp = x.dot(w_kj);
if(tmp > maxVal)
{
k_indx = k;
maxVal = tmp;
}
}
}
CategoricalResults cr = new CategoricalResults(weightMatrix.size());
cr.setProb(k_indx, 1.0);
return cr;
}
@Override
public boolean supportsWeightedData()
{
return false;
}
/**
* Guess the distribution to use for the regularization term
* {@link #setLambda(double) λ } in AMM.
*
* @param d the data set to get the guess for
* @return the guess for the λ parameter
*/
public static Distribution guessLambda(DataSet d)
{
return new LogUniform(1e-7, 1e-2);
}
}
// JSAT-master/JSAT/src/jsat/classifiers/trees/DecisionStump.java
package jsat.classifiers.trees;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.DataSet;
import jsat.classifiers.*;
import jsat.classifiers.trees.ImpurityScore.ImpurityMeasure;
import jsat.exceptions.FailedToFitException;
import jsat.linear.Vec;
import jsat.math.OnLineStatistics;
import jsat.parameters.Parameterized;
import jsat.regression.RegressionDataSet;
import jsat.regression.Regressor;
import jsat.utils.*;
import jsat.utils.concurrent.AtomicDouble;
import jsat.utils.concurrent.ParallelUtils;
/**
* This class is a 1-rule. It creates one rule that is used to classify all inputs,
* making it a decision tree with only one node. It can be used as a weak learner
* for ensemble learners, or as the nodes in a true decision tree.
* <br><br>
* Categorical values are handled similarly under all circumstances. <br>
 * During classification, numeric attributes are split into regions that are
 * each assigned to their most likely class. <br>
 * During regression, numeric attributes use only a single binary split,
 * chosen to minimize the total weighted squared error. <br>
* <br>
* The Decision Stump supports missing values in training and prediction.
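 * <br><br>
 * A minimal usage sketch (the {@code cds} ClassificationDataSet below is
 * assumed to already exist; it is not part of this class):
 * <pre>{@code
 * DecisionStump stump = new DecisionStump();
 * stump.setMinResultSplitSize(20);     // require at least 20 points per branch
 * stump.train(cds);                    // fit as a stand-alone 1-rule classifier
 * CategoricalResults cr = stump.classify(cds.getDataPoint(0));
 * }</pre>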
*
* @author Edward Raff
*/
public class DecisionStump implements Classifier, Regressor, Parameterized
{
private static final long serialVersionUID = -2849268862089019514L;
/**
* Indicates which attribute to split on
*/
private int splittingAttribute;
/**
* Used only when trained for classification. Contains information about the class being predicted
*/
private CategoricalData predicting;
/**
* Contains the information about the attributes in the data set
*/
private CategoricalData[] catAttributes;
/**
* The number of numeric features in the dataset that this Stump was trained from
*/
private int numNumericFeatures;
/**
* Used only in classification. Contains the numeric boundaries to split on
*/
private List<Double> boundries;
/**
* Used only in classification. Contains the most likely class corresponding to each boundary split
*/
private List<Integer> owners;
/**
* Used only in classification. Contains the results for each of the split options
*/
private CategoricalResults[] results;
/**
* How much of the data went to each path
*/
protected double[] pathRatio;
/**
* Only used during regression. Contains the averages for each branch in
* the first and 2nd index. 3rd index contains the split value.
* If no split could be done, the length is zero and it contains only the
* return value
*/
private double[] regressionResults;
private ImpurityMeasure gainMethod;
private boolean removeContinuousAttributes;
/**
* The minimum number of points that must be inside the split result for a
* split to occur.
*/
private int minResultSplitSize = 10;
/**
* Creates a new decision stump
*/
public DecisionStump()
{
gainMethod = ImpurityMeasure.INFORMATION_GAIN_RATIO;
removeContinuousAttributes = false;
}
/**
 * Unlike with categorical attributes, when a continuous attribute is selected
 * to split on, the points in each branch do not all share the same value for
 * that attribute, so it can be useful to split on the same attribute multiple
 * times. If set true, continuous attributes will be removed from the options
 * list once used. Else, they will be left in the options list.
 *
 * @param removeContinuousAttributes whether or not to remove continuous attributes on a call to {@link #trainC(jsat.classifiers.ClassificationDataSet, java.util.Set) }
*/
public void setRemoveContinuousAttributes(boolean removeContinuousAttributes)
{
this.removeContinuousAttributes = removeContinuousAttributes;
}
public void setGainMethod(ImpurityMeasure gainMethod)
{
this.gainMethod = gainMethod;
}
public ImpurityMeasure getGainMethod()
{
return gainMethod;
}
/**
*
* @return The number of numeric features in the dataset that this Stump was
* trained from
*/
protected int numNumeric()
{
return numNumericFeatures;
}
/**
*
* @return the number of categorical features in the dataset that this Stump
* was trained from.
*/
protected int numCategorical()
{
return catAttributes.length;
}
/**
* When a split is made, it may be that outliers cause the split to
* segregate a minority of points from the majority. The min result split
* size parameter specifies the minimum allowable number of points to end up
 * in one of the splits for it to be admissible for consideration.
*
* @param minResultSplitSize the minimum result split size to use
*/
public void setMinResultSplitSize(int minResultSplitSize)
{
if(minResultSplitSize <= 1)
throw new ArithmeticException("Min result split size must be greater than 1, not " + minResultSplitSize);
this.minResultSplitSize = minResultSplitSize;
}
/**
* Returns the minimum result split size that may be considered for use as
* the attribute to split on.
*
* @return the minimum result split size in use
*/
public int getMinResultSplitSize()
{
return minResultSplitSize;
}
/**
* Returns the attribute that this stump has decided to use to compute
* results. Numeric features start from 0, and categorical features start
* from the number of numeric features.
*
* @return the attribute that this stump has decided to use to compute results.
*/
public int getSplittingAttribute()
{
//TODO refactor the splittingAttribute to just be in this order already
if(splittingAttribute < catAttributes.length)//categorical feature
return numNumericFeatures+splittingAttribute;
//else, is Numerical attribute
int numerAttribute = splittingAttribute - catAttributes.length;
return numerAttribute;
}
/**
* Sets the DecisionStump's predicting information. This will be set automatically
 * by calling {@link #train(jsat.classifiers.ClassificationDataSet) } or
 * {@link #train(jsat.classifiers.ClassificationDataSet, boolean) },
 * but it must be called before using {@link #trainC(jsat.classifiers.ClassificationDataSet, java.util.Set) }.
*
* @param predicting the information about the attribute that will be predicted by this classifier
*/
public void setPredicting(CategoricalData predicting)
{
this.predicting = predicting;
}
@Override
public double regress(DataPoint data)
{
if(regressionResults == null)
throw new RuntimeException("Decusion stump has not been trained for regression");
int path = whichPath(data);
if(path >= 0)
return regressionResults[path];
//else, was missing, average
double avg = 0;
for(int i = 0; i < pathRatio.length; i++)
avg += pathRatio[i]*regressionResults[i];
return avg;
}
@Override
public void train(RegressionDataSet dataSet, boolean parallel)
{
Set<Integer> options = new IntSet(dataSet.getNumFeatures());
for(int i = 0; i < dataSet.getNumFeatures(); i++)
options.add(i);
List<RegressionDataSet> split = trainR(dataSet, options, parallel);
if(split == null)
throw new FailedToFitException("Tree could not be fit, make sure your data is good. Potentially file a bug");
}
/**
 * From the score for the original set that is being split, this computes
 * the gain as the improvement in purity obtained by the split.
 * @param origScore the score of the unsplit set
 * @param source the data set that was split
* @param aSplit the splitting of the data points
* @return the gain score for this split
*/
protected double getGain(ImpurityScore origScore, ClassificationDataSet source, List<IntList> aSplit)
{
ImpurityScore[] scores = getSplitScores(source, aSplit);
return ImpurityScore.gain(origScore, scores);
}
private ImpurityScore[] getSplitScores(ClassificationDataSet source, List<IntList> aSplit)
{
ImpurityScore[] scores = new ImpurityScore[aSplit.size()];
for(int i = 0; i < aSplit.size(); i++)
scores[i] = getClassGainScore(source, aSplit.get(i));
return scores;
}
/**
* A value that is just above zero
*/
private static final double almost0 = 1e-6;
/**
* A value that is just below one
*/
private static final double almost1 = 1.0-almost0;
/**
* Determines which split path this data point would follow from this decision stump.
* Works for both classification and regression.
*
* @param data the data point in question
* @return the integer indicating which path to take. -1 returned if stump is not trained
*/
public int whichPath(DataPoint data)
{
int paths = getNumberOfPaths();
if(paths < 0)
return paths;//Not trained
else if(paths == 1)//ONLY one option, entropy was zero
return 0;
else if(splittingAttribute < catAttributes.length)//Same for classification and regression
return data.getCategoricalValue(splittingAttribute);
//else, is Numerical attribute - but regression or classification?
int numerAttribute = splittingAttribute - catAttributes.length;
double val = data.getNumericalValues().get(numerAttribute);
if(Double.isNaN(val))
return -1;//missing
if (results != null)//Categorical!
{
int pos = Collections.binarySearch(boundries, val);
pos = pos < 0 ? -pos-1 : pos;
return owners.get(pos);
}
else//Regression! It is trained, it would have been grabed at the top if not
{
if(regressionResults.length == 1)
return 0;
else if(val <= regressionResults[2])
return 0;
else
return 1;
}
}
/**
* Returns the number of paths that this decision stump leads to. The stump may not ever
* direct a data point on some of the paths. A result of 1 path means that all data points
* will be given the same decision, and is generated when the entropy of a set is 0.0.
* <br><br>
* -1 is returned for an untrained stump
*
* @return the number of paths this decision stump has stored
*/
public int getNumberOfPaths()
{
if(results != null)//Categorical!
return results.length;
else if(catAttributes != null)//Regression!
if(regressionResults.length == 1)
return 1;
else if(splittingAttribute < catAttributes.length)//Categorical
return catAttributes[splittingAttribute].getNumOfCategories();
else//Numerical is always binary
return 2;
return Integer.MIN_VALUE;//Not trained!
}
@Override
public CategoricalResults classify(DataPoint data)
{
if(results == null)
throw new RuntimeException("DecisionStump has not been trained for classification");
int path = whichPath(data);
if(path >= 0)
return results[path];
else//missing value case, so average
{
Vec tmp = results[0].getVecView().clone();
tmp.mutableMultiply(pathRatio[0]);
for(int i = 1; i < results.length; i++)
tmp.mutableAdd(pathRatio[i], results[i].getVecView());
return new CategoricalResults(tmp.arrayCopy());
}
}
/**
* Returns the categorical result of the i'th path.
* @param i the path to get the result for
* @return the result that would be returned if a data point went down the given path
* @throws IndexOutOfBoundsException if an invalid path is given
* @throws NullPointerException if the stump has not been trained for classification
*/
public CategoricalResults result(int i)
{
if(i < 0 || i >= getNumberOfPaths())
throw new IndexOutOfBoundsException("Invalid path, cannot return a result for path " + i);
return results[i];
}
@Override
public void train(ClassificationDataSet dataSet, boolean parallel)
{
Set<Integer> splitOptions = new IntSet(dataSet.getNumFeatures());
for(int i = 0; i < dataSet.getNumFeatures(); i++)
splitOptions.add(i);
this.predicting = dataSet.getPredicting();
trainC(dataSet, splitOptions, parallel);
}
/**
* This is a helper function that does the work of training this stump. It may be
* called directly by other classes that are creating decision trees to avoid
* redundant repackaging of lists.
*
 * @param dataPoints the data set to train on, containing the true category of each training point
 * @param options the set of attributes that this classifier may choose from. The attribute it does choose will be removed from the set.
 * @return a list of lists, containing all the datapoints that would have followed each path. Useful for training a decision tree
*/
public List<ClassificationDataSet> trainC(ClassificationDataSet dataPoints, Set<Integer> options)
{
return trainC(dataPoints, options, false);
}
public List<ClassificationDataSet> trainC(final ClassificationDataSet data, Set<Integer> options, boolean parallel)
{
//TODO remove paths that have zero probability of occurring, so that stumps do not have an inflated branch value
if(predicting == null)
throw new RuntimeException("Predicting value has not been set");
catAttributes = data.getCategories();
numNumericFeatures = data.getNumNumericalVars();
final ImpurityScore origScoreObj = getClassGainScore(data);
double origScore = origScoreObj.getScore();
if(origScore == 0.0 || data.size() < minResultSplitSize*2)//All data points belong to the same category, or there are too few points to split
{
results = new CategoricalResults[1];//Only one path!
results[0] = new CategoricalResults(predicting.getNumOfCategories());
results[0].setProb(data.getDataPointCategory(0), 1.0);
pathRatio = new double[]{0};
List<ClassificationDataSet> toReturn = new ArrayList<>();
toReturn.add(data);
return toReturn;
}
/**
* The splitting for the split on the attribute with the best gain
*/
final List<ClassificationDataSet> bestSplit = Collections.synchronizedList(new ArrayList<>());
/**
* best gain in information we have seen so far
*/
final AtomicDouble bestGain = new AtomicDouble(-1);
final DoubleList bestRatio = new DoubleList();
/**
* The best attribute to split on
*/
splittingAttribute = -1;
final CountDownLatch latch = new CountDownLatch(options.size());
final ThreadLocal<ClassificationDataSet> localList = ThreadLocal.withInitial(()->data.shallowClone());
ExecutorService ex = parallel ? ParallelUtils.CACHED_THREAD_POOL : new FakeExecutor();
for(final int attribute_to_consider : options)
{
ex.submit(() ->
{
try{
ClassificationDataSet DPs = localList.get();
int attribute = attribute_to_consider;
final double[] gainRet = new double[]{Double.NaN};
List<ClassificationDataSet> aSplit;
PairedReturn<List<Double>, List<Integer>> tmp = null;//Used on numerical attributes
ImpurityScore[] split_scores = null;//used for cat
double weightScale = 1.0;
if(attribute < catAttributes.length)//Then we are doing a categorical split
{
//Create a list of lists to hold the split variables
aSplit = listOfLists(data, catAttributes[attribute].getNumOfCategories());
split_scores = new ImpurityScore[aSplit.size()];
for(int i=0; i < split_scores.length; i++)
split_scores[i] = new ImpurityScore(predicting.getNumOfCategories(), gainMethod);
IntList wasMissing = new IntList();
double missingSum = 0.0;
//Now separate the values in our current list into their proper split bins
for(int i = 0; i < data.size(); i++)
{
int val = data.getDataPoint(i).getCategoricalValue(attribute);
double weight = data.getWeight(i);
if (val >= 0)
{
aSplit.get(val).addDataPoint(data.getDataPoint(i), data.getDataPointCategory(i), weight);
split_scores[val].addPoint(weight, data.getDataPointCategory(i));
}
else
{
wasMissing.add(i);
missingSum += weight;
}
}
int pathsTaken = 0;
for(ClassificationDataSet split : aSplit)
if(split.size() > 0)
pathsTaken++;
if(pathsTaken <= 1)//not a good path, avoid looping on this junk. Can be caused by missing data
{
latch.countDown();
return;
}
if(missingSum > 0)//move missing values into others
{
double newSum = (origScoreObj.getSumOfWeights()-missingSum);
weightScale = newSum/origScoreObj.getSumOfWeights();
double[] fracs = new double[split_scores.length];
for(int i = 0; i < fracs.length; i++)
fracs[i] = split_scores[i].getSumOfWeights()/newSum;
distributMissing(aSplit, fracs, data, wasMissing);
}
}
else//Splitting on a numerical value
{
attribute -= catAttributes.length;
int N = predicting.getNumOfCategories();
//Create a list of lists to hold the split variables
aSplit = listOfLists(data, 2);//Size at least 2
split_scores = new ImpurityScore[2];
tmp = createNumericCSplit(DPs, N, attribute, aSplit,
origScoreObj, gainRet, split_scores);
if(tmp == null)
{
latch.countDown();
return;
}
//Fix it back so it can be used below
attribute+= catAttributes.length;
}
//Now everything is separated!
double gain;
if(!Double.isNaN(gainRet[0]))
gain = gainRet[0];
else
{
if(split_scores == null)
split_scores = getClassGainScore(aSplit);
gain = ImpurityScore.gain(origScoreObj, weightScale, split_scores);
}
if(gain > bestGain.get())
{
synchronized(bestRatio)
{
if(gain > bestGain.get())//double check incase changed
{
bestGain.set(gain);
splittingAttribute = attribute;
bestSplit.clear();
bestSplit.addAll(aSplit);
bestRatio.clear();
double sum = 1e-8;
for(int i = 0; i < split_scores.length; i++)
{
sum += split_scores[i].getSumOfWeights();
bestRatio.add(split_scores[i].getSumOfWeights());
}
for(int i = 0; i < split_scores.length; i++)
bestRatio.set(i, bestRatio.getD(i)/sum);
if(attribute >= catAttributes.length)
{
boundries = tmp.getFirstItem();
owners = tmp.getSecondItem();
}
}
}
}
latch.countDown();
}catch(Exception easx)
{
easx.printStackTrace();
System.out.println();
}
});
}
try
{
latch.await();
}
catch (InterruptedException ex1)
{
Logger.getLogger(DecisionStump.class.getName()).log(Level.SEVERE, null, ex1);
throw new FailedToFitException(ex1);
}
if(splittingAttribute == -1)//We could not find a good split at all
{
bestSplit.clear();
bestSplit.add(data);
CategoricalResults badResult = new CategoricalResults(data.getPriors());
results = new CategoricalResults[] {badResult};
pathRatio = new double[]{1};
return bestSplit;
}
if(splittingAttribute < catAttributes.length || removeContinuousAttributes)
options.remove(splittingAttribute);
results = new CategoricalResults[bestSplit.size()];
pathRatio = bestRatio.getVecView().arrayCopy();
for(int i = 0; i < bestSplit.size(); i++)
results[i] = new CategoricalResults(bestSplit.get(i).getPriors());
return bestSplit;
}
/**
*
* @param dataPoints the original list of data points
* @param N number of predicting target options
* @param attribute the numeric attribute to try and find a split on
* @param aSplit the list of lists to place the results of splitting in
* @param origScore the score value for the data set we are splitting
* @param finalGain array used to reference a double that can be returned.
* If this method determined the gain in order to find the split, it sets
* the value at index zero to the gain it computed. May be null, in which
 * case it is ignored.
 * @param subScores an array of length 2 used to return the impurity scores of the left and right splits
* @return A pair of lists of the same size. The list of doubles containing
* the split boundaries, and the integers containing the path number.
* Multiple splits could go down the same path.
*/
private PairedReturn<List<Double>, List<Integer>> createNumericCSplit(
ClassificationDataSet dataPoints, int N, final int attribute,
List<ClassificationDataSet> aSplit, ImpurityScore origScore, double[] finalGain, ImpurityScore[] subScores)
{
//cache misses are killing us, move data into a double[] to get more juice!
double[] vals = new double[dataPoints.size()];//TODO put this in a thread local somewhere and re-use
IntList workSet = new IntList(dataPoints.size());
IntList wasNaN = new IntList();
for(int i = 0; i < dataPoints.size(); i++)
{
double val = dataPoints.getDataPoint(i).getNumericalValues().get(attribute);
if(!Double.isNaN(val))
{
vals[i-wasNaN.size()] = val;
workSet.add(i);
}
else
{
wasNaN.add(i);
}
}
if(workSet.size() < minResultSplitSize*2)//Too many values were NaN for us to do any more splitting
return null;
//do what i want! Sort workSet based on "vals" array
Collection<List<?>> paired = (Collection<List<?>> )(Collection<?> )Arrays.asList(workSet);
QuickSort.sort(vals, 0, vals.length-wasNaN.size(), paired );//sort the numeric values and put our original list of data points in the correct order at the same time
double bestGain = Double.NEGATIVE_INFINITY;
double bestSplit = Double.NEGATIVE_INFINITY;
int splitIndex = -1;
ImpurityScore rightSide = origScore.clone();
ImpurityScore leftSide = new ImpurityScore(N, gainMethod);
//remove any missing-value points from consideration from the start
double nanWeightRemoved = 0;
for(int i : wasNaN)
{
double weight = dataPoints.getWeight(i);
int truth = dataPoints.getDataPointCategory(i);
nanWeightRemoved += weight;
rightSide.removePoint(weight, truth);
}
double wholeRescale = rightSide.getSumOfWeights()/(rightSide.getSumOfWeights()+nanWeightRemoved);
for(int i = 0; i < minResultSplitSize; i++)
{
if(i >= dataPoints.size())
System.out.println("WHAT?");
int indx = workSet.getI(i);
double weight = dataPoints.getWeight(indx);
int truth = dataPoints.getDataPointCategory(indx);
leftSide.addPoint(weight, truth);
rightSide.removePoint(weight, truth);
}
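//Scan candidate split points from left to right: each iteration moves one point from
//the right-side impurity score to the left, then evaluates the gain of splitting between
//the current value and the next, keeping the best boundary seen so far.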
for(int i = minResultSplitSize; i < dataPoints.size()-minResultSplitSize-1-wasNaN.size(); i++)
{
int indx = workSet.getI(i);
double w = dataPoints.getWeight(indx);
int y = dataPoints.getDataPointCategory(indx);
rightSide.removePoint(w, y);
leftSide.addPoint(w, y);
double leftVal = vals[i];
double rightVal = vals[i+1];
if( (rightVal-leftVal) < 1e-14 )//Values are too close!
continue;
double curGain = ImpurityScore.gain(origScore, wholeRescale, leftSide, rightSide);
if(curGain >= bestGain)
{
double curSplit = (leftVal + rightVal) / 2;
bestGain = curGain;
bestSplit = curSplit;
splitIndex = i+1;
subScores[0] = leftSide.clone();
subScores[1] = rightSide.clone();
}
}
if(splitIndex == -1)
return null;
if(finalGain != null)
finalGain[0] = bestGain;
ClassificationDataSet cds_left = dataPoints.emptyClone();
ClassificationDataSet cds_right = dataPoints.emptyClone();
for(int i : workSet.subList(0, splitIndex))
{
cds_left.addDataPoint(dataPoints.getDataPoint(i), dataPoints.getDataPointCategory(i), dataPoints.getWeight(i));
}
for(int i : workSet.subList(splitIndex, workSet.size()))
{
cds_right.addDataPoint(dataPoints.getDataPoint(i), dataPoints.getDataPointCategory(i), dataPoints.getWeight(i));
}
aSplit.set(0, cds_left);
aSplit.set(1, cds_right);
if(wasNaN.size() > 0)
{
double weightScale = leftSide.getSumOfWeights()/(leftSide.getSumOfWeights() + rightSide.getSumOfWeights()+0.0);
distributMissing(aSplit, new double[]{weightScale, 1-weightScale}, dataPoints, wasNaN);
}
PairedReturn<List<Double>, List<Integer>> tmp =
new PairedReturn<>(
Arrays.asList(bestSplit, Double.POSITIVE_INFINITY),
Arrays.asList(0, 1));
return tmp;
}
/**
* Distributes a list of datapoints that had missing values to each split, re-weighted by the indicated fractions
* @param <T>
* @param splits a list of lists, where each inner list is a split
* @param fracs the fraction of weight to each split, should sum to one
 * @param source the original data set the missing-value points came from
* @param hadMissing the list of datapoints that had missing values
*/
static protected <T> void distributMissing(List<ClassificationDataSet> splits, double[] fracs, ClassificationDataSet source, IntList hadMissing)
{
for (int i : hadMissing)
{
DataPoint dp = source.getDataPoint(i);
for (int j = 0; j < fracs.length; j++)
{
double nw = fracs[j] * source.getWeight(i);
if (Double.isNaN(nw))//happens when no weight is available
continue;
if (nw <= 1e-13)
continue;
splits.get(j).addDataPoint(dp, source.getDataPointCategory(i), nw);
}
}
}
static protected <T> void distributMissing(List<RegressionDataSet> splits, double[] fracs, RegressionDataSet source, IntList hadMissing)
{
for (int i : hadMissing)
{
DataPoint dp = source.getDataPoint(i);
for (int j = 0; j < fracs.length; j++)
{
double nw = fracs[j] * source.getWeight(i);
if (Double.isNaN(nw))//happens when no weight is available
continue;
if (nw <= 1e-13)
continue;
splits.get(j).addDataPoint(dp, source.getTargetValue(i), nw);
}
}
}
public List<RegressionDataSet> trainR(final RegressionDataSet dataPoints, Set<Integer> options)
{
return trainR(dataPoints, options, false);
}
public List<RegressionDataSet> trainR(final RegressionDataSet data, Set<Integer> options, boolean parallel)
{
catAttributes = data.getCategories();
numNumericFeatures = data.getNumNumericalVars();
//Not enough points for a split to occur
if(data.size() <= minResultSplitSize*2)
{
splittingAttribute = catAttributes.length;
regressionResults = new double[1];
double avg = 0.0;
double sum = 0.0;
for(int i = 0; i < data.size(); i++)
{
double weight = data.getWeight(i);
avg += data.getTargetValue(i)*weight;
sum += weight;
}
regressionResults[0] = avg/sum;
List<RegressionDataSet> toRet = new ArrayList<>(1);
toRet.add(data);
return toRet;
}
final List<RegressionDataSet> bestSplit = new ArrayList<>();
final AtomicDouble lowestSplitSqrdError = new AtomicDouble(Double.MAX_VALUE);
final ThreadLocal<RegressionDataSet> localList = ThreadLocal.withInitial(()->data.shallowClone());
ExecutorService ex = parallel ? ParallelUtils.CACHED_THREAD_POOL : new FakeExecutor();
final CountDownLatch latch = new CountDownLatch(options.size());
for(int attribute_to_consider : options)
{
final int attribute = attribute_to_consider;
ex.submit(() ->
{
final RegressionDataSet DPs = localList.get();
List<RegressionDataSet> thisSplit = null;
//The squared error for this split
double thisSplitSqrdErr = Double.MAX_VALUE;
//Contains the means of each split
double[] thisMeans = null;
double[] thisRatio;
if(attribute < catAttributes.length)
{
thisSplit = listOfLists(DPs, catAttributes[attribute].getNumOfCategories());
OnLineStatistics[] stats = new OnLineStatistics[thisSplit.size()];
thisRatio = new double[thisSplit.size()];
for(int i = 0; i < thisSplit.size(); i++)
stats[i] = new OnLineStatistics();
//Now separate the values in our current list into their proper split bins
IntList wasMissing = new IntList();
for(int i = 0; i < DPs.size(); i++)
{
int category = DPs.getDataPoint(i).getCategoricalValue(attribute);
if(category >= 0)
{
thisSplit.get(category).addDataPoint(DPs.getDataPoint(i), DPs.getTargetValue(i), DPs.getWeight(i));
stats[category].add(DPs.getTargetValue(i), DPs.getWeight(i));
}
else//was negative, missing value
{
wasMissing.add(i);
}
}
thisMeans = new double[stats.length];
thisSplitSqrdErr = 0.0;
double sum = 0;
for(int i = 0; i < stats.length; i++)
{
sum += (thisRatio[i] = stats[i].getSumOfWeights());
thisSplitSqrdErr += stats[i].getVarance()*stats[i].getSumOfWeights();
thisMeans[i] = stats[i].getMean();
}
for(int i = 0; i < stats.length; i++)
thisRatio[i] /= sum;
if(!wasMissing.isEmpty())
distributMissing(thisSplit, thisRatio, DPs, wasMissing);
}
else//Find a binary split that reduces the variance!
{
final int numAttri = attribute - catAttributes.length;
//2 passes: the first to sum up the right side, the 2nd to move down and grow the left side
OnLineStatistics rightSide = new OnLineStatistics();
OnLineStatistics leftSide = new OnLineStatistics();
//We need our list in sorted order by attribute!
DoubleList att_vals = new DoubleList(DPs.size());
IntList order = new IntList(DPs.size());
DoubleList weights = new DoubleList(DPs.size());
DoubleList targets = new DoubleList(DPs.size());
IntList wasNaN = new IntList();
for(int i = 0; i < DPs.size(); i++)
{
double v = DPs.getDataPoint(i).getNumericalValues().get(numAttri);
if(Double.isNaN(v))
wasNaN.add(i);
else
{
rightSide.add(DPs.getTargetValue(i), DPs.getWeight(i));
att_vals.add(v);
order.add(i);
weights.add(DPs.getWeight(i));
targets.add(DPs.getTargetValue(i));
}
}
QuickSort.sort(att_vals.getBackingArray(), 0, att_vals.size(), Arrays.asList(order, weights, targets));
int bestS = 0;
thisSplitSqrdErr = Double.POSITIVE_INFINITY;
final double allWeight = rightSide.getSumOfWeights();
thisMeans = new double[3];
thisRatio = new double[2];
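//Scan the sorted values: at each step move one point from the right-side statistics to
//the left, and keep the split whose two sides give the lowest total weighted variance
//(equivalently, the smallest summed squared error).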
for(int i = 0; i < att_vals.size(); i++)
{
double weight = weights.getD(i);
double val = targets.getD(i);
rightSide.remove(val, weight);
leftSide.add(val, weight);
if(i < minResultSplitSize)
continue;
else if(i > att_vals.size()-minResultSplitSize)
break;
double tmpSVariance = rightSide.getVarance()*rightSide.getSumOfWeights()
+ leftSide.getVarance()*leftSide.getSumOfWeights();
if(tmpSVariance < thisSplitSqrdErr && !Double.isInfinite(tmpSVariance))//Infinity can occur once the weights get REALLY small
{
thisSplitSqrdErr = tmpSVariance;
bestS = i;
thisMeans[0] = leftSide.getMean();
thisMeans[1] = rightSide.getMean();
//Third spot contains the split value!
thisMeans[2] = (att_vals.get(bestS) + att_vals.get(bestS+1))/2.0;
thisRatio[0] = leftSide.getSumOfWeights()/allWeight;
thisRatio[1] = rightSide.getSumOfWeights()/allWeight;
}
}
if(att_vals.size() >= minResultSplitSize)
{
//Now we have the binary split that minimizes the variances of the 2 sets,
thisSplit = listOfLists(DPs, 2);
for(int i : order.subList(0, bestS))
{
thisSplit.get(0).addDataPoint(DPs.getDataPoint(i), DPs.getTargetValue(i), DPs.getWeight(i));
}
for(int i : order.subList(bestS+1, order.size()))
{
thisSplit.get(1).addDataPoint(DPs.getDataPoint(i), DPs.getTargetValue(i), DPs.getWeight(i));
}
if(wasNaN.size() > 0)
distributMissing(thisSplit, thisRatio, DPs, wasNaN);
}
else//not a good split, we can't trust it
thisSplitSqrdErr = Double.NEGATIVE_INFINITY;
}
//numerical issue check. When we get a really good split, error can be a tiny negative value due to numerical instability. Check and swap sign if small
if(Math.abs(thisSplitSqrdErr) < 1e-13)//no need to check sign, make simpler
thisSplitSqrdErr = Math.abs(thisSplitSqrdErr);
//Now compare what we've done
if(thisSplitSqrdErr >= 0 && thisSplitSqrdErr < lowestSplitSqrdError.get())//how did we get -Inf?
{
synchronized(bestSplit)
{
if(thisSplitSqrdErr < lowestSplitSqrdError.get())
{
lowestSplitSqrdError.set(thisSplitSqrdErr);
bestSplit.clear();
bestSplit.addAll(thisSplit);
splittingAttribute = attribute;
regressionResults = thisMeans;
pathRatio = thisRatio;
}
}
}
latch.countDown();
});
}
try
{
latch.await();
}
catch (InterruptedException ex1)
{
Logger.getLogger(DecisionStump.class.getName()).log(Level.SEVERE, null, ex1);
throw new FailedToFitException(ex1);
}
//Removal of attribute from list if needed
if(splittingAttribute < catAttributes.length || removeContinuousAttributes)
options.remove(splittingAttribute);
if(bestSplit.size() == 0)//no good option selected. Keep old behavior, return null in that case
return null;
return bestSplit;
}
private static <T extends DataSet<T>> List<T> listOfLists(T type, int n )
{
List<T> aSplit = new ArrayList<>(n);
for (int i = 0; i < n; i++)
aSplit.add((T)type.emptyClone());
return aSplit;
}
@Override
public boolean supportsWeightedData()
{
return true;
}
private ImpurityScore getClassGainScore(ClassificationDataSet dataPoints, IntList subset)
{
ImpurityScore cgs = new ImpurityScore(predicting.getNumOfCategories(), gainMethod);
for(int i : subset)
cgs.addPoint(dataPoints.getWeight(i), dataPoints.getDataPointCategory(i));
return cgs;
}
private ImpurityScore[] getClassGainScore(List<ClassificationDataSet> splits)
{
ImpurityScore[] toRet = new ImpurityScore[splits.size()];
for(int i = 0; i < toRet.length; i++)
toRet[i] = getClassGainScore(splits.get(i));
return toRet;
}
private ImpurityScore getClassGainScore(ClassificationDataSet dataPoints)
{
ImpurityScore cgs = new ImpurityScore(predicting.getNumOfCategories(), gainMethod);
for(int i = 0; i < dataPoints.size(); i++)
cgs.addPoint(dataPoints.getWeight(i), dataPoints.getDataPointCategory(i));
return cgs;
}
@Override
public DecisionStump clone()
{
DecisionStump copy = new DecisionStump();
if(this.catAttributes != null)
copy.catAttributes = CategoricalData.copyOf(catAttributes);
if(this.results != null)
{
copy.results = new CategoricalResults[this.results.length];
for(int i = 0; i < this.results.length; i++ )
copy.results[i] = this.results[i].clone();
}
copy.removeContinuousAttributes = this.removeContinuousAttributes;
copy.splittingAttribute = this.splittingAttribute;
if(this.boundries != null)
copy.boundries = new DoubleList(this.boundries);
if(this.owners != null)
copy.owners = new IntList(this.owners);
if(this.predicting != null)
copy.predicting = this.predicting.clone();
if(regressionResults != null)
copy.regressionResults = Arrays.copyOf(this.regressionResults, this.regressionResults.length);
if(pathRatio != null)
copy.pathRatio = Arrays.copyOf(this.pathRatio, this.pathRatio.length);
copy.minResultSplitSize = this.minResultSplitSize;
copy.gainMethod = this.gainMethod;
copy.numNumericFeatures = this.numNumericFeatures;
return copy;
}
}
// JSAT-master/JSAT/src/jsat/classifiers/trees/DecisionTree.java
package jsat.classifiers.trees;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.Classifier;
import jsat.classifiers.DataPoint;
import jsat.classifiers.DataPointPair;
import jsat.classifiers.trees.ImpurityScore.ImpurityMeasure;
import static jsat.classifiers.trees.TreePruner.*;
import jsat.classifiers.trees.TreePruner.PruningMethod;
import jsat.exceptions.FailedToFitException;
import jsat.exceptions.ModelMismatchException;
import jsat.parameters.Parameter;
import jsat.parameters.Parameterized;
import jsat.regression.RegressionDataSet;
import jsat.regression.Regressor;
import jsat.utils.*;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
/**
 * Creates a decision tree from {@link DecisionStump DecisionStumps}. How this
 * tree performs is controlled by the pruning method selected, and the methods
 * used in the stump.<br>
* A Decision Tree supports missing values in training and prediction.
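 * <br><br>
 * A minimal usage sketch (the {@code cds} ClassificationDataSet below is
 * assumed to already exist; it is not part of this class):
 * <pre>{@code
 * DecisionTree tree = new DecisionTree(8, 10, PruningMethod.REDUCED_ERROR, 0.1);
 * tree.train(cds, true);               // build the tree using multiple cores
 * CategoricalResults cr = tree.classify(cds.getDataPoint(0));
 * }</pre>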
*
* @author Edward Raff
*/
public class DecisionTree implements Classifier, Regressor, Parameterized, TreeLearner
{
private static final long serialVersionUID = 9220980056440500214L;
private int maxDepth;
private int minSamples;
private Node root;
private CategoricalData predicting;
private PruningMethod pruningMethod;
/**
* What portion of the training data will be set aside for pruning.
*/
private double testProportion;
/**
* Base decision stump used to clone so that we can keep certain features
* inside the stump instead of duplicating them here.
*/
private DecisionStump baseStump = new DecisionStump();
@Override
public double regress(DataPoint data)
{
if(data.numNumericalValues() != root.stump.numNumeric() || data.numCategoricalValues() != root.stump.numCategorical())
throw new ModelMismatchException("Tree expected " + root.stump.numNumeric() + " numeric and " +
root.stump.numCategorical() + " categorical features, instead received data with " +
data.numNumericalValues() + " and " + data.numCategoricalValues() + " features respectively");
return root.regress(data);
}
@Override
public void train(RegressionDataSet dataSet, boolean parallel)
{
Set<Integer> options = new IntSet(dataSet.getNumFeatures());
for(int i = 0; i < dataSet.getNumFeatures(); i++)
options.add(i);
train(dataSet, options, parallel);
}
public void train(RegressionDataSet dataSet, Set<Integer> options)
{
train(dataSet, options, false);
}
public void train(RegressionDataSet dataSet, Set<Integer> options, boolean parallel)
{
ModifiableCountDownLatch mcdl = new ModifiableCountDownLatch(1);
root = makeNodeR(dataSet, options, 0, parallel, mcdl);
try
{
mcdl.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(DecisionTree.class.getName()).log(Level.SEVERE, null, ex);
}
if(root == null)//fitting issue, most likely too few data points; try just a stump
{
DecisionStump stump = new DecisionStump();
stump.train(dataSet, parallel);
root = new Node(stump);
}
//TODO add pruning for regression
}
/**
* Creates a Decision Tree that uses {@link PruningMethod#REDUCED_ERROR}
* pruning on a held out 10% of the data.
*/
public DecisionTree()
{
this(Integer.MAX_VALUE, 10, PruningMethod.REDUCED_ERROR, 0.1);
}
/**
* Creates a Decision Tree that does not do any pruning, and is built out only to the specified depth
 * @param maxDepth the maximum depth of the tree to build
*/
public DecisionTree(int maxDepth)
{
this(maxDepth, 10, PruningMethod.NONE, 0.00001);
}
/**
* Creates a new decision tree classifier
*
* @param maxDepth the maximum depth of the tree to create
* @param minSamples the minimum number of samples needed to continue branching
* @param pruningMethod the method of pruning to use after construction
* @param testProportion the proportion of the data set to put aside to use for pruning
*/
public DecisionTree(int maxDepth, int minSamples, PruningMethod pruningMethod, double testProportion)
{
setMaxDepth(maxDepth);
setMinSamples(minSamples);
setPruningMethod(pruningMethod);
setTestProportion(testProportion);
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
protected DecisionTree(DecisionTree toCopy)
{
this.maxDepth = toCopy.maxDepth;
this.minSamples = toCopy.minSamples;
if(toCopy.root != null)
this.root = toCopy.root.clone();
if(toCopy.predicting != null)
this.predicting = toCopy.predicting.clone();
this.pruningMethod = toCopy.pruningMethod;
this.testProportion = toCopy.testProportion;
this.baseStump = toCopy.baseStump.clone();
}
/**
* Returns a Decision Tree with settings initialized so that its behavior is
* approximately that of the C4.5 decision tree algorithm when used on
 * classification data. The exact behavior is not identical, and certain
* base cases may not behave in the exact same manner. However, it uses all
* of the same general algorithms. <br><br>
* The returned tree does not perform or support
* <ul>
* <li>discrete attribute grouping</li>
* <li>windowing</li>
* <li>subsidiary cutpoints (soft boundaries)</li>
* </ul>
*
* @return a decision tree that will behave in a manner similar to C4.5
*/
public static DecisionTree getC45Tree()
{
DecisionTree tree = new DecisionTree();
tree.setMinResultSplitSize(2);
tree.setMinSamples(3);
tree.setTestProportion(1.0);
tree.setPruningMethod(PruningMethod.ERROR_BASED);
tree.baseStump.setGainMethod(ImpurityMeasure.INFORMATION_GAIN_RATIO);
return tree;
}
public void setGainMethod(ImpurityMeasure gainMethod)
{
baseStump.setGainMethod(gainMethod);
}
public ImpurityMeasure getGainMethod()
{
return baseStump.getGainMethod();
}
/**
* When a split is made, it may be that outliers cause the split to
* segregate a minority of points from the majority. The min result split
* size parameter specifies the minimum allowable number of points to end up
 * in one of the splits for it to be admissible for consideration.
* @param size the minimum result split size to use
*/
public void setMinResultSplitSize(int size)
{
baseStump.setMinResultSplitSize(size);
}
/**
* Returns the minimum result split size that may be considered for use as
* the attribute to split on.
* @return the minimum result split size in use
*/
public int getMinResultSplitSize()
{
return baseStump.getMinResultSplitSize();
}
/**
* Sets the maximum depth that this classifier may build trees to.
* @param maxDepth the maximum depth of the trained tree
*/
public void setMaxDepth(int maxDepth)
{
if(maxDepth < 0)
throw new RuntimeException("The maximum depth must be a positive number");
this.maxDepth = maxDepth;
}
/**
* The maximum depth that this classifier may build trees to.
* @return the maximum depth that this classifier may build trees to.
*/
public int getMaxDepth()
{
return maxDepth;
}
/**
* Sets the minimum number of samples needed at each step in order to continue branching
* @param minSamples the minimum number of samples needed to branch
*/
public void setMinSamples(int minSamples)
{
this.minSamples = minSamples;
}
/**
* The minimum number of samples needed at each step in order to continue branching
* @return the minimum number of samples needed at each step in order to continue branching
*/
public int getMinSamples()
{
return minSamples;
}
/**
* Sets the method of pruning that will be used after tree construction
* @param pruningMethod the method of pruning that will be used after tree construction
* @see PruningMethod
*/
public void setPruningMethod(PruningMethod pruningMethod)
{
this.pruningMethod = pruningMethod;
}
/**
* Returns the method of pruning used after tree construction
* @return the method of pruning used after tree construction
*/
public PruningMethod getPruningMethod()
{
return pruningMethod;
}
/**
* Returns the proportion of the training set that is put aside to perform pruning with
* @return the proportion of the training set that is put aside to perform pruning with
*/
public double getTestProportion()
{
return testProportion;
}
/**
* Sets the proportion of the training set that is put aside to perform pruning with.
* <br> NOTE: The values 0 and 1 are special cases. <br>
* 0 indicates that no pruning will occur regardless of the set pruning method <br>
* 1 indicates that the training set will be used as the testing set. This is
* valid for some pruning methods.
* @param testProportion the proportion, must be in the range [0, 1]
*/
public void setTestProportion(double testProportion)
{
if(testProportion < 0 || testProportion > 1 || Double.isInfinite(testProportion) || Double.isNaN(testProportion))
throw new ArithmeticException("Proportion must be in the range [0, 1], not " + testProportion);
this.testProportion = testProportion;
}
@Override
public CategoricalResults classify(DataPoint data)
{
if(data.numNumericalValues() != root.stump.numNumeric() || data.numCategoricalValues() != root.stump.numCategorical())
throw new ModelMismatchException("Tree expected " + root.stump.numNumeric() + " numeric and " +
root.stump.numCategorical() + " categorical features, instead received data with " +
data.numNumericalValues() + " and " + data.numCategoricalValues() + " features respectively");
return root.classify(data);
}
@Override
public void train(ClassificationDataSet dataSet, boolean parallel)
{
Set<Integer> options = new IntSet(dataSet.getNumFeatures());
for(int i = 0; i < dataSet.getNumFeatures(); i++)
options.add(i);
trainC(dataSet, options, parallel);
}
/**
* Performs exactly the same as
 * {@link #train(jsat.classifiers.ClassificationDataSet, boolean) },
* but the user can specify a subset of the features to be considered.
*
* @param dataSet the data set to train from
* @param options the subset of features to split on
* @param parallel whether or not to use multiple cores in training
*/
protected void trainC(ClassificationDataSet dataSet, Set<Integer> options, boolean parallel)
{
if(dataSet.size() < minSamples)
throw new FailedToFitException("There are only " +
dataSet.size() +
" data points in the sample set, at least " + minSamples +
" are needed to make a tree");
this.predicting = dataSet.getPredicting();
ModifiableCountDownLatch mcdl = new ModifiableCountDownLatch(1);
ClassificationDataSet train = dataSet;
ClassificationDataSet test = null;
if(pruningMethod != PruningMethod.NONE && testProportion != 0.0)//Then we need to set aside a testing set
{
if(testProportion < 1)
{
List<ClassificationDataSet> split = dataSet.randomSplit(RandomUtil.getRandom(), 1-testProportion, testProportion);
train = split.get(0);
test = split.get(1);
}
else
test = train;
}
this.root = makeNodeC(train, options, 0, parallel, mcdl);
try
{
mcdl.await();
}
catch (InterruptedException ex)
{
System.err.println(ex.getMessage());
Logger.getLogger(DecisionTree.class.getName()).log(Level.SEVERE, null, ex);
}
if(root == null)//fitting issue, most likely too few data points; try just a stump
{
DecisionStump stump = new DecisionStump();
stump.train(train, parallel);
root = new Node(stump);
}
else
prune(root, pruningMethod, test);
}
/**
* Makes a new node for classification
* @param dataPoints the list of data points paired with their class
* @param options the attributes that this tree may select from
* @param depth the current depth of the tree
* @param parallel whether or not to use multiple threads when training
* @param mcdl count down latch
* @return the node created, or null if no node was created
*/
protected Node makeNodeC(ClassificationDataSet dataPoints, final Set<Integer> options, final int depth,
final boolean parallel, final ModifiableCountDownLatch mcdl)
{
//figure out what level of parallelism we are going to use, feature wise or depth wise
boolean mePara = (1L<<depth) < SystemInfo.LogicalCores*2;//should THIS node use the Stump parallelism
boolean depthPara = (1L<<(depth+1)) >= SystemInfo.LogicalCores*2;//should the NEXT node use the stump parallelism
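        //while the tree is still narrow (fewer nodes at this depth than ~2x the cores) parallelize inside each stump; once it is wide enough, parallelize across sub-trees instead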
if(depth > maxDepth || options.isEmpty() || dataPoints.size() < minSamples || dataPoints.isEmpty())
{
mcdl.countDown();
return null;
}
DecisionStump stump = baseStump.clone();
stump.setPredicting(this.predicting);
final List<ClassificationDataSet> splits;
if(mePara)
splits = stump.trainC(dataPoints, options, parallel);
else
splits = stump.trainC(dataPoints, options);
final Node node = new Node(stump);
        if(stump.getNumberOfPaths() > 1)//If there is 1 path, we have a perfect classification - nothing more to do
for(int i = 0; i < node.paths.length; i++)
{
final int ii = i;
final ClassificationDataSet splitI = splits.get(i);
mcdl.countUp();
if(depthPara)
{
(parallel ? ParallelUtils.CACHED_THREAD_POOL : new FakeExecutor()).submit(() ->
{
node.paths[ii] = makeNodeC(splitI, new IntSet(options), depth+1, parallel, mcdl);
});
}
else
node.paths[ii] = makeNodeC(splitI, new IntSet(options), depth+1, parallel, mcdl);
}
mcdl.countDown();
return node;
}
/**
* Makes a new node for regression
* @param dataPoints the list of data points paired with their associated real value
* @param options the attributes that this tree may select from
* @param depth the current depth of the tree
* @param parallel whether or not to perform parallel computation
* @param mcdl count down latch
* @return the node created, or null if no node was created
*/
protected Node makeNodeR(RegressionDataSet dataPoints, final Set<Integer> options, final int depth,
final boolean parallel, final ModifiableCountDownLatch mcdl)
{
//figure out what level of parallelism we are going to use, feature wise or depth wise
boolean mePara = (1L<<depth) < SystemInfo.LogicalCores*2;//should THIS node use the Stump parallelism
boolean depthPara = (1L<<(depth+1)) >= SystemInfo.LogicalCores*2;//should the NEXT node use the stump parallelism
if(depth > maxDepth || options.isEmpty() || dataPoints.size() < minSamples || dataPoints.isEmpty())
{
mcdl.countDown();
return null;
}
DecisionStump stump = baseStump.clone();
final List<RegressionDataSet> splits;
if(mePara)
splits = stump.trainR(dataPoints, options, parallel);
else
splits = stump.trainR(dataPoints, options);
        if(splits == null)//an error occurred, probably not enough data for many categorical values
{
mcdl.countDown();
return null;
}
final Node node = new Node(stump);
        if(stump.getNumberOfPaths() > 1)//If there is 1 path, we have a perfect classification - nothing more to do
for(int i = 0; i < node.paths.length; i++)
{
final int ii = i;
final RegressionDataSet splitI = splits.get(i);
mcdl.countUp();
if(depthPara)
{
(parallel ? ParallelUtils.CACHED_THREAD_POOL : new FakeExecutor())
.submit(() ->
{
node.paths[ii] = makeNodeR(splitI, new IntSet(options), depth+1, parallel, mcdl);
});
}
else
node.paths[ii] = makeNodeR(splitI, new IntSet(options), depth+1, parallel, mcdl);
}
mcdl.countDown();
return node;
}
public void trainC(ClassificationDataSet dataSet, Set<Integer> options)
{
trainC(dataSet, options, false);
}
@Override
public boolean supportsWeightedData()
{
return true;
}
@Override
public DecisionTree clone()
{
DecisionTree copy = new DecisionTree(maxDepth, minSamples, pruningMethod, testProportion);
if(this.predicting != null)
copy.predicting = this.predicting.clone();
if(this.root != null)
copy.root = this.root.clone();
copy.baseStump = this.baseStump.clone();
return copy;
}
@Override
public TreeNodeVisitor getTreeNodeVisitor()
{
return root;
}
protected static class Node extends TreeNodeVisitor
{
private static final long serialVersionUID = -7507748424627088734L;
final protected DecisionStump stump;
protected Node[] paths;
public Node(DecisionStump stump)
{
this.stump = stump;
paths = new Node[stump.getNumberOfPaths()];
}
@Override
public double getPathWeight(int path)
{
return stump.pathRatio[path];
}
@Override
public boolean isLeaf()
{
if(paths == null )
return true;
for(int i = 0; i < paths.length; i++)
if(paths[i] != null)
return false;
return true;
}
@Override
public int childrenCount()
{
return paths.length;
}
@Override
public CategoricalResults localClassify(DataPoint dp)
{
return stump.classify(dp);
}
@Override
public double localRegress(DataPoint dp)
{
return stump.regress(dp);
}
@Override
public Node clone()
{
Node copy = new Node( (DecisionStump)this.stump.clone());
for(int i = 0; i < this.paths.length; i++)
copy.paths[i] = this.paths[i] == null ? null : this.paths[i].clone();
return copy;
}
@Override
public TreeNodeVisitor getChild(int child)
{
if(isLeaf())
return null;
else
return paths[child];
}
@Override
public void setPath(int child, TreeNodeVisitor node)
{
if(node instanceof Node)
paths[child] = (Node) node;
else
super.setPath(child, node);
}
@Override
public void disablePath(int child)
{
paths[child] = null;
}
@Override
public int getPath(DataPoint dp)
{
return stump.whichPath(dp);
}
@Override
public boolean isPathDisabled(int child)
{
if(isLeaf())
return true;
return paths[child] == null;
}
@Override
public Collection<Integer> featuresUsed()
{
IntList used = new IntList(1);
used.add(stump.getSplittingAttribute());
return used;
}
}
@Override
public List<Parameter> getParameters()
{
List<Parameter> toRet = new ArrayList<>(Parameter.getParamsFromMethods(this));
        for (Parameter param : baseStump.getParameters())//We know the two setGainMethod parameters will collide
if(!param.getName().contains("Gain Method") && !param.getName().contains("Numeric Handling"))
toRet.add(param);
return Collections.unmodifiableList(toRet);
}
}
| 21,656 | 33.874396 | 126 | java |
JSAT | JSAT-master/JSAT/src/jsat/classifiers/trees/ERTrees.java |
package jsat.classifiers.trees;
import static java.lang.Math.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import jsat.DataSet;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.exceptions.FailedToFitException;
import jsat.math.OnLineStatistics;
import jsat.regression.RegressionDataSet;
import jsat.utils.FakeExecutor;
import jsat.utils.SystemInfo;
import jsat.utils.concurrent.ParallelUtils;
/**
* Extra Randomized Trees (ERTrees) is an ensemble method built on top of
* {@link ExtraTree}. The randomness of the trees provides incredibly high
* variance, yet a low bias. The sum of many randomized trees proves to be
* a powerful and fast learner. <br>
* The default settings are those suggested in the paper. However, the default
* stop size suggested (especially for classification) is often too small. You
* may want to consider increasing it if the accuracy is too low. <br>
* See: <br>
 * Geurts, P., Ernst, D., & Wehenkel, L. (2006). <i>Extremely randomized trees
* </i>. Machine learning, 63(1), 3–42. doi:10.1007/s10994-006-6226-1
*
* @author Edward Raff
*/
public class ERTrees extends ExtraTree
{
private static final long serialVersionUID = 7139392253403373132L;
    //NOTE ExtraTree uses dynamic reflection, so when extending it the new getter/setter pairs are automatically picked up
private ExtraTree baseTree = new ExtraTree();
private boolean useDefaultSelectionCount = true;
private boolean useDefaultStopSize = true;
private ExtraTree[] forrest;
private int forrestSize;
/**
* Creates a new Extremely Randomized Trees learner
*/
public ERTrees()
{
this(100);
}
/**
* Creates a new Extremely Randomized Trees learner
* @param forrestSize the number of trees to construct
*/
public ERTrees(int forrestSize)
{
this.forrestSize = forrestSize;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public ERTrees(ERTrees toCopy)
{
super(toCopy);
this.forrestSize = toCopy.forrestSize;
this.useDefaultSelectionCount = toCopy.useDefaultSelectionCount;
this.useDefaultStopSize = toCopy.useDefaultStopSize;
this.baseTree = toCopy.baseTree.clone();
if(toCopy.forrest != null)
{
this.forrest = new ExtraTree[toCopy.forrest.length];
for(int i = 0; i < toCopy.forrest.length; i++)
this.forrest[i] = toCopy.forrest[i].clone();
}
}
/**
* Measures the statistics of feature importance from the trees in this
* forest. For classification datasets, the {@link MDI} method with Gini
* impurity will be used. For others, the {@link ImportanceByUses} method
* will be used. This may change in the future.
*
* @param <Type>
* @param data the dataset to infer the feature importance from with respect
* to the current model.
     * @return an array of statistics, where each index corresponds to a
* specific feature. Numeric features start from the zero index, categorical
* features start from the index equal to the number of numeric features.
*/
public <Type extends DataSet> OnLineStatistics[] evaluateFeatureImportance(DataSet<Type> data)
{
if(data instanceof ClassificationDataSet)
return evaluateFeatureImportance(data, new MDI(ImpurityScore.ImpurityMeasure.GINI));
else
return evaluateFeatureImportance(data, new ImportanceByUses());
}
/**
* Measures the statistics of feature importance from the trees in this
* forest.
*
* @param <Type>
* @param data the dataset to infer the feature importance from with respect
* to the current model.
     * @param imp the method of determining the feature importance that will be
* applied to each tree in this model
     * @return an array of statistics, where each index corresponds to a
* specific feature. Numeric features start from the zero index, categorical
* features start from the index equal to the number of numeric features.
*/
public <Type extends DataSet> OnLineStatistics[] evaluateFeatureImportance(DataSet<Type> data, TreeFeatureImportanceInference imp)
{
OnLineStatistics[] importances = new OnLineStatistics[data.getNumFeatures()];
for(int i = 0; i < importances.length; i++)
importances[i] = new OnLineStatistics();
for(ExtraTree tree :forrest)
{
double[] feats = imp.getImportanceStats(tree, data);
for(int i = 0; i < importances.length; i++)
importances[i].add(feats[i]);
}
return importances;
}
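    /**
     * Small reporting sketch added for documentation; it is not part of the
     * original class. It averages the per-tree importance statistics computed
     * above and prints one line per feature, numeric features first and
     * categorical features after them.
     *
     * @param <Type>
     * @param data the dataset to measure the feature importance against
     */
    private <Type extends DataSet> void printFeatureImportance(DataSet<Type> data)
    {
        OnLineStatistics[] stats = evaluateFeatureImportance(data);
        for(int i = 0; i < stats.length; i++)
            System.out.println("Feature " + i + ": mean importance " + stats[i].getMean());
    }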
/**
* Sets whether or not to use the default heuristic for the number of random
* features to select as candidates for each node. If <tt>true</tt> the
* value of selectionCount will be modified during training, using sqrt(n)
* features for classification and all features for regression. Otherwise,
* whatever value set before hand will be used.
* @param useDefaultSelectionCount whether or not to use the heuristic
* version
*/
public void setUseDefaultSelectionCount(boolean useDefaultSelectionCount)
{
this.useDefaultSelectionCount = useDefaultSelectionCount;
}
/**
* Returns if the default heuristic for the selection count is used
* @return if the default heuristic for the selection count is used
*/
public boolean getUseDefaultSelectionCount()
{
return useDefaultSelectionCount;
}
/**
     * Sets whether or not to use the default heuristic for the number of points
* to force a new node to be a leaf. If <tt>true</tt> the value for stopSize
* will be altered during training, set to 2 for classification and 5 for
* regression. Otherwise, whatever value set beforehand will be used.
* @param useDefaultStopSize whether or not to use the heuristic version
*/
public void setUseDefaultStopSize(boolean useDefaultStopSize)
{
this.useDefaultStopSize = useDefaultStopSize;
}
/**
* Returns if the default heuristic for the stop size is used
* @return if the default heuristic for the stop size is used
*/
public boolean getUseDefaultStopSize()
{
return useDefaultStopSize;
}
    /**
     * Sets the number of trees to create for the ensemble
     * @param forrestSize the number of trees to train
     */
    public void setForrestSize(int forrestSize)
{
this.forrestSize = forrestSize;
}
    /**
     * Returns the number of trees used in the ensemble
     * @return the number of trees used in the ensemble
     */
    public int getForrestSize()
{
return forrestSize;
}
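    /**
     * Illustrative configuration sketch added for documentation; it is not
     * part of the original class. It shows how to opt out of the paper-default
     * heuristics when the default stop size of 2 grows trees that are too deep
     * for a noisy classification problem. The specific values are arbitrary
     * examples, and it is assumed that the stop size and selection count
     * setters inherited from ExtraTree configure the trees grown by this
     * ensemble.
     *
     * @param data the data to train on
     * @return the trained forest
     */
    private static ERTrees exampleManualHeuristics(ClassificationDataSet data)
    {
        ERTrees forest = new ERTrees(200);
        forest.setUseDefaultStopSize(false);//otherwise train() resets the stop size back to 2
        forest.setStopSize(25);
        forest.setUseDefaultSelectionCount(false);//otherwise train() resets it to sqrt(d)
        forest.setSelectionCount(max((int) sqrt(data.getNumFeatures()), 1) * 2);
        forest.train(data, true);
        return forest;
    }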
@Override
public CategoricalResults classify(DataPoint data)
{
CategoricalResults cr = new CategoricalResults(predicting.getNumOfCategories());
for(ExtraTree tree : forrest)
cr.incProb(tree.classify(data).mostLikely(), 1.0);
cr.normalize();
return cr;
}
private void doTraining(boolean parallel, DataSet dataSet) throws FailedToFitException
{
forrest = new ExtraTree[forrestSize];
ParallelUtils.run(parallel, forrestSize, (start, end) ->
{
if (dataSet instanceof ClassificationDataSet)
{
ClassificationDataSet cds = (ClassificationDataSet) dataSet;
for (int i = start; i < end; i++)
{
forrest[i] = baseTree.clone();
forrest[i].train(cds);
}
}
else if (dataSet instanceof RegressionDataSet)
{
RegressionDataSet rds = (RegressionDataSet) dataSet;
for (int i = start; i < end; i++)
{
forrest[i] = baseTree.clone();
forrest[i].train(rds);
}
}
else
throw new RuntimeException("BUG: Please report");
});
}
@Override
public void train(ClassificationDataSet dataSet, boolean parallel)
{
if(useDefaultSelectionCount)
baseTree.setSelectionCount((int)max(round(sqrt(dataSet.getNumFeatures())), 1));
if(useDefaultStopSize)
baseTree.setStopSize(2);
predicting = dataSet.getPredicting();
doTraining(parallel, dataSet);
}
@Override
public boolean supportsWeightedData()
{
return true;
}
@Override
public double regress(DataPoint data)
{
double mean = 0.0;
for(ExtraTree tree : forrest)
mean += tree.regress(data);
return mean/forrest.length;
}
@Override
public void train(RegressionDataSet dataSet, boolean parallel)
{
if(useDefaultSelectionCount)
baseTree.setSelectionCount(dataSet.getNumFeatures());
if(useDefaultStopSize)
baseTree.setStopSize(5);
doTraining(parallel, dataSet);
}
@Override
public ERTrees clone()
{
return new ERTrees(this);
}
@Override
public TreeNodeVisitor getTreeNodeVisitor()
{
throw new UnsupportedOperationException("Can not get the tree node vistor becase ERTrees is really a ensemble");
}
}
| 9,436 | 32.703571 | 134 | java |
JSAT | JSAT-master/JSAT/src/jsat/classifiers/trees/ExtraTree.java | package jsat.classifiers.trees;
import java.util.*;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.Classifier;
import jsat.classifiers.DataPoint;
import jsat.classifiers.trees.ImpurityScore.ImpurityMeasure;
import jsat.math.OnLineStatistics;
import jsat.parameters.Parameterized;
import jsat.regression.RegressionDataSet;
import jsat.regression.Regressor;
import jsat.utils.IntList;
import jsat.utils.IntSet;
import jsat.utils.ListUtils;
import jsat.utils.random.RandomUtil;
/**
* The ExtraTree is an Extremely Randomized Tree. Splits are chosen at random,
* and the features that are selected are also chosen at random for each new
* node in the tree. <br>
* If set to randomly select one feature for each node, it becomes a <i>Totally
* Randomized Tree</i><br>
* See: <br>
 * Geurts, P., Ernst, D., & Wehenkel, L. (2006). <i>Extremely randomized trees
* </i>. Machine learning, 63(1), 3–42. doi:10.1007/s10994-006-6226-1
*
* @author Edward Raff
*/
public class ExtraTree implements Classifier, Regressor, TreeLearner, Parameterized
{
    //TODO in both of the train methods, 2 passes are done for numeric features. This can be done in one pass by finding the min/max when we split, and passing that info in the argument parameters
private static final long serialVersionUID = 7433728970041876327L;
private int stopSize;
private int selectionCount;
protected CategoricalData predicting;
private boolean binaryCategoricalSplitting = true;
/**
     * Just stores the number of numeric features that were in the dataset so
     * that the featuresUsed method of the tree nodes can be implemented correctly for categorical
* variables.
*/
private int numNumericFeatures;
private ImpurityScore.ImpurityMeasure impMeasure = ImpurityMeasure.NMI;
private TreeNodeVisitor root;
/**
* Creates a new Extra Tree that will use all features in the training set
*/
public ExtraTree()
{
this(Integer.MAX_VALUE, 5);
}
/**
* Creates a new Extra Tree
*
* @param selectionCount the number of features to select
* @param stopSize the stop size
*/
public ExtraTree(int selectionCount, int stopSize)
{
this.stopSize = stopSize;
this.selectionCount = selectionCount;
this.impMeasure = ImpurityMeasure.NMI;
}
/**
* Copy constructor.
* @param toCopy the object to copy
*/
public ExtraTree(ExtraTree toCopy)
{
this.stopSize = toCopy.stopSize;
this.selectionCount = toCopy.selectionCount;
if(toCopy.predicting != null)
this.predicting = toCopy.predicting;
this.numNumericFeatures = toCopy.numNumericFeatures;
this.binaryCategoricalSplitting = toCopy.binaryCategoricalSplitting;
this.impMeasure = toCopy.impMeasure;
if(toCopy.root != null)
this.root = toCopy.root.clone();
}
/**
* Sets the impurity measure used during classification tree construction to
* select the best of the features.
* @param impurityMeasure the impurity measure to use
*/
public void setImpurityMeasure(ImpurityMeasure impurityMeasure)
{
this.impMeasure = impurityMeasure;
}
/**
* Returns the impurity measure in use
* @return the impurity measure in use
*/
public ImpurityMeasure getImpurityMeasure()
{
return impMeasure;
}
/**
* Sets the stopping size for tree growth. When a node has less than or
* equal to <tt>stopSize</tt> data points to train from, it terminates and
* produces a leaf node.
     * @param stopSize the number of data points at or below which a node will not be split
*/
public void setStopSize(int stopSize)
{
if(stopSize <= 0)
throw new ArithmeticException("The stopping size must be a positive value");
this.stopSize = stopSize;
}
/**
* Returns the stopping size for tree growth
* @return the stopping size for tree growth
*/
public int getStopSize()
{
return stopSize;
}
/**
* The ExtraTree will select the best of a random subset of features at each
* level, this sets the number of random features to select. If set larger
* than the number of features in the training set, all features will be
* eligible for selection at every level.
* @param selectionCount the number of random features to select
*/
public void setSelectionCount(int selectionCount)
{
this.selectionCount = selectionCount;
}
/**
* Returns the number of random features chosen at each level in the tree
     * @return the number of random features to choose
*/
public int getSelectionCount()
{
return selectionCount;
}
/**
* The normal implementation of ExtraTree always produces binary splits,
* including for categorical features. If set to <tt>false</tt> categorical
* features will expand out for each value in the category. This reduces the
* randomness of the tree.
* @param binaryCategoricalSplitting whether or not to use the original
* splitting algorithm, or to fully expand nominal features
*/
public void setBinaryCategoricalSplitting(boolean binaryCategoricalSplitting)
{
this.binaryCategoricalSplitting = binaryCategoricalSplitting;
}
/**
* Returns whether or not binary splitting is used for nominal features
* @return whether or not binary splitting is used for nominal features
*/
public boolean isBinaryCategoricalSplitting()
{
return binaryCategoricalSplitting;
}
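    /**
     * Configuration sketch added for illustration; it is not part of the
     * original class. Selecting only one random feature per node yields the
     * Totally Randomized Tree variant mentioned in the class documentation;
     * the stop size of 5 is simply reused from the default constructor as an
     * example value.
     *
     * @return an ExtraTree configured as a Totally Randomized Tree
     */
    static ExtraTree exampleTotallyRandomizedTree()
    {
        ExtraTree tree = new ExtraTree(1, 5);//consider exactly 1 random feature at every node
        tree.setBinaryCategoricalSplitting(true);//keep the fully random binary splits (already the default)
        return tree;
    }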
@Override
public CategoricalResults classify(DataPoint data)
{
return root.classify(data);
}
@Override
public void train(ClassificationDataSet dataSet, boolean parallel)
{
Random rand = RandomUtil.getRandom();
IntList features = new IntList(dataSet.getNumFeatures());
ListUtils.addRange(features, 0, dataSet.getNumFeatures(), 1);
predicting = dataSet.getPredicting();
ImpurityScore score = new ImpurityScore(predicting.getNumOfCategories(), impMeasure);
for(int i = 0; i < dataSet.size(); i++)
score.addPoint(dataSet.getWeight(i), dataSet.getDataPointCategory(i));
numNumericFeatures = dataSet.getNumNumericalVars();
root = trainC(score, dataSet, features, dataSet.getCategories(), rand);
}
/**
* Creates a new tree top down
* @param setScore the impurity score for the set of data points being evaluated
* @param subSet the set of data points to perform a split on
* @param features the features available to split on
* @param catInfo the categorical information
* @param rand the source of randomness
* @return the new top node created for the given data
*/
private TreeNodeVisitor trainC(ImpurityScore setScore, ClassificationDataSet subSet, List<Integer> features, CategoricalData[] catInfo, Random rand)
{
//Should we stop? Stop split(S)
if(subSet.size() < stopSize || setScore.getScore() == 0.0)
{
if(subSet.isEmpty())
return null;
return new NodeC(setScore.getResults());
}
double bestGain = Double.NEGATIVE_INFINITY;
double bestThreshold = Double.NaN;
int bestAttribute = -1;
ImpurityScore[] bestScores = null;
List<ClassificationDataSet> bestSplit = null;
Set<Integer> bestLeftSide = null;
/*
* TODO use smarter random feature selection based on how many features
* we need relative to how many are available
*/
Collections.shuffle(features);
        //It is possible, if we test all attributes, that one was categorical and is no longer an option
final int goTo = Math.min(selectionCount, features.size());
for(int i = 0; i < goTo; i++)
{
double gain;
double threshold = Double.NaN;
Set<Integer> leftSide = null;
ImpurityScore[] scores;
int a = features.get(i);
List<ClassificationDataSet> aSplit;
if(a < catInfo.length)
{
final int vals = catInfo[a].getNumOfCategories();
if(binaryCategoricalSplitting || vals == 2)
{
scores = createScores(2);
Set<Integer> catsValsInUse = new IntSet(vals*2);
for(int j = 0; j < subSet.size(); j++)
catsValsInUse.add(subSet.getDataPoint(j).getCategoricalValue(a));
if(catsValsInUse.size() == 1)
return new NodeC(setScore.getResults());
leftSide = new IntSet(vals);
int toUse = rand.nextInt(catsValsInUse.size()-1)+1;
ListUtils.randomSample(catsValsInUse, leftSide, toUse, rand);
                    //Anything in leftSide now goes down path 0, so we can do the binning
aSplit = new ArrayList<>(2);
aSplit.add(subSet.emptyClone());
aSplit.add(subSet.emptyClone());
for(int j = 0; j < subSet.size(); j++)
{
int dest = leftSide.contains(subSet.getDataPoint(j).getCategoricalValue(a)) ? 0 : 1;
scores[dest].addPoint(subSet.getWeight(j), subSet.getDataPointCategory(j));
aSplit.get(dest).addDataPoint(subSet.getDataPoint(j), subSet.getDataPointCategory(j), subSet.getWeight(j));
}
}
else//split on each value
{
scores = createScores(vals);
//Bin all the points to get their scores
aSplit = new ArrayList<>(vals);
for(int z = 0; z < vals; z++)
aSplit.add(subSet.emptyClone());
for (int j = 0; j < subSet.size(); j++)
{
DataPoint dp = subSet.getDataPoint(j);
int y_j = subSet.getDataPointCategory(j);
double w_j = subSet.getWeight(j);
scores[dp.getCategoricalValue(a)].addPoint(w_j, y_j);
aSplit.get(dp.getCategoricalValue(a)).addDataPoint(dp, y_j, w_j);
}
}
}
else
{
int numerA = a - catInfo.length;
double min = Double.POSITIVE_INFINITY, max = Double.NEGATIVE_INFINITY;
for (int j = 0; j < subSet.size(); j++)
{
double val = subSet.getDataPoint(j).getNumericalValues().get(numerA);
min = Math.min(min, val);
max = Math.max(max, val);
}
//Uniform random threshold
threshold = rand.nextDouble()*(max-min)+min;
scores = createScores(2);
aSplit = new ArrayList<>(2);
aSplit.add(subSet.emptyClone());
aSplit.add(subSet.emptyClone());
for (int j = 0; j < subSet.size(); j++)
{
double val = subSet.getDataPoint(j).getNumericalValues().get(numerA);
double w_j = subSet.getWeight(j);
int y_j = subSet.getDataPointCategory(j);
int toAddTo = val <= threshold ? 0 : 1;
aSplit.get(toAddTo).addDataPoint(subSet.getDataPoint(j), y_j, w_j);
scores[toAddTo].addPoint(w_j, y_j);
}
}
gain = ImpurityScore.gain(setScore, scores);
if(gain > bestGain)
{
bestGain = gain;
bestAttribute = a;
bestThreshold = threshold;
bestScores = scores;
bestSplit = aSplit;
bestLeftSide = leftSide;
}
}
//Best attribute has been selected
NodeBase toReturn;
if(bestAttribute < 0)
return null;
if(bestAttribute < catInfo.length)
if(bestSplit.size() == 2)//2 paths only
toReturn = new NodeCCat(bestAttribute, bestLeftSide, setScore.getResults());
else
{
                toReturn = new NodeCCat(bestAttribute, bestSplit.size(), setScore.getResults());
                features.remove(new Integer(bestAttribute));//Feature no longer viable in this case
}
else
toReturn = new NodeCNum(bestAttribute-catInfo.length, bestThreshold, setScore.getResults());
for(int i = 0; i < toReturn.children.length; i++)
{
toReturn.children[i] = trainC(bestScores[i], bestSplit.get(i), features, catInfo, rand);
}
return toReturn;
}
/**
* Creates a new tree top down
* @param setScore the impurity score for the set of data points being evaluated
* @param subSet the set of data points to perform a split on
* @param features the features available to split on
* @param catInfo the categorical information
* @param rand the source of randomness
* @return the new top node created for the given data
*/
private TreeNodeVisitor train(OnLineStatistics setScore, RegressionDataSet subSet, List<Integer> features, CategoricalData[] catInfo, Random rand)
{
//Should we stop? Stop split(S)
if(subSet.size() < stopSize || setScore.getVarance() <= 0.0 || Double.isNaN(setScore.getVarance()))
return new NodeR(setScore.getMean());
double bestGain = Double.NEGATIVE_INFINITY;
double bestThreshold = Double.NaN;
int bestAttribute = -1;
OnLineStatistics[] bestScores = null;
List<RegressionDataSet> bestSplit = null;
Set<Integer> bestLeftSide = null;
/*
* TODO use smarter random feature selection based on how many features
* we need relative to how many are available
*/
Collections.shuffle(features);
        //It is possible, if we test all attributes, that one was categorical and is no longer an option
final int goTo = Math.min(selectionCount, features.size());
for(int i = 0; i < goTo; i++)
{
double gain;
double threshold = Double.NaN;
Set<Integer> leftSide = null;
OnLineStatistics[] stats;
int a = features.get(i);
List<RegressionDataSet> aSplit;
if(a < catInfo.length)
{
final int vals = catInfo[a].getNumOfCategories();
if(binaryCategoricalSplitting || vals == 2)
{
stats = createStats(2);
Set<Integer> catsValsInUse = new IntSet(vals*2);
for(int j = 0; j < subSet.size(); j++)
catsValsInUse.add(subSet.getDataPoint(j).getCategoricalValue(a));
if(catsValsInUse.size() == 1)
return new NodeR(setScore.getMean());
leftSide = new IntSet(vals);
int toUse = rand.nextInt(catsValsInUse.size()-1)+1;
ListUtils.randomSample(catsValsInUse, leftSide, toUse, rand);
                    //Anything in leftSide now goes down path 0, so we can do the binning
aSplit = new ArrayList<>(2);
aSplit.add(subSet.emptyClone());
aSplit.add(subSet.emptyClone());
for(int j = 0; j < subSet.size(); j++)
{
DataPoint dp = subSet.getDataPoint(j);
double w_j = subSet.getWeight(j);
double y_j = subSet.getTargetValue(j);
int dest = leftSide.contains(dp.getCategoricalValue(a)) ? 0 : 1;
stats[dest].add(y_j, w_j);
aSplit.get(dest).addDataPoint(dp, y_j, w_j);
}
}
else//split on each value
{
stats = createStats(vals);
//Bin all the points to get their scores
aSplit = new ArrayList<>(vals);
for(int z = 0; z < vals; z++)
aSplit.add(subSet.emptyClone());
for (int j = 0; j < subSet.size(); j++)
{
DataPoint dp = subSet.getDataPoint(j);
double w_j = subSet.getWeight(j);
double y_j = subSet.getTargetValue(j);
stats[dp.getCategoricalValue(a)].add(y_j, w_j);
aSplit.get(dp.getCategoricalValue(a)).addDataPoint(dp, y_j, w_j);
}
}
}
else
{
int numerA = a - catInfo.length;
double min = Double.POSITIVE_INFINITY, max = Double.NEGATIVE_INFINITY;
for(int j = 0; j < subSet.size(); j++)
{
DataPoint dp = subSet.getDataPoint(j);
double val = dp.getNumericalValues().get(numerA);
min = Math.min(min, val);
max = Math.max(max, val);
}
//Uniform random threshold
threshold = rand.nextDouble()*(max-min)+min;
stats = createStats(2);
aSplit = new ArrayList<>(2);
aSplit.add(subSet.emptyClone());
aSplit.add(subSet.emptyClone());
for (int j = 0; j < subSet.size(); j++)
{
DataPoint dp = subSet.getDataPoint(j);
double w_j = subSet.getWeight(j);
double y_j = subSet.getTargetValue(j);
double val = dp.getNumericalValues().get(numerA);
int toAddTo = val <= threshold ? 0 : 1;
aSplit.get(toAddTo).addDataPoint(dp, y_j, w_j);
stats[toAddTo].add(y_j, w_j);
}
}
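            //gain for regression is the fraction of variance explained by the split: 1 - sum_i (w_i/W)*(Var_i/Var), analogous to the impurity gain used for classification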
gain = 1;
double varNorm = setScore.getVarance();
double varSum = setScore.getSumOfWeights();
for(OnLineStatistics stat : stats)
gain -= stat.getSumOfWeights()/varSum*(stat.getVarance()/varNorm);
if(gain > bestGain)
{
bestGain = gain;
bestAttribute = a;
bestThreshold = threshold;
bestScores = stats;
bestSplit = aSplit;
bestLeftSide = leftSide;
}
}
//Best attribute has been selected
NodeBase toReturn;
if (bestAttribute >= 0)
{
if (bestAttribute < catInfo.length)
if (bestSplit.size() == 2)//2 paths only
toReturn = new NodeRCat(bestAttribute, bestLeftSide, setScore.getMean());
else
{
                    toReturn = new NodeRCat(bestAttribute, bestSplit.size(), setScore.getMean());
                    features.remove(new Integer(bestAttribute));//Feature no longer viable in this case
}
else
toReturn = new NodeRNum(bestAttribute - catInfo.length, bestThreshold, setScore.getMean());
for (int i = 0; i < toReturn.children.length; i++)
{
toReturn.children[i] = train(bestScores[i], bestSplit.get(i), features, catInfo, rand);
}
return toReturn;
}
return new NodeR(setScore.getMean());
}
@Override
public boolean supportsWeightedData()
{
return true;
}
@Override
public ExtraTree clone()
{
return new ExtraTree(this);
}
@Override
public TreeNodeVisitor getTreeNodeVisitor()
{
return root;
}
/**
* Add lists to a list of lists
* @param <T> the content type of the list
* @param listsToAdd the number of lists to add
* @param reusableLists available pre allocated lists for reuse
* @param aSplit the list of lists to add to
*/
static private <T> void fillList(final int listsToAdd, Stack<List<T>> reusableLists, List<List<T>> aSplit)
{
for(int j = 0; j < listsToAdd; j++)
if(reusableLists.isEmpty())
aSplit.add(new ArrayList<>());
else
aSplit.add(reusableLists.pop());
}
private ImpurityScore[] createScores(int count)
{
ImpurityScore[] scores = new ImpurityScore[count];
for(int j = 0; j < scores.length; j++)
scores[j] = new ImpurityScore(predicting.getNumOfCategories(), impMeasure);
return scores;
}
@Override
public double regress(DataPoint data)
{
return root.regress(data);
}
@Override
public void train(RegressionDataSet dataSet, boolean parallel)
{
train(dataSet);
}
@Override
public void train(RegressionDataSet dataSet)
{
Random rand = RandomUtil.getRandom();
IntList features = new IntList(dataSet.getNumFeatures());
ListUtils.addRange(features, 0, dataSet.getNumFeatures(), 1);
OnLineStatistics score = new OnLineStatistics();
for (int j = 0; j < dataSet.size(); j++)
{
double w_j = dataSet.getWeight(j);
double y_j = dataSet.getTargetValue(j);
score.add(y_j, w_j);
}
numNumericFeatures = dataSet.getNumNumericalVars();
root = train(score, dataSet, features, dataSet.getCategories(), rand);
}
private OnLineStatistics[] createStats(int count)
{
OnLineStatistics[] stats = new OnLineStatistics[count];
for(int i = 0; i < stats.length; i++)
stats[i] = new OnLineStatistics();
return stats;
}
/**
* Node for classification that splits on a categorical feature
*/
private class NodeCCat extends NodeC
{
private static final long serialVersionUID = 7413428280703235600L;
/**
* Categorical attribute to split on
*/
private int catAtt;
/**
* The cat values that go to the left branch, or null if no binary cats are being used
*/
private int[] leftBranch;
public NodeCCat(int catAtt, int children, CategoricalResults crResult)
{
super(crResult, children);
this.catAtt = catAtt;
this.leftBranch = null;
}
public NodeCCat(int catAtt, Set<Integer> left, CategoricalResults crResult)
{
super(crResult, 2);
this.catAtt = catAtt;
this.leftBranch = new int[left.size()];
int pos = 0;
for(int i : left)
leftBranch[pos++] = i;
Arrays.sort(leftBranch);
}
public NodeCCat(NodeCCat toClone)
{
super(toClone);
this.catAtt = toClone.catAtt;
if(toClone.leftBranch != null)
this.leftBranch = Arrays.copyOf(toClone.leftBranch, toClone.leftBranch.length);
}
@Override
public int getPath(DataPoint dp)
{
int[] catVals = dp.getCategoricalValues();
if (leftBranch == null)
return catVals[catAtt];
else
{
if (Arrays.binarySearch(leftBranch, catVals[catAtt]) < 0)
return 1;
else
return 0;
}
}
@Override
public TreeNodeVisitor clone()
{
return new NodeCCat(this);
}
@Override
public Collection<Integer> featuresUsed()
{
IntList used = new IntList(1);
used.add(catAtt+numNumericFeatures);
return used;
}
}
/**
* Node for classification that splits on a numeric feature
*/
private static class NodeCNum extends NodeC
{
private static final long serialVersionUID = 3967180517059509869L;
private int numerAtt;
private double threshold;
public NodeCNum(int numerAtt, double threshold, CategoricalResults crResult)
{
super(crResult, 2);
this.numerAtt = numerAtt;
this.threshold = threshold;
}
public NodeCNum(NodeCNum toClone)
{
super(toClone);
this.numerAtt = toClone.numerAtt;
this.threshold = toClone.threshold;
}
@Override
public int getPath(DataPoint dp)
{
double val = dp.getNumericalValues().get(numerAtt);
if( val <= threshold)
return 0;
else
return 1;
}
@Override
public TreeNodeVisitor clone()
{
return new NodeCNum(this);
}
@Override
public Collection<Integer> featuresUsed()
{
IntList used = new IntList(1);
used.add(numerAtt);
return used;
}
}
/**
* Base node for classification
*/
private static class NodeC extends NodeBase
{
private static final long serialVersionUID = -3977497656918695759L;
private CategoricalResults crResult;
/**
* Creates a new leaf node
* @param crResult the results to return
*/
public NodeC(CategoricalResults crResult)
{
super();
this.crResult = crResult;
children = null;
}
/**
* Creates a new node with children that start out with null (path disabled)
* @param crResult the results to return
* @param children the number of children this node has
*/
public NodeC(CategoricalResults crResult, int children)
{
super(children);
this.crResult = crResult;
}
public NodeC(NodeC toClone)
{
super(toClone);
this.crResult = toClone.crResult.clone();
}
@Override
public CategoricalResults localClassify(DataPoint dp)
{
return crResult;
}
@Override
public int getPath(DataPoint dp)
{
return -1;
}
@Override
public TreeNodeVisitor clone()
{
return new NodeC(this);
}
@Override
public Collection<Integer> featuresUsed()
{
return Collections.EMPTY_SET;
}
}
/**
* Base node for regression and classification
*/
private static abstract class NodeBase extends TreeNodeVisitor
{
private static final long serialVersionUID = 6783491817922690901L;
protected TreeNodeVisitor[] children;
public NodeBase()
{
}
public NodeBase(int children)
{
this.children = new TreeNodeVisitor[children];
}
public NodeBase(NodeBase toClone)
{
if(toClone.children != null)
{
children = new TreeNodeVisitor[toClone.children.length];
for(int i = 0; i < toClone.children.length; i++)
if(toClone.children[i] != null)
children[i] = toClone.children[i].clone();
}
}
@Override
public int childrenCount()
{
return children.length;
}
@Override
public boolean isLeaf()
{
if(children == null)
return true;
for(int i = 0; i < children.length; i++)
if(children[i] != null)
return false;
return true;
}
@Override
public TreeNodeVisitor getChild(int child)
{
if(child < 0 || child > childrenCount())
return null;
return children[child];
}
@Override
public void disablePath(int child)
{
if(!isLeaf())
children[child] = null;
}
@Override
public boolean isPathDisabled(int child)
{
if(isLeaf())
return true;
return children[child] == null;
}
}
/**
* Base node for regression
*/
private static class NodeR extends NodeBase
{
private static final long serialVersionUID = -2461046505444129890L;
private double result;
/**
* Creates a new leaf node
* @param result the result to return
*/
public NodeR(double result)
{
super();
this.result = result;
}
/**
* Creates a new node with children that start out with null (path disabled)
* @param crResult the results to return
* @param children the number of children this node has
*/
public NodeR(double result, int children)
{
super(children);
this.result = result;
}
public NodeR(NodeR toClone)
{
super(toClone);
this.result = toClone.result;
}
@Override
public double localRegress(DataPoint dp)
{
return result;
}
@Override
public int getPath(DataPoint dp)
{
return -1;
}
@Override
public TreeNodeVisitor clone()
{
return new NodeR(this);
}
@Override
public Collection<Integer> featuresUsed()
{
return Collections.EMPTY_SET;
}
}
/**
     * Base node for regression that splits on a numeric feature
*/
private static class NodeRNum extends NodeR
{
private static final long serialVersionUID = -6775472771777960211L;
private int numerAtt;
private double threshold;
public NodeRNum(int numerAtt, double threshold, double result)
{
super(result, 2);
this.numerAtt = numerAtt;
this.threshold = threshold;
}
public NodeRNum(NodeRNum toClone)
{
super(toClone);
this.numerAtt = toClone.numerAtt;
this.threshold = toClone.threshold;
}
@Override
public int getPath(DataPoint dp)
{
double val = dp.getNumericalValues().get(numerAtt);
if( val <= threshold)
return 0;
else
return 1;
}
@Override
public TreeNodeVisitor clone()
{
return new NodeRNum(this);
}
@Override
public Collection<Integer> featuresUsed()
{
IntList used = new IntList(1);
used.add(numerAtt);
return used;
}
}
private class NodeRCat extends NodeR
{
private static final long serialVersionUID = 5868393594474661054L;
/**
* Categorical attribute to split on
*/
private int catAtt;
/**
* The cat values that go to the left branch, or null if no binary cats are being used
*/
private int[] leftBranch;
public NodeRCat(int catAtt, int children, double result)
{
super(result, children);
this.catAtt = catAtt;
this.leftBranch = null;
}
public NodeRCat(int catAtt, Set<Integer> left, double result)
{
super(result, 2);
this.catAtt = catAtt;
this.leftBranch = new int[left.size()];
int pos = 0;
for(int i : left)
leftBranch[pos++] = i;
Arrays.sort(leftBranch);
}
public NodeRCat(NodeRCat toClone)
{
super(toClone);
this.catAtt = toClone.catAtt;
if(toClone.leftBranch != null)
this.leftBranch = Arrays.copyOf(toClone.leftBranch, toClone.leftBranch.length);
}
@Override
public int getPath(DataPoint dp)
{
int[] catVals = dp.getCategoricalValues();
if (leftBranch == null)
return catVals[catAtt];
else
{
if (Arrays.binarySearch(leftBranch, catVals[catAtt]) < 0)
return 1;
else
return 0;
}
}
@Override
public Collection<Integer> featuresUsed()
{
IntList used = new IntList(1);
used.add(catAtt+numNumericFeatures);
return used;
}
@Override
public TreeNodeVisitor clone()
{
return new NodeRCat(this);
}
}
}
| 33,212 | 30.84372 | 195 | java |
JSAT | JSAT-master/JSAT/src/jsat/classifiers/trees/ID3.java |
package jsat.classifiers.trees;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.Classifier;
import jsat.classifiers.DataPoint;
import jsat.classifiers.DataPointPair;
import jsat.utils.FakeExecutor;
import jsat.utils.IntSet;
import jsat.utils.ModifiableCountDownLatch;
import jsat.utils.concurrent.ParallelUtils;
/**
 * ID3 (Iterative Dichotomiser 3) builds a decision tree for data sets whose
 * features are all categorical, splitting on the attribute with the highest
 * information gain at each node. Numeric features are not supported and will
 * cause training to fail.
 *
* @author Edward Raff
*/
public class ID3 implements Classifier
{
private static final long serialVersionUID = -8473683139353205898L;
private CategoricalData predicting;
private CategoricalData[] attributes;
private ID3Node root;
private ModifiableCountDownLatch latch;
public CategoricalResults classify(DataPoint data)
{
return walkTree(root, data);
}
static private CategoricalResults walkTree(ID3Node node, DataPoint data)
{
if(node.isLeaf())
return node.getResult();
return walkTree(node.getNode(data.getCategoricalValue(node.getAttributeId())), data);
}
@Override
public void train(ClassificationDataSet dataSet, boolean parallel)
{
if(dataSet.getNumNumericalVars() != 0)
throw new RuntimeException("ID3 only supports categorical data");
predicting = dataSet.getPredicting();
attributes = dataSet.getCategories();
List<DataPointPair<Integer>> dataPoints = dataSet.getAsDPPList();
Set<Integer> availableAttributes = new IntSet(dataSet.getNumCategoricalVars());
for(int i = 0; i < dataSet.getNumCategoricalVars(); i++)
availableAttributes.add(i);
latch = new ModifiableCountDownLatch(1);
ExecutorService threadPool = parallel ? ParallelUtils.CACHED_THREAD_POOL : new FakeExecutor();
root = buildTree(dataPoints, availableAttributes, threadPool);
try
{
latch.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(ID3.class.getName()).log(Level.SEVERE, null, ex);
}
}
private ID3Node buildTree( List<DataPointPair<Integer>> dataPoints, Set<Integer> remainingAtribues, final ExecutorService threadPool)
{
double curEntropy = entropy(dataPoints);
double size = dataPoints.size();
if(remainingAtribues.isEmpty() || curEntropy == 0)
{
CategoricalResults cr = new CategoricalResults(predicting.getNumOfCategories());
for(DataPointPair<Integer> dpp : dataPoints)
cr.setProb(dpp.getPair(), cr.getProb(dpp.getPair()) + 1);
cr.divideConst(size);
latch.countDown();
return new ID3Node(cr);
}
int bestAttribute = -1;
        double bestInfoGain = Double.NEGATIVE_INFINITY;//NEGATIVE_INFINITY ensures some attribute is always selected, even if no split lowers the entropy
List<List<DataPointPair<Integer>>> bestSplit = null;
for(int attribute : remainingAtribues)
{
List<List<DataPointPair<Integer>>> newSplit = new ArrayList<>(attributes[attribute].getNumOfCategories());
for( int i = 0; i < attributes[attribute].getNumOfCategories(); i++)
newSplit.add( new ArrayList<>());
//Putting the datapoints in their respective bins by attribute value
for(DataPointPair<Integer> dpp : dataPoints)
newSplit.get(dpp.getDataPoint().getCategoricalValue(attribute)).add(dpp);
double splitEntrop = 0;
for(int i = 0; i < newSplit.size(); i++)
splitEntrop += entropy(newSplit.get(i))*newSplit.get(i).size()/size;
double infoGain = curEntropy - splitEntrop;
if(infoGain > bestInfoGain)
{
bestAttribute = attribute;
bestInfoGain = infoGain;
bestSplit = newSplit;
}
}
final ID3Node node = new ID3Node(attributes[bestAttribute].getNumOfCategories(), bestAttribute);
final Set<Integer> newRemaining = new IntSet(remainingAtribues);
newRemaining.remove(bestAttribute);
for(int i = 0; i < bestSplit.size(); i++)
{
final int ii = i;
final List<DataPointPair<Integer>> bestSplitII = bestSplit.get(ii);
latch.countUp();
threadPool.submit(() ->
{
node.setNode(ii, buildTree(bestSplitII, newRemaining, threadPool));
});
}
latch.countDown();
return node;
}
static private class ID3Node
{
ID3Node[] children;
CategoricalResults cr;
int attributeId;
private ID3Node()
{
}
/**
* Constructs a parent
* @param atributes the number of possible values for the attribute this node should split on
*/
public ID3Node(int atributes, int attributeId)
{
cr = null;
children = new ID3Node[atributes];
this.attributeId = attributeId;
}
/**
* Constructs a leaf
* @param cr the result to return for reaching this leaf node
*/
public ID3Node( CategoricalResults cr)
{
this.children = null;
this.cr = cr;
}
public boolean isLeaf()
{
return cr != null;
}
public void setNode(int i, ID3Node node)
{
children[i] = node;
}
public ID3Node getNode(int i)
{
return children[i];
}
public int getAttributeId()
{
return attributeId;
}
public CategoricalResults getResult()
{
return cr;
}
public ID3Node copy()
{
ID3Node copy = new ID3Node();
copy.cr = this.cr;
copy.attributeId = this.attributeId;
if(this.children != null)
{
copy.children = new ID3Node[this.children.length];
for(int i = 0; i < children.length; i++)
copy.children[i] = this.children[i].copy();
}
return copy;
}
}
private double entropy( List<DataPointPair<Integer>> s)
{
if(s.isEmpty())
return 0;
double[] probs = new double[predicting.getNumOfCategories()];
for(DataPointPair<Integer> dpp : s)
probs[dpp.getPair()] += 1;
for(int i = 0; i < probs.length; i++)
probs[i] /= s.size();
double entr = 0;
for(int i = 0; i < probs.length; i++)
if(probs[i] != 0)
entr += probs[i] * (Math.log(probs[i])/Math.log(2));
        //The entr will be negative unless it is zero, this way we don't return negative zero
return Math.abs(entr);
}
@Override
public boolean supportsWeightedData()
{
return false;
}
@Override
public Classifier clone()
{
ID3 copy = new ID3();
copy.attributes = this.attributes;
copy.latch = null;
copy.predicting = this.predicting;
copy.root = this.root.copy();
return copy;
}
}
| 7,598 | 29.2749 | 137 | java |
JSAT | JSAT-master/JSAT/src/jsat/classifiers/trees/ImportanceByUses.java | /*
* Copyright (C) 2016 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.classifiers.trees;
import java.util.Stack;
import jsat.DataSet;
/**
 * Measures feature importance by counting how often each feature is used to
 * split a node in the tree. When depth weighting is enabled, a use at depth d
 * contributes only 2<sup>-d</sup>, so features used near the root count for
 * more than features used deep in the tree.
 *
* @author Edward Raff <[email protected]>
*/
public class ImportanceByUses implements TreeFeatureImportanceInference
{
private boolean weightByDepth;
public ImportanceByUses(boolean weightByDepth)
{
this.weightByDepth = weightByDepth;
}
public ImportanceByUses()
{
this(true);
}
@Override
public <Type extends DataSet> double[] getImportanceStats(TreeLearner model, DataSet<Type> data)
{
double[] features = new double[data.getNumFeatures()];
visit(model.getTreeNodeVisitor(), 0, features);
return features;
}
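    /**
     * Usage sketch added for documentation; it is not part of the original
     * class. With depth weighting enabled a split at the root contributes 1
     * and a split at depth d contributes 2^-d, so shallow uses of a feature
     * dominate its score.
     *
     * @param <Type>
     * @param trainedTree a tree model that has already been trained
     * @param data any dataset with the same feature layout as the training data
     * @return the (depth weighted) usage count of every feature
     */
    static <Type extends DataSet> double[] exampleDepthWeightedCounts(TreeLearner trainedTree, DataSet<Type> data)
    {
        return new ImportanceByUses(true).getImportanceStats(trainedTree, data);
    }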
private void visit(TreeNodeVisitor node, int curDepth, double[] features )
{
if (node == null)//invalid path was added, skip
return;
for (int feature : node.featuresUsed())
if (weightByDepth)
features[feature] += Math.pow(2, -curDepth);
else
features[feature]++;
if (!node.isLeaf())
{
for (int i = 0; i < node.childrenCount(); i++)
visit(node.getChild(i), curDepth + 1, features);
}
}
}
| 1,981 | 27.314286 | 100 | java |
JSAT | JSAT-master/JSAT/src/jsat/classifiers/trees/ImpurityScore.java | package jsat.classifiers.trees;
import static java.lang.Math.*;
import java.util.Arrays;
import jsat.classifiers.CategoricalResults;
import jsat.classifiers.DataPoint;
/**
* ImpurityScore provides a measure of the impurity of a set of data points
* respective to their class labels. The impurity score is maximized when the
* classes are evenly distributed, and minimized when all points belong to one
* class. <br>
* The gain in purity can be computed using the static <i>gain</i> methods of
* the class. However, not all impurity measures can be used for arbitrary data
* and splits. Some may only support binary splits, and some may only support
* binary target classes.
*
* @author Edward Raff
*/
public class ImpurityScore implements Cloneable
{
/**
* Different methods of measuring the impurity in a set of data points
* based on nominal class labels
*/
public enum ImpurityMeasure
{
INFORMATION_GAIN,
INFORMATION_GAIN_RATIO,
/**
* Normalized Mutual Information. The {@link #getScore() } value will be
* the same as {@link #INFORMATION_GAIN}, however - the gain returned
* is considerably different - and is a normalization of the mutual
* information between the split and the class label by the class and
* split entropy.
*/
NMI,
GINI,
CLASSIFICATION_ERROR
}
private double sumOfWeights;
private double[] counts;
private ImpurityMeasure impurityMeasure;
/**
* Creates a new impurity score that can be updated
*
* @param classCount the number of target class values
     * @param impurityMeasure the impurity measure to use when computing the score
*/
public ImpurityScore(int classCount, ImpurityMeasure impurityMeasure)
{
sumOfWeights = 0.0;
counts = new double[classCount];
this.impurityMeasure = impurityMeasure;
}
/**
* Copy constructor
* @param toClone
*/
private ImpurityScore(ImpurityScore toClone)
{
this.sumOfWeights = toClone.sumOfWeights;
this.counts = Arrays.copyOf(toClone.counts, toClone.counts.length);
this.impurityMeasure = toClone.impurityMeasure;
}
/**
* Removes one point from the impurity score
     * @param weight the weight of the point to remove
     * @param targetClass the class of the point to remove
*/
public void removePoint(double weight, int targetClass)
{
counts[targetClass] -= weight;
sumOfWeights -= weight;
}
/**
* Adds one more point to the impurity score
* @param weight the weight of the point to add
* @param targetClass the class of the point to add
*/
public void addPoint(double weight, int targetClass)
{
counts[targetClass] += weight;
sumOfWeights += weight;
}
/**
* Computes the current impurity score for the points that have been added.
* A higher score is worse, a score of zero indicates a perfectly pure set
* of points (all one class).
* @return the impurity score
*/
public double getScore()
{
if(sumOfWeights <= 0)
return 0;
double score = 0.0;
if (impurityMeasure == ImpurityMeasure.INFORMATION_GAIN_RATIO
|| impurityMeasure == ImpurityMeasure.INFORMATION_GAIN
|| impurityMeasure == ImpurityMeasure.NMI)
{
for (Double count : counts)
{
double p = count / sumOfWeights;
if (p > 0)
score += p * log(p) / log(2);
}
}
else if (impurityMeasure == ImpurityMeasure.GINI)
{
score = 1;
for (double count : counts)
{
double p = count / sumOfWeights;
score -= p * p;
}
}
else if (impurityMeasure == ImpurityMeasure.CLASSIFICATION_ERROR)
{
double maxClass = 0;
for (double count : counts)
maxClass = Math.max(maxClass, count / sumOfWeights);
score = 1.0 - maxClass;
}
return abs(score);
}
/**
* Returns the sum of the weights for all points currently in the impurity
* score
* @return the sum of weights
*/
public double getSumOfWeights()
{
return sumOfWeights;
}
/**
* Returns the impurity measure being used
* @return the impurity measure being used
*/
public ImpurityMeasure getImpurityMeasure()
{
return impurityMeasure;
}
/**
* Obtains the current categorical results by prior probability
*
* @return the categorical results for the current score
*/
public CategoricalResults getResults()
{
CategoricalResults cr = new CategoricalResults(counts.length);
for(int i = 0; i < counts.length; i++)
cr.setProb(i, counts[i]/sumOfWeights);
return cr;
}
/*
     * NOTE: for calculating the entropy in a split, if S is the current set of
* all data points, and S_i denotes one of the subsets gained from splitting
* The Gain for a split is
*
* n
* ===== |S |
* \ | i|
* Gain = Entropy(S) - > ---- Entropy/S \
* / |S| \ i/
* =====
* i = 1
*
* Gain
* GainRatio = ----------------
* SplitInformation
*
* n
* ===== |S | /|S |\
* \ | i| || i||
* SplitInformation = - > ---- log|----|
* / |S| \ |S|/
* =====
* i = 1
*/
/**
* Computes the gain in score from a splitting of the data set
*
* @param wholeData the score for the whole data set
* @param splits the scores for each of the splits
* @return the gain for the values given
*/
public static double gain(ImpurityScore wholeData, ImpurityScore... splits)
{
return gain(wholeData, 1.0, splits);
}
/**
* Computes the gain in score from a splitting of the data set
*
* @param wholeData the score for the whole data set
* @param wholeScale a constant to scale the wholeData counts and sums by, useful for handling missing value cases
* @param splits the scores for each of the splits
* @return the gain for the values given
*/
public static double gain(ImpurityScore wholeData, double wholeScale, ImpurityScore... splits)
{
double sumOfAllSums = wholeScale*wholeData.sumOfWeights;
if(splits[0].impurityMeasure == ImpurityMeasure.NMI)
{
double mi = 0, splitEntropy = 0.0, classEntropy = 0.0;
for(int c = 0; c < wholeData.counts.length; c++)//c: class
{
final double p_c = wholeScale*wholeData.counts[c]/sumOfAllSums;
if(p_c <= 0.0)
continue;
double logP_c = log(p_c);
classEntropy += p_c*logP_c;
for(int s = 0; s < splits.length; s++)//s: split
{
final double p_s = splits[s].sumOfWeights/sumOfAllSums;
if(p_s <= 0)
continue;
final double p_cs = splits[s].counts[c]/sumOfAllSums;
if(p_cs <= 0)
continue;
mi += p_cs * (log(p_cs) - logP_c - log(p_s));
if(c == 0)
splitEntropy += p_s * log(p_s);
}
}
splitEntropy = abs(splitEntropy);
classEntropy = abs(classEntropy);
return 2*mi/(splitEntropy+classEntropy);
}
//Else, normal cases
double splitScore = 0.0;
boolean useSplitInfo = splits[0].impurityMeasure == ImpurityMeasure.INFORMATION_GAIN_RATIO;
if(useSplitInfo)
{
/*
             * TODO should actually be 0, but a performance bug consistently
             * occurs if another value is used. Needs serious investigation.
             * I was testing on (Oracle) 1.7u51 & u20 something and both had the
             * issue, on OSX and Windows.
             *
             * I was unable to replicate the issue with a smaller self contained
             * program. So I suspect I might be at some threshold / corner case
             * of the optimizer
             *
             * Adding a -1 at the final results causes the performance
             * degradation again. Occurs with both client and server JVM
             *
             * Using the same code with an if statement separating the 2 (see old revision) was originally backwards. Changing it the correct way revealed the behavior. I'm leaving them separated to ease investigation later.
*/
double splitInfo = 1.0;
for(ImpurityScore split : splits)
{
double p = split.getSumOfWeights()/sumOfAllSums;
                if(p <= 0)//log(0) is -Inf, so skip and treat as zero
continue;
splitScore += p * split.getScore();
splitInfo += p * -log(p);
}
return (wholeData.getScore()-splitScore)/splitInfo;
}
else
{
for(ImpurityScore split : splits)
{
double p = split.getSumOfWeights()/sumOfAllSums;
                if(p <= 0)//log(0) is -Inf, so skip and treat as zero
continue;
splitScore += p*split.getScore();
}
return wholeData.getScore()-splitScore;
}
}
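    /**
     * Worked example added for illustration; it is not part of the original
     * API. Ten equally weighted points from two classes are split 6/4: the
     * left side holds five points of class 0 and one of class 1, the right
     * side holds the remaining four points of class 1. The value returned is
     * the information gain of that split.
     */
    static double exampleInformationGain()
    {
        ImpurityScore whole = new ImpurityScore(2, ImpurityMeasure.INFORMATION_GAIN);
        ImpurityScore left = new ImpurityScore(2, ImpurityMeasure.INFORMATION_GAIN);
        ImpurityScore right = new ImpurityScore(2, ImpurityMeasure.INFORMATION_GAIN);
        for(int i = 0; i < 5; i++)//five points of class 0, all go left
        {
            whole.addPoint(1.0, 0);
            left.addPoint(1.0, 0);
        }
        whole.addPoint(1.0, 1);//one point of class 1 also goes left
        left.addPoint(1.0, 1);
        for(int i = 0; i < 4; i++)//four points of class 1 go right
        {
            whole.addPoint(1.0, 1);
            right.addPoint(1.0, 1);
        }
        return gain(whole, left, right);//entropy of the whole set minus the weighted entropies of the two splits
    }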
@Override
protected ImpurityScore clone()
{
return new ImpurityScore(this);
}
}
| 10,262 | 32.321429 | 219 | java |
JSAT | JSAT-master/JSAT/src/jsat/classifiers/trees/MDA.java | /*
* Copyright (C) 2016 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.classifiers.trees;
import java.util.Random;
import jsat.DataSet;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.Classifier;
import jsat.classifiers.DataPoint;
import jsat.classifiers.evaluation.Accuracy;
import jsat.classifiers.evaluation.ClassificationScore;
import jsat.regression.RegressionDataSet;
import jsat.regression.Regressor;
import jsat.regression.evaluation.MeanSquaredError;
import jsat.regression.evaluation.RegressionScore;
import jsat.utils.random.RandomUtil;
/**
 * Mean Decrease in Accuracy (MDA) measures feature importance by re-applying
 * the model to the data while corrupting one feature at a time as each datum
 * is pushed through the tree. The importance of a feature is then measured as
 * the percent change in the target score when that feature was corrupted. <br>
* <br>
* This approach is based off of Breiman, L. (2001). <i>Random forests</i>.
* Machine Learning, 45(1), 5–32.
*
* @author Edward Raff <[email protected]>
*/
public class MDA implements TreeFeatureImportanceInference
{
private ClassificationScore cs_base = new Accuracy();
private RegressionScore rs_base = new MeanSquaredError();
@Override
public <Type extends DataSet> double[] getImportanceStats(TreeLearner model, DataSet<Type> data)
{
double[] features = new double[data.getNumFeatures()];
double baseScore;
boolean percentIncrease;
Random rand = RandomUtil.getRandom();
if(data instanceof ClassificationDataSet)
{
ClassificationDataSet cds = (ClassificationDataSet) data;
ClassificationScore cs = cs_base.clone();
cs.prepare(cds.getPredicting());
for(int i = 0; i < cds.size(); i++)
{
DataPoint dp = cds.getDataPoint(i);
cs.addResult(((Classifier)model).classify(dp), cds.getDataPointCategory(i), cds.getWeight(i));
}
baseScore = cs.getScore();
percentIncrease = cs.lowerIsBetter();
//for every feature
for(int j = 0; j < data.getNumFeatures(); j++)
{
cs.prepare(cds.getPredicting());
for(int i = 0; i < cds.size(); i++)
{
DataPoint dp = cds.getDataPoint(i);
int true_label = cds.getDataPointCategory(i);
TreeNodeVisitor curNode = walkCorruptedPath(model, dp, j, rand);
cs.addResult(curNode.localClassify(dp), true_label, cds.getWeight(i));
}
double newScore = cs.getScore();
features[j] = percentIncrease ? (newScore-baseScore)/(baseScore+1e-3) : (baseScore-newScore)/(baseScore+1e-3);
}
}
else if(data instanceof RegressionDataSet)
{
RegressionDataSet rds = (RegressionDataSet) data;
RegressionScore rs = rs_base.clone();
rs.prepare();
for(int i = 0; i < rds.size(); i++)
{
DataPoint dp = rds.getDataPoint(i);
rs.addResult(((Regressor)model).regress(dp), rds.getTargetValue(i), rds.getWeight(i));
}
baseScore = rs.getScore();
percentIncrease = rs.lowerIsBetter();
//for every feature
for(int j = 0; j < data.getNumFeatures(); j++)
{
rs.prepare();
for(int i = 0; i < rds.size(); i++)
{
DataPoint dp = rds.getDataPoint(i);
double true_label = rds.getTargetValue(i);
TreeNodeVisitor curNode = walkCorruptedPath(model, dp, j, rand);
rs.addResult(curNode.localRegress(dp), true_label, rds.getWeight(i));
}
double newScore = rs.getScore();
features[j] = percentIncrease ? (newScore-baseScore)/(baseScore+1e-3) : (baseScore-newScore)/(baseScore+1e-3);
}
}
return features;
}
/**
* walks the tree down to a leaf node, adding corruption for a specific feature
* @param model the tree model to walk
* @param dp the data point to push down the tree
* @param j the feature index to corrupt
* @param rand source of randomness
* @return the leaf node
*/
private TreeNodeVisitor walkCorruptedPath(TreeLearner model, DataPoint dp, int j, Random rand)
{
TreeNodeVisitor curNode = model.getTreeNodeVisitor();
while(!curNode.isLeaf())
{
int path = curNode.getPath(dp);
int numChild = curNode.childrenCount();
if(curNode.featuresUsed().contains(j))//corrupt the feature!
{
                //pick a random OTHER path: offsetting by 1 to numChild-1 guarantees we never land back on the original path
                if(numChild > 1)
                    path = (path + 1 + rand.nextInt(numChild - 1)) % numChild;
}
if(curNode.isPathDisabled(path))
break;
else
curNode = curNode.getChild(path);
}
return curNode;
}
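
    /*
     * Usage sketch (hedged, not part of the API): assuming the default DecisionTree
     * constructor and the usual Classifier train(...) call, with trainSet being a
     * ClassificationDataSet, per-feature importances could be obtained as
     *
     *   DecisionTree tree = new DecisionTree();
     *   tree.train(trainSet);
     *   double[] importance = new MDA().getImportanceStats(tree, trainSet);
     *
     * Larger values indicate features whose corruption degrades the score more.
     */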
}
| 6,096 | 36.869565 | 126 | java |
JSAT | JSAT-master/JSAT/src/jsat/classifiers/trees/MDI.java | /*
* Copyright (C) 2016 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.classifiers.trees;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.ListIterator;
import jsat.DataSet;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.classifiers.DataPointPair;
import jsat.utils.IntList;
/**
* Determines the importance of features by measuring the decrease in impurity
* caused by each feature used, weighted by the amount of data seen by the node
* using the feature. <br>
* This method only works for classification datasets as it uses the
* {@link ImpurityScore} class, but may use any impurity measure supported.<br>
* <br>
* For more info, see:
* <ul>
* <li>Louppe, G., Wehenkel, L., Sutera, A., & Geurts, P. (2013).
* <i>Understanding variable importances in forests of randomized trees</i>. In
* C. j. c. Burges, L. Bottou, M. Welling, Z. Ghahramani, & K. q. Weinberger
* (Eds.), Advances in Neural Information Processing Systems 26 (pp. 431–439).
* Retrieved from
* <a href="http://media.nips.cc/nipsbooks/nipspapers/paper_files/nips26/281.pdf">here</a></li>
* <li>Breiman, L. (2002). Manual on setting up, using, and understanding random
* forests v3.1. Statistics Department University of California Berkeley, CA,
* USA.</li>
* </ul>
*
*
* @author Edward Raff <[email protected]>
*/
public class MDI implements TreeFeatureImportanceInference
{
private ImpurityScore.ImpurityMeasure im;
public MDI(ImpurityScore.ImpurityMeasure im)
{
this.im = im;
}
public MDI()
{
this(ImpurityScore.ImpurityMeasure.GINI);
}
@Override
public <Type extends DataSet> double[] getImportanceStats(TreeLearner model, DataSet<Type> data)
{
double[] features = new double[data.getNumFeatures()];
if(!(data instanceof ClassificationDataSet))
throw new RuntimeException("MDI currently only supports classification datasets");
List<DataPointPair<Integer>> allData = ((ClassificationDataSet)data).getAsDPPList();
final int K = ((ClassificationDataSet)data).getClassSize();
ImpurityScore score = new ImpurityScore(K, im);
for(int i = 0; i < data.size(); i++)
score.addPoint(data.getWeight(i), ((ClassificationDataSet)data).getDataPointCategory(i));
visit(model.getTreeNodeVisitor(), score, (ClassificationDataSet) data, IntList.range(data.size()), features, score.getSumOfWeights(), K);
return features;
}
private void visit(TreeNodeVisitor node, ImpurityScore score, ClassificationDataSet data, IntList subset, final double[] features , final double N, final int K)
{
if (node == null || node.isLeaf() )//invalid path or no split, so skip
return;
double curScore = score.getScore();
double curN = score.getSumOfWeights();
//working space to split data up into new subsets
List<IntList> splitsData = new ArrayList<>(node.childrenCount());
List<ImpurityScore> splitScores = new ArrayList<>(node.childrenCount());
splitsData.add(subset);
splitScores.add(score);
for(int i = 0; i < node.childrenCount()-1; i++)
{
splitsData.add(new IntList());
splitScores.add(new ImpurityScore(K, im));
}
//loop through and split up our data
for(ListIterator<Integer> iter = subset.listIterator(); iter.hasNext();)
{
int indx = iter.next();
final int tc = data.getDataPointCategory(indx);
DataPoint dp = data.getDataPoint(indx);
double w = data.getWeight(indx);
int path = node.getPath(dp);
if(path < 0)//NaN will cause -1
score.removePoint(w, tc);
else if(path > 0)//0 will be cur data and score obj, else we move to right location
{
score.removePoint(w, tc);
splitScores.get(path).addPoint(w, tc);
splitsData.get(path).add(indx);
iter.remove();
}
}
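        //credit every feature used at this node with the impurity decrease, weighted by the fraction of data that reached this node (curN/N)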
        double changeInImp = curScore;
        for(ImpurityScore s : splitScores)
            changeInImp -= s.getScore()*(s.getSumOfWeights()/(1e-5+curN));
        Collection<Integer> featuresUsed = node.featuresUsed();
        for (int feature : featuresUsed)
            features[feature] += changeInImp*curN/N;
//now visit our children
for(int path = 0; path < splitScores.size(); path++)
visit(node.getChild(path), splitScores.get(path), data, splitsData.get(path), features, N, K);
}
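
    /*
     * Usage sketch (hedged, not part of the API): assuming tree is a trained
     * TreeLearner (e.g. a DecisionTree) and trainSet is the ClassificationDataSet
     * it was trained on,
     *
     *   double[] importance = new MDI(ImpurityScore.ImpurityMeasure.GINI)
     *                                 .getImportanceStats(tree, trainSet);
     *
     * gives the total impurity decrease attributed to each feature, weighted by the
     * amount of data that reached the nodes using it.
     */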
}
| 5,400 | 37.856115 | 164 | java |
JSAT | JSAT-master/JSAT/src/jsat/classifiers/trees/RandomDecisionTree.java |
package jsat.classifiers.trees;
import java.util.List;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPointPair;
import jsat.regression.RegressionDataSet;
import jsat.utils.ModifiableCountDownLatch;
import jsat.utils.random.RandomUtil;
/**
 * An extension of Decision Trees that ignores the given set of features to use,
 * and instead selects a new random subset of features at each node. <br>
* <br>
* The Random Decision Tree supports missing values in training and prediction.
*
* @author Edward Raff
*/
public class RandomDecisionTree extends DecisionTree
{
private static final long serialVersionUID = -809244056947507494L;
private int numFeatures;
public RandomDecisionTree()
{
this(1);
}
/**
* Creates a new Random Decision Tree
* @param numFeatures the number of random features to use
*/
public RandomDecisionTree(int numFeatures)
{
setRandomFeatureCount(numFeatures);
}
/**
* Creates a new Random Decision Tree
* @param numFeatures the number of random features to use
* @param maxDepth the maximum depth of the tree to create
* @param minSamples the minimum number of samples needed to continue branching
* @param pruningMethod the method of pruning to use after construction
* @param testProportion the proportion of the data set to put aside to use for pruning
*/
public RandomDecisionTree(int numFeatures, int maxDepth, int minSamples, TreePruner.PruningMethod pruningMethod, double testProportion)
{
super(maxDepth, minSamples, pruningMethod, testProportion);
setRandomFeatureCount(numFeatures);
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public RandomDecisionTree(RandomDecisionTree toCopy)
{
super(toCopy);
this.numFeatures = toCopy.numFeatures;
}
/**
     * Sets the number of random features to select and use at each node of
     * the decision tree
* @param numFeatures the number of random features
*/
public void setRandomFeatureCount(int numFeatures)
{
if(numFeatures < 1)
throw new IllegalArgumentException("Number of features must be positive, not " + numFeatures);
this.numFeatures = numFeatures;
}
/**
* Returns the number of random features used at each node of the tree
* @return the number of random features used at each node of the tree
*/
public int getRandomFeatureCount()
{
return numFeatures;
}
@Override
protected Node makeNodeC(ClassificationDataSet dataPoints, Set<Integer> options, int depth, boolean parallel, ModifiableCountDownLatch mcdl)
{
if(dataPoints.isEmpty())
{
mcdl.countDown();
return null;
}
final int featureCount = dataPoints.getNumFeatures();
fillWithRandomFeatures(options, featureCount);
        return super.makeNodeC(dataPoints, options, depth, parallel, mcdl);
}
@Override
protected Node makeNodeR(RegressionDataSet dataPoints, Set<Integer> options, int depth, boolean parallel, ModifiableCountDownLatch mcdl)
{
if(dataPoints.isEmpty())
{
mcdl.countDown();
return null;
}
final int featureCount = dataPoints.getNumFeatures();
fillWithRandomFeatures(options, featureCount);
        return super.makeNodeR(dataPoints, options, depth, parallel, mcdl);
}
private void fillWithRandomFeatures(Set<Integer> options, final int featureCount)
{
options.clear();
Random rand = RandomUtil.getRandom();
while(options.size() < numFeatures)
{
options.add(rand.nextInt(featureCount));
}
}
@Override
public RandomDecisionTree clone()
{
return new RandomDecisionTree(this);
}
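
    /*
     * Usage sketch (hedged): a single random tree is rarely used on its own, but,
     * assuming the usual Classifier train(...) call on a ClassificationDataSet named
     * trainSet, it can be trained directly as
     *
     *   RandomDecisionTree rdt = new RandomDecisionTree(3);//3 random features per node
     *   rdt.train(trainSet);
     *
     * More commonly it serves as the base learner inside RandomForest.
     */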
}
| 4,134 | 30.564885 | 144 | java |
JSAT | JSAT-master/JSAT/src/jsat/classifiers/trees/RandomForest.java | package jsat.classifiers.trees;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.DataSet;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.Classifier;
import jsat.classifiers.DataPoint;
import jsat.classifiers.boosting.Bagging;
import jsat.classifiers.trees.ImpurityScore.ImpurityMeasure;
import jsat.math.OnLineStatistics;
import jsat.parameters.Parameterized;
import jsat.regression.RegressionDataSet;
import jsat.regression.Regressor;
import jsat.utils.FakeExecutor;
import jsat.utils.IntSet;
import jsat.utils.ListUtils;
import jsat.utils.SystemInfo;
import jsat.utils.concurrent.AtomicDoubleArray;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
/**
* Random Forest is an extension of {@link Bagging} that is applied only to
* {@link DecisionTree DecisionTrees}. It works in a similar manner, but also
* only uses a random sub set of the features for each tree trained. This
* provides increased performance in accuracy of predictions, and reduced
* training time over just Bagging.<br>
* <br>
* This class supports learning and predicting with missing values.
*
* @author Edward Raff
* @see Bagging
*/
public class RandomForest implements Classifier, Regressor, Parameterized
{
//TODO implement Out of Bag estimates of proximity, importance, and outlier detection
private static final long serialVersionUID = 2725020584282958141L;
/**
* Only used when training for a classification problem
*/
private CategoricalData predicting;
private int extraSamples;
/**
     * The number of features to use. The default value is -1, indicating the heuristic
     * of sqrt(N) or N/3 should be used for classification and regression respectively. This
     * value should be set away from -1 before training work begins, and set back afterwards if it
     * was not set explicitly by the user
*/
private int featureSamples;
private int maxForestSize;
private boolean useOutOfBagError = false;
private boolean useOutOfBagImportance = false;
private TreeFeatureImportanceInference importanceMeasure = new MDI();
private OnLineStatistics[] feature_importance = null;
private double outOfBagError;
private RandomDecisionTree baseLearner;
private List<DecisionTree> forest;
public RandomForest()
{
this(100);
}
public RandomForest(int maxForestSize)
{
setExtraSamples(0);
setMaxForestSize(maxForestSize);
autoFeatureSample();
baseLearner = new RandomDecisionTree(1, Integer.MAX_VALUE, 3, TreePruner.PruningMethod.NONE, 1e-15);
baseLearner.setGainMethod(ImpurityMeasure.GINI);
}
/**
* RandomForest performs Bagging. Bagging samples from the training set with replacement, and draws
     * a sample at least as large as the training set. This controls how many extra samples are
* taken. If negative, fewer samples will be taken. Using negative values is not recommended.
*
* @param i how many extra samples to take
*/
public void setExtraSamples(int i)
{
extraSamples = i;
}
public int getExtraSamples()
{
return extraSamples;
}
/**
* Instead of using a heuristic, the exact number of features to sample is provided.
     * If equal to or larger than the number of features in the training data set,
* RandomForest degrades to {@link Bagging} performed on {@link DecisionTree}.<br>
* <br>
* To re-enable the heuristic mode, call {@link #autoFeatureSample() }
*
* @param featureSamples the number of features to randomly select for each tree in the forest.
* @throws ArithmeticException if the number given is less then or equal to zero
* @see #autoFeatureSample()
* @see Bagging
*/
public void setFeatureSamples(int featureSamples)
{
if(featureSamples <= 0)
throw new ArithmeticException("A positive number of features must be given");
this.featureSamples = featureSamples;
}
/**
* Tells the class to automatically select the number of features to use. For
* classification problems, this is the square root of the number of features.
* For regression, the number of features divided by 3 is used.
*/
public void autoFeatureSample()
{
featureSamples = -1;
}
/**
* Returns true if heuristics are currently in use for the number of features, or false if the number has been specified.
* @return true if heuristics are currently in use for the number of features, or false if the number has been specified.
*/
public boolean isAutoFeatureSample()
{
return featureSamples == -1;
}
/**
* Sets the maximum number of trees to create for the forest.
* @param maxForestSize the number of base learners to train
* @throws ArithmeticException if the number specified is not a positive value
*/
public void setMaxForestSize(int maxForestSize)
{
if(maxForestSize <= 0)
throw new ArithmeticException("Must train a positive number of learners");
this.maxForestSize = maxForestSize;
}
/**
     * Returns the maximum number of trees that will be trained for the forest, which is also the number of base learners used
     * @return the maximum number of trees that will be trained for the forest
*/
public int getMaxForestSize()
{
return maxForestSize;
}
/**
* Sets whether or not to compute the out of bag error during training
* @param useOutOfBagError <tt>true</tt> to compute the out of bag error, <tt>false</tt> to skip it
*/
public void setUseOutOfBagError(boolean useOutOfBagError)
{
this.useOutOfBagError = useOutOfBagError;
}
/**
* Indicates if the out of bag error rate will be computed during training
* @return <tt>true</tt> if the out of bag error will be computed, <tt>false</tt> otherwise
*/
public boolean isUseOutOfBagError()
{
return useOutOfBagError;
}
/**
* Random Forest can obtain an unbiased estimate of feature importance using
* a {@link TreeFeatureImportanceInference} method on the out-of-bag samples
* during training. Since each tree will produce a different importance
* score, we also get a set of statistics for each feature rather than just
* a single score value. These are only computed if {@link #setUseOutOfBagImportance(boolean)
* } is set to <tt>true</tt>.
* @return an array of size equal to the number of features, each
* {@link OnLineStatistics} describing the statistics for the importance of
* each feature. Numeric features start from index 0, and categorical
* features start from the index equal to the number of numeric features.
*/
public OnLineStatistics[] getFeatureImportance()
{
return feature_importance;
}
/**
* Sets whether or not to compute the out of bag importance of each feature
* during training.
*
* @param useOutOfBagImportance <tt>true</tt> to compute the out of bag
* feature importance, <tt>false</tt> to skip it
*/
public void setUseOutOfBagImportance(boolean useOutOfBagImportance)
{
this.useOutOfBagImportance = useOutOfBagImportance;
}
/**
* Indicates if the out of bag feature importance will be computed during
* training
*
* @return <tt>true</tt> if the out of bag importance will be computed,
* <tt>false</tt> otherwise
*/
public boolean isUseOutOfBagImportance()
{
return useOutOfBagImportance;
}
/**
* If {@link #isUseOutOfBagError() } is false, then this method will return
* 0 after training. Otherwise, it will return the out of bag error estimate
* after training has completed. For classification problems, this is the 0/1
* loss error rate. Regression problems return the mean squared error.
* @return the out of bag error estimate for this predictor
*/
public double getOutOfBagError()
{
return outOfBagError;
}
@Override
public CategoricalResults classify(DataPoint data)
{
if(forest == null || forest.isEmpty())
throw new RuntimeException("Classifier has not yet been trained");
else if(predicting == null)
throw new RuntimeException("Classifier has been trained for regression");
CategoricalResults totalResult = new CategoricalResults(predicting.getNumOfCategories());
for(DecisionTree tree : forest)
totalResult.incProb(tree.classify(data).mostLikely(), 1.0);
totalResult.normalize();
return totalResult;
}
@Override
public void train(ClassificationDataSet dataSet, boolean parallel)
{
this.predicting = dataSet.getPredicting();
this.forest = new ArrayList<DecisionTree>(maxForestSize);
trainStep(dataSet, parallel);
}
@Override
public boolean supportsWeightedData()
{
return true;
}
@Override
public double regress(DataPoint data)
{
if(forest == null || forest.isEmpty())
throw new RuntimeException("Classifier has not yet been trained");
else if(predicting != null)
throw new RuntimeException("Classifier has been trained for classification");
OnLineStatistics stats = new OnLineStatistics();
for(DecisionTree tree : forest)
stats.add(tree.regress(data));
return stats.getMean();
}
@Override
public void train(RegressionDataSet dataSet, boolean parallel)
{
this.predicting = null;
this.forest = new ArrayList<DecisionTree>(maxForestSize);
trainStep(dataSet, parallel);
}
/**
* Does the actual set up and training. {@link #predicting } and {@link #forest} should be
* set up appropriately first. Everything else is handled by this and {@link LearningWorker}
*
* @param dataSet the data set, classification or regression
* @param threadPool the source of threads
*/
private void trainStep(DataSet dataSet, boolean parallel)
{
boolean autoLearners = isAutoFeatureSample();//We will need to set it back after, so remember if we need to
if(autoLearners)
baseLearner.setRandomFeatureCount(Math.max((int)Math.sqrt(dataSet.getNumFeatures()), 1));
else
baseLearner.setRandomFeatureCount(featureSamples);
int roundsToDistribut = maxForestSize;
int roundShare = roundsToDistribut / SystemInfo.LogicalCores;//The number of rounds each thread gets
int extraRounds = roundsToDistribut % SystemInfo.LogicalCores;//The number of extra rounds that need to get distributed
        if(!parallel)//No point in duplicating resources
        {
            roundShare = roundsToDistribut;//All the rounds get shoved onto one thread
            extraRounds = 0;//avoid handing out extra rounds when everything runs on a single worker
        }
ExecutorService threadPool = parallel ? ParallelUtils.CACHED_THREAD_POOL : new FakeExecutor();
        //Random used for creating more random objects, faster to duplicate such a small resource than to share and lock
Random rand = RandomUtil.getRandom();
List<Future<LearningWorker>> futures = new ArrayList<>(SystemInfo.LogicalCores);
int[][] counts = null;
AtomicDoubleArray pred = null;
if(dataSet instanceof RegressionDataSet)
{
pred = new AtomicDoubleArray(dataSet.size());
counts = new int[pred.length()][1];//how many predictions are in this?
}
else
{
counts = new int[dataSet.size()][((ClassificationDataSet)dataSet).getClassSize()];
}
while (roundsToDistribut > 0)
{
int extra = (extraRounds-- > 0) ? 1 : 0;
Future<LearningWorker> future = threadPool.submit(new LearningWorker(dataSet, roundShare + extra, new Random(rand.nextInt()), counts, pred));
roundsToDistribut -= (roundShare + extra);
futures.add(future);
}
outOfBagError = 0;
try
{
List<LearningWorker> workers = ListUtils.collectFutures(futures);
for (LearningWorker worker : workers)
forest.addAll(worker.learned);
if (useOutOfBagError)
{
if (dataSet instanceof ClassificationDataSet)
{
ClassificationDataSet cds = (ClassificationDataSet) dataSet;
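                    //majority vote among the trees that did not see point i in their bootstrap sample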
for (int i = 0; i < counts.length; i++)
{
int max = 0;
for (int j = 1; j < counts[i].length; j++)
if(counts[i][j] > counts[i][max])
max = j;
if(max != cds.getDataPointCategory(i))
outOfBagError++;
}
}
else
{
RegressionDataSet rds = (RegressionDataSet) dataSet;
for (int i = 0; i < counts.length; i++)
outOfBagError += Math.pow(pred.get(i)/counts[i][0]-rds.getTargetValue(i), 2);
}
outOfBagError /= dataSet.size();
}
if(useOutOfBagImportance)//collect feature importance stats from each worker
{
feature_importance = new OnLineStatistics[dataSet.getNumFeatures()];
for(int j = 0; j < dataSet.getNumFeatures(); j++)
feature_importance[j] = new OnLineStatistics();
for(LearningWorker worker : workers)
for(int j = 0; j < dataSet.getNumFeatures(); j++)
feature_importance[j].add(worker.fi[j]);
}
}
catch (Exception ex)
{
Logger.getLogger(RandomForest.class.getName()).log(Level.SEVERE, null, ex);
}
}
@Override
public RandomForest clone()
{
RandomForest clone = new RandomForest(maxForestSize);
clone.extraSamples = this.extraSamples;
clone.featureSamples = this.featureSamples;
if(this.predicting != null)
clone.predicting = this.predicting.clone();
if(this.forest != null)
{
clone.forest = new ArrayList<DecisionTree>(this.forest.size());
for(DecisionTree tree : this.forest)
clone.forest.add(tree.clone());
}
clone.baseLearner = this.baseLearner.clone();
clone.useOutOfBagImportance = this.useOutOfBagImportance;
clone.useOutOfBagError = this.useOutOfBagError;
if(this.feature_importance != null)
{
clone.feature_importance = new OnLineStatistics[this.feature_importance.length];
for(int i = 0; i < this.feature_importance.length; i++)
clone.feature_importance[i] = this.feature_importance[i].clone();
}
return clone;
}
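
    /*
     * Usage sketch (hedged, not part of the API): training a forest of 100 trees with
     * the out-of-bag estimates enabled, on a ClassificationDataSet named trainSet,
     *
     *   RandomForest rf = new RandomForest(100);
     *   rf.setUseOutOfBagError(true);
     *   rf.setUseOutOfBagImportance(true);
     *   rf.train(trainSet, true);//true to train in parallel
     *   double oobError = rf.getOutOfBagError();
     *   OnLineStatistics[] importance = rf.getFeatureImportance();
     *
     * getOutOfBagError() and getFeatureImportance() are only populated when the
     * corresponding flags were set before training.
     */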
private class LearningWorker implements Callable<LearningWorker>
{
int toLearn;
List<DecisionTree> learned;
DataSet dataSet;
Random random;
OnLineStatistics[] fi;
/**
* For regression: sum of predictions
*/
private AtomicDoubleArray votes;
private int[][] counts;
public LearningWorker(DataSet dataSet, int toLearn, Random random, int[][] counts, AtomicDoubleArray pred)
{
this.dataSet = dataSet;
this.toLearn = toLearn;
this.random = random;
this.learned = new ArrayList<DecisionTree>(toLearn);
if(useOutOfBagError)
{
votes = pred;
this.counts = counts;
}
if(useOutOfBagImportance)
{
this.fi = new OnLineStatistics[dataSet.getNumFeatures()];
for(int i = 0; i < fi.length; i++)
fi[i] = new OnLineStatistics();
}
}
@Override
public LearningWorker call() throws Exception
{
Set<Integer> features = new IntSet(baseLearner.getRandomFeatureCount());
int[] sampleCounts = new int[dataSet.size()];
for(int i = 0; i < toLearn; i++)
{
//Sample to get the training points
Bagging.sampleWithReplacement(sampleCounts, sampleCounts.length+extraSamples, random);
//Sample to select the feature subset
features.clear();
while(features.size() < Math.min(baseLearner.getRandomFeatureCount(), dataSet.getNumFeatures()))//The user could have specified too many
features.add(random.nextInt(dataSet.getNumFeatures()));
RandomDecisionTree learner = baseLearner.clone();
if(dataSet instanceof ClassificationDataSet)
learner.trainC(Bagging.getWeightSampledDataSet((ClassificationDataSet)dataSet, sampleCounts), features);
else //It must be regression!
learner.train(Bagging.getWeightSampledDataSet((RegressionDataSet)dataSet, sampleCounts), features);
learned.add(learner);
if(useOutOfBagError)
{
for(int j = 0; j < sampleCounts.length; j++)
{
if(sampleCounts[j] != 0)
continue;
DataPoint dp = dataSet.getDataPoint(j);
if(dataSet instanceof ClassificationDataSet)
{
int pred = learner.classify(dp).mostLikely();
synchronized(counts[j])
{
counts[j][pred]++;
}
}
else
{
votes.getAndAdd(j, learner.regress(dp));
synchronized(counts[j])
{
counts[j][0]++;
}
}
}
}
if(useOutOfBagImportance)
{
DataSet oob;
if(dataSet instanceof ClassificationDataSet)
{
ClassificationDataSet cds = (ClassificationDataSet)dataSet;
ClassificationDataSet oob_ = new ClassificationDataSet(cds.getNumNumericalVars(), cds.getCategories(), cds.getPredicting());
for(int j = 0; j < sampleCounts.length; j++)
if(sampleCounts[j] == 0)
oob_.addDataPoint(cds.getDataPoint(j), cds.getDataPointCategory(j));
oob = oob_;
}
else//regression
{
RegressionDataSet rds = (RegressionDataSet)dataSet;
RegressionDataSet oob_ = new RegressionDataSet(rds.getNumNumericalVars(), rds.getCategories());
for(int j = 0; j < sampleCounts.length; j++)
if(sampleCounts[j] == 0)
oob_.addDataPoint(rds.getDataPoint(j), rds.getTargetValue(j));
oob = oob_;
}
double[] oob_import = importanceMeasure.getImportanceStats(learner, oob);
for(int j = 0; j < fi.length; j++)
fi[j].add(oob_import[j]);
}
}
return this;
}
}
}
| 20,490 | 38.104962 | 153 | java |
JSAT | JSAT-master/JSAT/src/jsat/classifiers/trees/TreeFeatureImportanceInference.java | /*
* Copyright (C) 2016 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.classifiers.trees;
import java.io.Serializable;
import jsat.DataSet;
/**
 * This interface exists for implementing measures of feature importance from tree
 * based models.
*
* @author Edward Raff <[email protected]>
*/
public interface TreeFeatureImportanceInference extends Serializable
{
/**
*
     * @param <Type> the type of the data set used for inference
* @param model the tree model to infer feature importance from
* @param data the dataset to use for importance inference. Should be either
* a Classification or Regression dataset, depending on the type of the
* model.
* @return a double array with one entry for each feature. Numeric features
* start first, followed by categorical features. Larger values indicate
* higher importance, and all values must be non-negative. Otherwise, no
* constraints are placed on the output of this function.
*/
public <Type extends DataSet> double[] getImportanceStats(TreeLearner model, DataSet<Type> data);
}
| 1,707 | 36.955556 | 101 | java |
JSAT | JSAT-master/JSAT/src/jsat/classifiers/trees/TreeLearner.java | package jsat.classifiers.trees;
import java.io.Serializable;
/**
* This interface provides a contract that allows for the mutation and pruning
* of a tree using the {@link TreeNodeVisitor} and related classes.
*
* @author Edward Raff
* @see TreeNodeVisitor
* @see TreePruner
*/
public interface TreeLearner extends Serializable
{
/**
* Obtains a node visitor for the tree learner that can be used to traverse
* and predict from the learned tree
* @return the root node visitor for the learned tree
*/
public TreeNodeVisitor getTreeNodeVisitor();
}
| 589 | 25.818182 | 80 | java |
JSAT | JSAT-master/JSAT/src/jsat/classifiers/trees/TreeNodeVisitor.java | package jsat.classifiers.trees;
import java.io.Serializable;
import java.util.Collection;
import java.util.List;
import jsat.classifiers.CategoricalResults;
import jsat.classifiers.DataPoint;
import jsat.linear.DenseVector;
/**
* Provides an abstracted mechanism for traversing and predicting from nodes in
* a tree meant for a supervised learning problem. <i>Paths</i> and <i>children
* </i> are used interchangeably, every node has one path to one child<br>
* Paths to children nodes can be disabled, but not removed. This is done so
* that the implementation does not have to worry about changes in the indices
* to children nodes, which would be complicated to implement. Once a path is
* disabled, it can not be re-enabled.<br>
* If all paths to any children have been disabled, {@link #childrenCount() }
* may choose to return 0, otherwise - it must return the original number of
* paths to children nodes.
*
* The implementation for a given tree should override
* {@link #localClassify(jsat.classifiers.DataPoint) } and
* {@link #localRegress(jsat.classifiers.DataPoint) } if the operations are
* supported.
*
* @author Edward Raff
* @see TreeLearner
* @see TreePruner
*/
public abstract class TreeNodeVisitor implements Serializable, Cloneable
{
private static final long serialVersionUID = 4026847401940409114L;
/**
* Returns the number of children this node of the tree has, and may return
* a non zero value even if the node is a leaf
* @return the number of children this node has
*/
abstract public int childrenCount();
/**
* Returns true if the node is a leaf, meaning it has no valid paths to any
* children
*
* @return <tt>true</tt> if the node is purely a leaf node
*/
abstract public boolean isLeaf();
/**
* Returns the node for the specific child, or null if the child index was
* not valid
* @param child the index of the child node to obtain
* @return the node for the child
*/
abstract public TreeNodeVisitor getChild(int child);
/**
* Disables the selected path to the specified child from the current node.
* All child indices will not be effected by this operation.
*
* @param child the index of the child to disable the path too
*/
abstract public void disablePath(int child);
/**
* Optional operation!<br>
* This method, if supported, will set the path so that the child is set to the given value.
* <br>
* The implementation may choose to throw an exception if the NodeVisitor is not of the same
* implementing class.
* @param child the child path
* @param node the node to make the child
*/
public void setPath(int child, TreeNodeVisitor node)
{
throw new UnsupportedOperationException("setPath is an optional operation.");
}
/**
* Returns true if the path to the specified child is disabled, meaning it
* can not be traveled to. It will also return true for an invalid child
* path, since a non existent node can not be reached.
*
* @param child the child index to check the path for
* @return <tt>true</tt> if the path is unavailable, <tt>false</tt> if the
* path is good.
*/
abstract public boolean isPathDisabled(int child);
/**
* Returns the classification result that would have been obtained if the
* current node was a leaf node.
*
     * @param dp the data point to classify at this node
* @return the classification result
* @throws UnsupportedOperationException if the tree node does not support
* or was not trained for classification
*/
public CategoricalResults localClassify(DataPoint dp)
{
throw new UnsupportedOperationException("This tree does not support classification");
}
/**
* Returns the path down the tree the given data point would have taken, or
     * -1 if this node was a leaf node OR if a missing value prevents traversal
     * down the path
* @param dp the data point to send down the tree
* @return the path that would be taken
*/
abstract public int getPath(DataPoint dp);
/**
* Returns the relative weight of each path, which should be an indication
* of how much of the training data went down each path. By default, returns 1.0/{@link #childrenCount()
* }. The result should sum to one
*
* @param path the path to select
* @return the fraction of data estimated to travel the specified path, with
* respect to data that reaches this node.
*/
public double getPathWeight(int path)
{
return 1.0/childrenCount();
}
public CategoricalResults classify(DataPoint dp)
{
TreeNodeVisitor node = this;
while(!node.isLeaf())
{
int path = node.getPath(dp);
if(path < 0 )//missing value case
{
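                //no usable path: average the children's predictions, weighting each child by the fraction of training data that followed it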
double sum = 0;
DenseVector resultSum = null;
for(int child = 0; child < childrenCount(); child++)
{
if(node.isPathDisabled(child))
continue;
CategoricalResults child_result = node.getChild(child).classify(dp);
if(resultSum == null)
resultSum = new DenseVector(child_result.size());
sum += node.getPathWeight(child);
resultSum.mutableAdd(node.getPathWeight(child), child_result.getVecView());
}
if(resultSum == null)//all paths disabled?
break;//break out and do local classify
if(sum < 1.0-1e-5)//re-normalize our result
resultSum.mutableDivide(sum+1e-6);
return new CategoricalResults(resultSum.arrayCopy());
}
if(node.isPathDisabled(path))//return local classification dec
break;
node = node.getChild(path);
}
return node.localClassify(dp);
}
/**
* Returns the regression result that would have been obtained if the
* current node was a leaf node.
*
* @param dp the data point to regress
* @return the classification result
* @throws UnsupportedOperationException if the tree node does not support
* or was not trained for classification
*/
public double localRegress(DataPoint dp)
{
throw new UnsupportedOperationException("This tree does not support classification");
}
/**
* Performs regression on the given data point by following it down the tree
* until it finds the correct terminal node.
*
* @param dp the data point to regress
* @return the regression result from the tree starting from the current node
*/
public double regress(DataPoint dp)
{
TreeNodeVisitor node = this;
while(!node.isLeaf())
{
int path = node.getPath(dp);
if(path < 0 )//missing value case
{
double sum = 0;
double resultSum = 0;
for(int child = 0; child < childrenCount(); child++)
{
if(node.isPathDisabled(child))
continue;
double child_result = node.getChild(child).regress(dp);
sum += node.getPathWeight(child);
resultSum += node.getPathWeight(child)*child_result;
}
if(sum == 0)//all paths disabled?
break;//break out and do local classify
if(sum < 1.0-1e-5)//re-normalize our result
resultSum /= (sum+1e-6);
return resultSum;
}
if(node.isPathDisabled(path))//if missing value makes path < 0, return local regression dec
break;
node = node.getChild(path);
}
return node.localRegress(dp);
}
/**
* Returns a collection of the indices of the features used by this node in
* the tree to make its decision about what branch to use next. Numeric
* features start from zero, and categorical features start from the number
* of numeric features.
*
* @return the integers indicating which features were used for this node in
* the tree.
*/
abstract public Collection<Integer> featuresUsed();
@Override
abstract public TreeNodeVisitor clone();
}
| 8,669 | 36.37069 | 108 | java |
JSAT | JSAT-master/JSAT/src/jsat/classifiers/trees/TreePruner.java | package jsat.classifiers.trees;
import java.util.ArrayList;
import java.util.List;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.classifiers.DataPointPair;
import jsat.math.SpecialMath;
import jsat.utils.IntList;
/**
* Provides post-pruning algorithms for any decision tree that can be altered
* using the {@link TreeNodeVisitor}. Pruning is done with a held out testing
* set
* <br>
* All pruning methods handle missing values
* <br>
* NOTE: API still under work, expect changes
*
* @author Edward Raff
*/
public class TreePruner
{
private TreePruner()
{
}
/**
* The method of pruning to use
*/
public static enum PruningMethod
{
/**
* The tree will be left as generated, no pruning will occur.
*/
NONE,
/**
* The tree will be pruned in a bottom up fashion, removing
* leaf nodes if the removal does not reduce the accuracy on the testing
* set
*/
REDUCED_ERROR,
/**
* Bottom-Up pessimistic pruning using Error based Pruning from the
* C4.5 algorithm. If the node visitor supports
* {@link TreeNodeVisitor#setPath(int, jsat.classifiers.trees.TreeNodeVisitor) }
* it will perform sub tree replacement for the maximal sub tree. <br>
* The default Confidence (CF) is 0.25, as used in the C4.5 algorithm.<br>
* <br>
* NOTE: For the one case where the root would be pruned by taking the sub tree
* with the most nodes, this implementation will not perform that step. However,
* this is incredibly rare - and otherwise performs the same.
*/
ERROR_BASED
};
/**
* Performs pruning starting from the root node of a tree
* @param root the root node of a decision tree
* @param method the pruning method to use
* @param testSet the test set of data points to use for pruning
*/
public static void prune(TreeNodeVisitor root, PruningMethod method, ClassificationDataSet testSet)
{
        //TODO add varargs for extra arguments that may be used by pruning methods
if(method == PruningMethod.NONE )
return;
else if(method == PruningMethod.REDUCED_ERROR)
pruneReduceError(null, -1, root, testSet);
else if(method == PruningMethod.ERROR_BASED)
pruneErrorBased(null, -1, root, testSet, 0.25);
else
throw new RuntimeException("BUG: please report");
}
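
    /*
     * Usage sketch (hedged): given a trained tree that implements TreeLearner and a
     * held-out ClassificationDataSet named heldOut, reduced-error pruning can be run as
     *
     *   TreePruner.prune(tree.getTreeNodeVisitor(), PruningMethod.REDUCED_ERROR, heldOut);
     *
     * The tree is modified in place by disabling paths through its node visitors.
     */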
/**
* Performs pruning to reduce error on the testing set
* @param parent the parent of the current node, may be null
* @param pathFollowed the path from the parent that lead to the current node
* @param current the current node being considered
* @param testSet the set of testing points to apply to this node
* @return the number of nodes pruned from the tree
*/
private static int pruneReduceError(TreeNodeVisitor parent, int pathFollowed, TreeNodeVisitor current, ClassificationDataSet testSet)
{
if(current == null)
return 0;
int nodesPruned = 0;
//If we are not a leaf, prune our children
if(!current.isLeaf())
{
            //Each child should only be given testing points that would descend down that path
int numSplits = current.childrenCount();
List<ClassificationDataSet> splits = new ArrayList<>(numSplits);
IntList hadMissing = new IntList();
double[] fracs = new double[numSplits];
double wSum = 0;
for (int i = 0; i < numSplits; i++)
splits.add(testSet.emptyClone());
for(int i = 0; i < testSet.size(); i++)
{
double w_i = testSet.getWeight(i);
int path = current.getPath(testSet.getDataPoint(i));
if(path >= 0)
{
splits.get(path).addDataPoint(testSet.getDataPoint(i), testSet.getDataPointCategory(i), w_i);
wSum += w_i;
fracs[path] += w_i;
}
else//missing value
hadMissing.add(i);
}
//normalize fracs
for(int i = 0; i < numSplits; i++)
fracs[i] /= wSum+1e-15;
if(!hadMissing.isEmpty())
DecisionStump.distributMissing(splits, fracs, testSet, hadMissing);
for (int i = numSplits - 1; i >= 0; i--)//Go backwards so child removals dont affect indices
nodesPruned += pruneReduceError(current, i, current.getChild(i), splits.get(i));
}
//If we pruned all our children, we may have become a leaf! Should we prune ourselves?
        if(current.isLeaf() && parent != null)//Compare this node's accuracy vs its parent
        {
            double childCorrect = 0;
            double parentCorrect = 0;
            for(int i = 0; i < testSet.size(); i++)
            {
                DataPoint dp = testSet.getDataPoint(i);
                int truth = testSet.getDataPointCategory(i);
                if(current.localClassify(dp).mostLikely() == truth)
                    childCorrect += testSet.getWeight(i);
                if(parent.localClassify(dp).mostLikely() == truth)
                    parentCorrect += testSet.getWeight(i);
            }
            if(parentCorrect >= childCorrect)//We use >= b/c if they are the same, we assume smaller trees are better
{
parent.disablePath(pathFollowed);
return nodesPruned+1;//We prune our children and ourselves
}
return nodesPruned;
}
return nodesPruned;
}
/**
*
* @param parent the parent node, or null if there is no parent
* @param pathFollowed the path from the parent node to the current node
* @param current the current node to evaluate
* @param testSet the set of points to estimate error from
* @param alpha the Confidence
* @return expected upperbound on errors
*/
private static double pruneErrorBased(TreeNodeVisitor parent, int pathFollowed, TreeNodeVisitor current, ClassificationDataSet testSet, double alpha)
{
//TODO this does a lot of redundant computation. Re-write this code to keep track of where datapoints came from to avoid redudancy.
if(current == null || testSet.isEmpty())
return 0;
else if(current.isLeaf())//return number of errors
{
int errors = 0;
double N = 0;
for(int i = 0; i < testSet.size(); i++)
{
if (current.localClassify(testSet.getDataPoint(i)).mostLikely() != testSet.getDataPointCategory(i))
errors += testSet.getWeight(i);
N += testSet.getWeight(i);
}
return computeBinomialUpperBound(N, alpha, errors);
}
List<ClassificationDataSet> splitSet = new ArrayList<>(current.childrenCount());
IntList hadMissing = new IntList();
for(int i = 0; i < current.childrenCount(); i++)
splitSet.add(testSet.emptyClone());
int localErrors = 0;
double subTreeScore = 0;
double N = 0.0;
double N_missing = 0.0;
double[] fracs =new double[splitSet.size()];
for(int i = 0; i < testSet.size(); i++)
{
DataPoint dp = testSet.getDataPoint(i);
int y_i = testSet.getDataPointCategory(i);
double w_i = testSet.getWeight(i);
if(current.localClassify(dp).mostLikely() != y_i)
localErrors+=w_i;
int path = current.getPath(dp);
if(path >= 0)
{
N += w_i;
splitSet.get(path).addDataPoint(dp, y_i, w_i);
fracs[path] += w_i;
}
else
{
hadMissing.add(i);
N_missing += w_i;
}
}
for(int i = 0; i < fracs.length; i++)
fracs[i] /= N;
if(!hadMissing.isEmpty())
DecisionStump.distributMissing(splitSet, fracs, testSet, hadMissing);
        //Find the child which gets the most of the test set as the candidate for sub-tree replacement
int maxChildCount = 0;
int maxChild = -1;
for(int path = 0; path < splitSet.size(); path++)
if(!current.isPathDisabled(path))
{
subTreeScore += pruneErrorBased(current, path, current.getChild(path), splitSet.get(path), alpha);
if(maxChildCount < splitSet.get(path).size())
{
maxChildCount = splitSet.get(path).size();
maxChild = path;
}
}
/* Original uses normal approximation of p + Z_alpha * sqrt(p (1-p) / n).
* Instead, just compute exact using inverse beta
* Upper Bound = 1.0 - BetaInv(alpha, n-k, k+1)
*/
final double prunedTreeScore = computeBinomialUpperBound(N+N_missing, alpha, localErrors);
double maxChildTreeScore;
if(maxChild == -1)
maxChildTreeScore = Double.POSITIVE_INFINITY;
else
{
TreeNodeVisitor maxChildNode = current.getChild(maxChild);
int otherE = 0;
for (int path = 0; path < splitSet.size(); path++)
{
ClassificationDataSet split = splitSet.get(path);
for(int i = 0; i < split.size(); i++)
if (maxChildNode.classify(split.getDataPoint(i)).mostLikely() != split.getDataPointCategory(i))
otherE+=split.getWeight(i);
}
maxChildTreeScore = computeBinomialUpperBound(N+N_missing, alpha, otherE);
}
if(maxChildTreeScore < prunedTreeScore && maxChildTreeScore < subTreeScore && parent != null)
{
try//NodeVisitor may not support setPath method, which is optional
{
parent.setPath(pathFollowed, current.getChild(maxChild));
return maxChildTreeScore;
}
catch(UnsupportedOperationException ex)
{
//fall out to others, this is ok
}
}
//MaxChildTreeScore is not the min, or it was not supported - so we do not compare against it any more
if(prunedTreeScore < subTreeScore )
{
for(int i = 0; i < current.childrenCount(); i++)
current.disablePath(i);
return prunedTreeScore;
}
else//no change
return subTreeScore;
}
private static double computeBinomialUpperBound(final double N, double alpha, double errors)
{
return N * (1.0 - SpecialMath.invBetaIncReg(alpha, N - errors+1e-9, errors + 1.0));
}
}
| 10,870 | 35.850847 | 153 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/BayesianHAC.java | /*
* This code was contributed under the Public Domain
*/
package jsat.clustering;
import java.util.List;
import jsat.DataSet;
import jsat.linear.ConstantVector;
import jsat.linear.DenseVector;
import jsat.linear.MatrixStatistics;
import jsat.linear.Vec;
import jsat.utils.IntList;
import static jsat.math.SpecialMath.*;
import static java.lang.Math.log;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.Stack;
import java.util.stream.Collectors;
import jsat.distributions.Distribution;
import jsat.distributions.discrete.Binomial;
import jsat.distributions.multivariate.IndependentDistribution;
import jsat.distributions.multivariate.MultivariateDistribution;
import jsat.distributions.multivariate.NormalM;
import jsat.linear.CholeskyDecomposition;
import jsat.linear.DenseMatrix;
import jsat.linear.Matrix;
import jsat.linear.SingularValueDecomposition;
import jsat.math.OnLineStatistics;
/**
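 * A Bayesian take on Hierarchical Agglomerative Clustering. Every point starts in
 * its own cluster, and pairs of clusters are greedily merged based on the posterior
 * probability that the merged data came from a single component of the selected
 * distribution family. Merging stops once no candidate merge has a posterior
 * probability above 1/2.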
*
* @author Edward Raff
*/
public class BayesianHAC implements Clusterer
{
private double alpha_prior = 1.0;
private Distributions dist = Distributions.BERNOULLI_BETA;
/**
* After clustering, one possibility is to interpret each found cluster as
* its own separate distribution. This list stores the results of that
* interpretation.
*/
protected List<MultivariateDistribution> cluster_dists;
static public enum Distributions
{
BERNOULLI_BETA
{
@Override
public Node init(int point, double alpha_prior, List<Vec> data)
{
return new BernoulliBetaNode(point, alpha_prior, data);
}
},
GAUSSIAN_DIAG
{
@Override
public Node init(int point, double alpha_prior, List<Vec> data)
{
return new NormalDiagNode(point, alpha_prior, data);
}
},
GAUSSIAN_FULL
{
@Override
public Node init(int point, double alpha_prior, List<Vec> data)
{
return new NormalNode(point, alpha_prior, data);
}
};
abstract public Node init(int point, double alpha_prior, List<Vec> data);
}
public BayesianHAC()
{
this(Distributions.GAUSSIAN_DIAG);
}
public BayesianHAC(Distributions dist)
{
this.dist = dist;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public BayesianHAC(BayesianHAC toCopy)
{
this.alpha_prior = toCopy.alpha_prior;
this.dist = toCopy.dist;
if(toCopy.cluster_dists != null)
this.cluster_dists = toCopy.cluster_dists.stream()
.map(MultivariateDistribution::clone).collect(Collectors.toList());
}
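
    /*
     * Usage sketch (hedged, not part of the API): clustering a DataSet named data with
     * a full-covariance Gaussian assumption for each cluster,
     *
     *   BayesianHAC bhac = new BayesianHAC(Distributions.GAUSSIAN_FULL);
     *   int[] assignments = bhac.cluster(data, false, null);
     *
     * Following the usual Clusterer contract, the returned array is assumed to hold
     * the cluster index assigned to each data point.
     */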
/**
     * Computes log(exp(log_a)+exp(log_b)) in a numerically stable manner
     * @param log_a the log of the first value, log(a)
     * @param log_b the log of the second value, log(b)
     * @return the value of log(a+b)
*/
static double log_exp_sum(double log_a, double log_b)
{
if(log_b > log_a)
return log_exp_sum(log_b, log_a);
        //now log_a >= log_b, so use the identity
        //log(a+b) = log(a) + log(1 + b/a)
        //in log space that is log_a + log(1 + exp(log_b-log_a)),
        //which is stable because exp(log_b-log_a) <= 1
        return log_a + Math.log1p(Math.exp(log_b-log_a));
}
protected static interface DistPrior
{
};
protected static abstract class Node<Distribution extends Node, HyperParams extends DistPrior>
{
int owned;
IntList allChilds;
double log_d;
double log_pi;
/**
         * Stores the value of log(p(D_k | T_k)), where this current node is the sub-tree T_k over the data D_k
*/
double log_pdt;
Distribution left_child;
Distribution right_child;
/**
* How many data points belong to this node (inclusive) .
*/
int size;
public Node(int single_point, double alpha_prior) //used for base case init
{
this.owned = single_point;
this.allChilds = IntList.view(new int[]{single_point});
this.log_pdt = 1;
this.size = 1;
//initialize each leaf i to have d_i = α, π_i = 1
this.log_d = log(alpha_prior);
this.log_pi = log(1.0);
}
public Node(Distribution a, Distribution b, double alpha_prior) //MERGE THE NODES
{
this.owned = -1;
this.log_pdt = Double.NaN;
this.size = a.size + b.size;
this.allChilds = new IntList(a.allChilds);
this.allChilds.addAll(b.allChilds);
Collections.sort(allChilds);
//Figure 3. of paper for equations
// double tmp = alpha_prior * gamma(this.size);
// this.d = tmp + a.log_d * b.log_d;
// this.pi = tmp/this.log_d;
double tmp = log(alpha_prior) + lnGamma(this.size);
this.log_d = log_exp_sum(tmp, a.log_d+b.log_d);
this.log_pi = tmp - this.log_d;
this.left_child = a;
this.right_child = b;
}
public double logR(List<Vec> dataset, HyperParams priors)
{
if(this.size == 1)
{
                //compute and cache log_pdt for later use by parent merges
this.log_pdt = this.log_null(dataset, priors);
return 1.0;
}
// double log_pi = log(this.log_pi);
double log_numer = log_pi+this.log_null(dataset, priors);
            //right hand side of equation 2
double log_neg_pi = log(-Math.expm1(log_pi));
double log_rhs = log_neg_pi+ left_child.log_pdt + right_child.log_pdt;
this.log_pdt = log_exp_sum(log_numer, log_rhs);
// return Math.exp(log_numer-this.log_pdt);
return log_numer-this.log_pdt;
}
abstract public Distribution merge(Distribution a, Distribution b, double alpha_prior);
abstract public HyperParams computeInitialPrior(List<Vec> dataset);
/**
* Interpreting the current node as a cluster, this method should return
* a multivariate distribution object that summarizes the content of
* this node, ignoring the rest of the tree.
*
* @param dataset the original training dataset in the original order
* @return a distribution object representing this node.
*/
abstract public MultivariateDistribution toDistribution(List<Vec> dataset);
public boolean isLeaf()
{
return right_child == null && left_child == null;
}
/**
* Computes the log(p(D|H_1)) for the current distribution assumption
         * @param dataset the full dataset, in its original order
         * @param priors the prior hyper parameters for the distribution family
         * @return the value of log(p(D|H_1)) for the data owned by this node
*/
abstract public double log_null(List<Vec> dataset, HyperParams priors);
public Iterator<Integer> indxIter()
{
Stack<Node> remains = new Stack<>();
remains.push(this);
return new Iterator<Integer>()
{
@Override
public boolean hasNext()
{
while(!remains.isEmpty() && !remains.peek().isLeaf())
{
Node c = remains.pop();
remains.push(c.left_child);
remains.push(c.right_child);
}
return !remains.empty();
}
@Override
public Integer next()
{
Node c = remains.pop();
return c.owned;
}
};
}
public List<Integer> ownedList()
{
IntList a = new IntList(this.size);
Iterator<Integer> iter = this.indxIter();
while(iter.hasNext())
a.add(iter.next());
return a;
}
}
protected static class BetaConjugate implements DistPrior
{
public Vec alpha_prior;
public Vec beta_prior;
public BetaConjugate(List<Vec> dataset)
{
int d = dataset.get(0).length();
Vec mean = MatrixStatistics.meanVector(dataset);
alpha_prior = mean.multiply(2).add(1e-3);
beta_prior = new DenseVector(new ConstantVector(1, d)).subtract(mean).multiply(2).add(1e-3);
}
}
protected static class WishartDiag implements DistPrior
{
/**
* v is the degree of freedom
*/
double v;
/**
* r is scaling factor on the prior precision of the mean,
*/
double r;
/**
* m which is the prior on the mean
*/
Vec m;
/**
* S is the prior on the precision matrix.
* In our case, S is the diag of it.
*
*/
Vec S;
double log_shared_term;
public WishartDiag(List<Vec> dataset)
{
int N = dataset.size();
int k = dataset.get(0).length();
v = k;
r = 0.001;
m = new DenseVector(k);
MatrixStatistics.meanVector(m, dataset);
S = new DenseVector(k);
MatrixStatistics.covarianceDiag(m, S, dataset);
S.mutableDivide(20);
//Lets get the last term with the prod in it first b/c it contains
//many additions and subtractions
log_shared_term = 0;
double log_det_S = 0;
for(int i = 0; i < k; i++)
log_det_S += log(S.get(i));
log_shared_term += v/2*log_det_S;
}
}
protected static class WishartFull implements DistPrior
{
/**
* v is the degree of freedom
*/
double v;
/**
* r is scaling factor on the prior precision of the mean,
*/
double r;
/**
* m which is the prior on the mean
*/
Vec m;
/**
* S is the prior on the precision matrix.
* In our case, S is the diag of it.
*
*/
Matrix S;
double log_shared_term;
public WishartFull(List<Vec> dataset)
{
int N = dataset.size();
int k = dataset.get(0).length();
v = k;
r = 0.001;
m = new DenseVector(k);
MatrixStatistics.meanVector(m, dataset);
S = new DenseMatrix(k, k);
MatrixStatistics.covarianceMatrix(m, S, dataset);
SingularValueDecomposition svd = new SingularValueDecomposition(S.clone());
if(svd.isFullRank())
{
S.mutableMultiply(1.0/20);
}
else
{
OnLineStatistics var = new OnLineStatistics();
for(Vec v : dataset)
for(int i = 0; i < v.length(); i++)
var.add(v.get(i));
for(int i = 0; i < S.rows(); i++)
S.increment(i, i, 0.1*S.get(i, i) + var.getVarance());
}
//Lets get the last term with the prod in it first b/c it contains
//many additions and subtractions
log_shared_term = 0;
CholeskyDecomposition cd = new CholeskyDecomposition(S.clone());
double log_det_S = cd.getLogDet();
log_shared_term += v/2*log_det_S;
}
}
protected static class BernoulliBetaNode extends Node<BernoulliBetaNode, BetaConjugate>
{
public Vec m;
public BernoulliBetaNode(int single_point, double alpha_prior, List<Vec> dataset)
{
super(single_point, alpha_prior);
this.m = dataset.get(single_point);
}
public BernoulliBetaNode(BernoulliBetaNode a, BernoulliBetaNode b, double alpha_prior)
{
super(a, b, alpha_prior);
this.m = a.m.add(b.m);
}
@Override
public BetaConjugate computeInitialPrior(List<Vec> dataset)
{
return new BetaConjugate(dataset);
}
@Override
public double log_null(List<Vec> dataset, BetaConjugate priors)
{
Vec alpha = priors.alpha_prior;
Vec beta = priors.beta_prior;
int N = this.size;
int D = dataset.get(0).length();
double log_prob = 0;
for(int d = 0; d < D; d++)
{
double a_d = alpha.get(d);
double b_d = beta.get(d);
double m_d = this.m.get(d);
double log_numer = lnGamma(a_d + b_d) + lnGamma(a_d + m_d) + lnGamma(b_d + N - m_d);
double log_denom = lnGamma(a_d) + lnGamma(b_d) + lnGamma(a_d + b_d + N);
log_prob += (log_numer - log_denom);
}
return log_prob;
}
@Override
public BernoulliBetaNode merge(BernoulliBetaNode a, BernoulliBetaNode b, double alpha_prior)
{
return new BernoulliBetaNode(a, b, alpha_prior);
}
@Override
public MultivariateDistribution toDistribution(List<Vec> dataset)
{
//TODO add Bernoulli option and use that. But Binomial with 1 trial is equivalent
List<Distribution> dists = new ArrayList<>();
double N = this.size;
for(int i = 0; i < m.length(); i++)
dists.add(new Binomial(1, m.get(i)/N));
return new IndependentDistribution(dists);
}
}
protected static class NormalDiagNode extends Node<NormalDiagNode, WishartDiag>
{
/**
* X X^T term is really
* X^T X for row, col format like us.
* In diag case, diag(X^T X)_j = \sum_i X_ij^2
*/
Vec XT_X;
Vec x_sum;
public NormalDiagNode(int single_point, double alpha_prior, List<Vec> dataset)
{
super(single_point, alpha_prior);
Vec x_i = dataset.get(single_point);
this.XT_X = x_i.pairwiseMultiply(x_i);
this.x_sum = x_i;
}
public NormalDiagNode(NormalDiagNode a, NormalDiagNode b, double alpha_prior)
{
super(a, b, alpha_prior);
this.XT_X= a.XT_X.add(b.XT_X);
this.x_sum = a.x_sum.add(b.x_sum);
}
@Override
public NormalDiagNode merge(NormalDiagNode a, NormalDiagNode b, double alpha_prior)
{
NormalDiagNode node = new NormalDiagNode(a, b, alpha_prior);
return node;
}
@Override
public WishartDiag computeInitialPrior(List<Vec> dataset)
{
return new WishartDiag(dataset);
}
@Override
public MultivariateDistribution toDistribution(List<Vec> dataset)
{
List<Integer> ids = this.ownedList();
Vec mean = new DenseVector(dataset.get(0).length());
MatrixStatistics.meanVector(mean, dataset, ids);
Vec cov = new DenseVector(mean.length());
MatrixStatistics.covarianceDiag(mean, cov, dataset, ids);
return new NormalM(mean, cov);
}
@Override
public double log_null(List<Vec> dataset, WishartDiag priors)
{
int N = this.size;
double r = priors.r;
int k = priors.m.length();
double v = priors.v;
//init with first two terms
Vec S_prime = priors.S.add(this.XT_X);
//
// m m^T is the outer-product, but we are just diag,
//so diag(m m^T)_j = m_j^2
Vec mm = priors.m.pairwiseMultiply(priors.m);
S_prime.mutableAdd(r*N/(N+r), mm);
// diag((\sum x) (\sum x)^T )_i = (\sum x)_i^2
Vec xsum_xsum = x_sum.pairwiseMultiply(x_sum);
S_prime.mutableAdd(-1/(N+r), xsum_xsum);
//diag((m * xsum^T + xsum * m^T))_i = m_i * xsum_i * 2
Vec mxsum = priors.m.pairwiseMultiply(x_sum).multiply(2);
S_prime.mutableAdd(-r/(N+r), mxsum);
double v_p = priors.v + N;
double log_det_S_p = 0;
for(int i = 0; i < S_prime.length(); i++)
log_det_S_p += log(S_prime.get(i));
double log_prob = priors.log_shared_term + -v_p/2*log_det_S_p;
for(int j = 1; j <= k; j++)
log_prob += lnGamma((v_p+1-j)/2) - lnGamma((v+1-j)/2);
log_prob += v_p*k/2.0*log(2) - v*k/2.0*log(2);
log_prob += -N*k/2.0*log(2*Math.PI);
log_prob += k/2.0 * (log(r) - log(N+r));
return log_prob;
}
}
protected static class NormalNode extends Node<NormalNode, WishartFull>
{
/**
* X^T X for row, col format like us.
* For incremental updates of X^T X, when we add a new row z, it becomes
* X^T X + z^T z, so we just add the outer product update to X^T X
*
*/
Matrix XT_X;
Vec x_sum;
public NormalNode(int single_point, double alpha_prior, List<Vec> dataset)
{
super(single_point, alpha_prior);
Vec x_i = dataset.get(single_point);
this.XT_X = new DenseMatrix(x_i.length(), x_i.length());
Matrix.OuterProductUpdate(XT_X, x_i, x_i, 1.0);
this.x_sum = x_i;
}
public NormalNode(NormalNode a, NormalNode b, double alpha_prior)
{
super(a, b, alpha_prior);
this.XT_X= a.XT_X.add(b.XT_X);
this.x_sum = a.x_sum.add(b.x_sum);
}
@Override
public NormalNode merge(NormalNode a, NormalNode b, double alpha_prior)
{
NormalNode node = new NormalNode(a, b, alpha_prior);
return node;
}
@Override
public WishartFull computeInitialPrior(List<Vec> dataset)
{
return new WishartFull(dataset);
}
@Override
public MultivariateDistribution toDistribution(List<Vec> dataset)
{
List<Integer> ids = this.ownedList();
Vec mean = new DenseVector(dataset.get(0).length());
MatrixStatistics.meanVector(mean, dataset, ids);
Matrix cov = new DenseMatrix(mean.length(), mean.length());
MatrixStatistics.covarianceMatrix(mean, cov, dataset, ids);
return new NormalM(mean, cov);
}
@Override
public double log_null(List<Vec> dataset, WishartFull priors)
{
int N = this.size;
double r = priors.r;
int k = priors.m.length();
double v = priors.v;
//init with first two terms
Matrix S_prime = priors.S.add(this.XT_X);
//
// m m^T is the outer-product,
Matrix.OuterProductUpdate(S_prime, priors.m, priors.m, r*N/(N+r));
//4th term, outer product update of row sums
Matrix.OuterProductUpdate(S_prime, x_sum, x_sum, -1/(N+r));
//-r/(N+r) (m * xsum^T + xsum * m^T), let's break it out into two outer
//product updates,
Matrix.OuterProductUpdate(S_prime, priors.m, x_sum, -r/(N+r));
Matrix.OuterProductUpdate(S_prime, x_sum, priors.m, -r/(N+r));
double v_p = priors.v + N;
CholeskyDecomposition cd = new CholeskyDecomposition(S_prime);
double log_det_S_p = cd.getLogDet();
double log_prob = priors.log_shared_term + -v_p/2*log_det_S_p;
for(int j = 1; j <= k; j++)
log_prob += lnGamma((v_p+1-j)/2) - lnGamma((v+1-j)/2);
log_prob += v_p*k/2.0*log(2) - v*k/2.0*log(2);
log_prob += -N*k/2.0*log(2*Math.PI);
log_prob += k/2.0 * (log(r) - log(N+r));
return log_prob;
}
}
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
List<Vec> data = dataSet.getDataVectors();
if(designations == null)
designations = new int[data.size()];
DistPrior priors = null;
List<Node> current_nodes = new ArrayList<>();
for(int i = 0; i < data.size(); i++)
{
Node n = dist.init(i, alpha_prior, data);
if(priors == null)
priors = n.computeInitialPrior(data);
n.logR(data, priors);
current_nodes.add(n);
}
while(current_nodes.size() > 1)
{
double best_r = Double.NEGATIVE_INFINITY;
int best_i = -1, best_j = -1;
Node best_merged = null;
for(int i = 0; i < current_nodes.size(); i++)
{
Node D_i = current_nodes.get(i);
for(int j = i+1; j < current_nodes.size(); j++)
{
Node D_j = current_nodes.get(j);
Node merged = D_i.merge(D_i, D_j, alpha_prior);
double log_r = merged.logR(data, priors);
// System.out.println("\t" + log_r + "," + D_i.allChilds + "," + D_j.allChilds);
if(log_r > best_r)
{
best_i = i;
best_j = j;
best_merged = merged;
best_r = log_r;
}
}
}
// System.out.println(Math.exp(best_r) + " merge " + current_nodes.get(best_i).allChilds + " " + current_nodes.get(best_j).allChilds + " | " + best_merged.log_pi);
if(best_r > log(0.5))
{
current_nodes.remove(best_j);
current_nodes.remove(best_i);
current_nodes.add(best_merged);
}
else
break;
}
// System.out.println("C: " + current_nodes.size());
this.cluster_dists = new ArrayList<>(current_nodes.size());
for(int class_id = 0; class_id < current_nodes.size(); class_id++)
{
List<Integer> owned = current_nodes.get(class_id).ownedList();
// System.out.println(current_nodes.get(class_id).size);
// System.out.print(class_id + ":");
for(int pos : owned)
designations[pos] = class_id;
// System.out.println();
this.cluster_dists.add(current_nodes.get(class_id).toDistribution(data));
}
return designations;
}
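/*
* Usage sketch (hedged): assuming a BayesianHAC instance `bhac` has already been constructed
* with the desired likelihood model and prior strength, a typical call looks like
*
* int[] assignments = bhac.cluster(dataSet, false, null);
* List<MultivariateDistribution> perCluster = bhac.getClusterDistributions();
*
* The number of clusters falls out of the merge loop above: merging stops once the best
* posterior merge probability drops to 1/2 or below (best_r <= log(0.5)).
*/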
public List<MultivariateDistribution> getClusterDistributions()
{
return cluster_dists;
}
@Override
public BayesianHAC clone()
{
return this;
}
}
| 24,191 | 30.377432 | 174 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/CLARA.java |
package jsat.clustering;
import java.util.*;
import jsat.DataSet;
import jsat.SimpleDataSet;
import jsat.classifiers.DataPoint;
import jsat.clustering.SeedSelectionMethods.SeedSelection;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.linear.distancemetrics.TrainableDistanceMetric;
import jsat.utils.DoubleList;
import jsat.utils.random.RandomUtil;
/**
*
* @author Edward Raff
*/
public class CLARA extends PAM
{
private static final long serialVersionUID = 174392533688953706L;
/**
* The number of samples to take
*/
private int sampleSize;
/**
* The number of times to do sampling
*/
private int sampleCount;
private boolean autoSampleSize;
public CLARA(int sampleSize, int sampleCount, DistanceMetric dm, Random rand, SeedSelection seedSelection)
{
super(dm, rand, seedSelection);
this.sampleSize = sampleSize;
this.sampleCount = sampleCount;
this.autoSampleSize = false;
}
public CLARA(int sampleCount, DistanceMetric dm, Random rand, SeedSelection seedSelection)
{
super(dm, rand, seedSelection);
this.sampleSize = -1;
this.sampleCount = sampleCount;
this.autoSampleSize = true;
}
public CLARA(DistanceMetric dm, Random rand, SeedSelection seedSelection)
{
this(5, dm, rand, seedSelection);
}
public CLARA(DistanceMetric dm, Random rand)
{
this(dm, rand, SeedSelection.KPP);
}
public CLARA(DistanceMetric dm)
{
this(dm, RandomUtil.getRandom());
}
public CLARA()
{
this(new EuclideanDistance());
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public CLARA(CLARA toCopy)
{
super(toCopy);
this.sampleSize = toCopy.sampleSize;
this.sampleCount = toCopy.sampleCount;
this.autoSampleSize = toCopy.autoSampleSize;
}
/**
*
* @return the number of times {@link PAM} will be applied to a sample from the data set.
*/
public int getSampleCount()
{
return sampleCount;
}
/**
* Sets the number of times {@link PAM} will be applied to different samples from the data set.
*
* @param sampleCount the number of times to apply sampling.
*/
public void setSampleCount(int sampleCount)
{
this.sampleCount = sampleCount;
}
/**
*
* @return the number of samples that will be taken to perform {@link PAM} on.
*/
public int getSampleSize()
{
return sampleSize;
}
/**
* Sets the number of samples CLARA should take from the data set to perform {@link PAM} on.
*
* @param sampleSize the number of samples to take
*/
public void setSampleSize(int sampleSize)
{
if(sampleSize >= 0)
{
autoSampleSize = false;
this.sampleSize = sampleSize;
}
else
autoSampleSize = true;
}
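/*
* Illustrative example: a negative value switches back to the automatic heuristic used in
* cluster(), which draws 40 + 2k points per sample.
*
* CLARA clara = new CLARA(new EuclideanDistance());
* clara.setSampleSize(200); // use a fixed sample of 200 points for each PAM run
* clara.setSampleSize(-1); // return to the automatic 40 + 2k sample size
*/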
@Override
protected double cluster(DataSet data, boolean doInit, int[] medioids, int[] assignments, List<Double> cacheAccel, boolean parallel)
{
int k = medioids.length;
int[] bestMedoids = new int[medioids.length];
int[] bestAssignments = new int[assignments.length];
double bestMedoidsDist = Double.MAX_VALUE;
List<Vec> X = data.getDataVectors();
if(sampleSize >= data.size())//Then we might as well just do one round of PAM
{
return super.cluster(data, true, medioids, assignments, cacheAccel, parallel);
}
else if(doInit)
{
TrainableDistanceMetric.trainIfNeeded(dm, data);
cacheAccel = dm.getAccelerationCache(X);
}
int sampSize = autoSampleSize ? 40+2*k : sampleSize;
int[] sampleAssignments = new int[sampSize];
List<DataPoint> sample = new ArrayList<>(sampSize);
/**
* We need the mapping to be able to go from the sample indices back to their position in the full data set
* Key is the sample index [0, 1, 2, ..., sampSize-1]
* Value is the corresponding index in the full data set
*/
Map<Integer, Integer> samplePoints = new LinkedHashMap<>();
DoubleList subCache = new DoubleList(sampSize);
for(int i = 0; i < sampleCount; i++)
{
//Take a sample and use PAM on it to get medoids
samplePoints.clear();
sample.clear();
subCache.clear();
while (samplePoints.size() < sampSize)
{
int indx = rand.nextInt(data.size());
if (!samplePoints.containsValue(indx))
samplePoints.put(samplePoints.size(), indx);
}
for (Integer j : samplePoints.values())
{
sample.add(data.getDataPoint(j));
subCache.add(cacheAccel.get(j));
}
DataSet sampleSet = new SimpleDataSet(sample);
//Sampling done, now apply PAM
SeedSelectionMethods.selectIntialPoints(sampleSet, medioids, dm, subCache, rand, getSeedSelection());
super.cluster(sampleSet, false, medioids, sampleAssignments, subCache, parallel);
//Map the sample medoids back to the full data set
for(int j = 0; j < medioids.length; j++)
medioids[j] = samplePoints.get(medioids[j]);
//Now apply the sample medoids to the full data set
double sqrdDist = 0.0;
for(int j = 0; j < data.size(); j++)
{
double smallestDist = Double.MAX_VALUE;
int assignment = -1;
for(int z = 0; z < k; z++)
{
double tmp = dm.dist(medioids[z], j, X, cacheAccel);
if(tmp < smallestDist)
{
assignment = z;
smallestDist = tmp;
}
}
assignments[j] = assignment;
sqrdDist += smallestDist*smallestDist;
}
if(sqrdDist < bestMedoidsDist)
{
bestMedoidsDist = sqrdDist;
System.arraycopy(medioids, 0, bestMedoids, 0, k);
System.arraycopy(assignments, 0, bestAssignments, 0, assignments.length);
}
}
System.arraycopy(bestMedoids, 0, medioids, 0, k);
System.arraycopy(bestAssignments, 0, assignments, 0, assignments.length);
return bestMedoidsDist;
}
@Override
public CLARA clone()
{
return new CLARA(this);
}
}
| 6,935 | 29.421053 | 136 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/ClusterFailureException.java | package jsat.clustering;
/**
* A ClusterFailureException is thrown when a clustering method is unable to
* perform its clustering for some reason.
*
* @author Edward Raff
*/
public class ClusterFailureException extends RuntimeException
{
private static final long serialVersionUID = -8084320940762402095L;
public ClusterFailureException()
{
}
public ClusterFailureException(String string)
{
super(string);
}
public ClusterFailureException(Throwable thrwbl)
{
super(thrwbl);
}
public ClusterFailureException(String string, Throwable thrwbl)
{
super(string, thrwbl);
}
}
| 659 | 17.857143 | 77 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/Clusterer.java |
package jsat.clustering;
import java.io.Serializable;
import java.util.List;
import java.util.concurrent.ExecutorService;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import static jsat.clustering.ClustererBase.createClusterListFromAssignmentArray;
/**
* Defines the interface for a generic clustering algorithm.
*
* @author Edward Raff
*/
public interface Clusterer extends Serializable
{
/**
* Performs clustering on the given data set. Parameters may be estimated by the method, or other heuristics performed.
*
* @param dataSet the data set to perform clustering on
* @return A list of clusters found by this method.
*/
default public List<List<DataPoint>> cluster(DataSet dataSet)
{
return cluster(dataSet, false);
}
/**
* Performs clustering on the given data set. Parameters may be estimated by the method, or other heuristics performed.
*
* @param dataSet the data set to perform clustering on
* @param designations the array which will contain the designated values. The array will be altered and returned by
* the function. If <tt>null</tt> is given, a new array will be created and returned.
* @return an array indicating for each value indicating the cluster designation. This is the same array as
* <tt>designations</tt>, or a new one if the input array was <tt>null</tt>
*/
default public int[] cluster(DataSet dataSet, int[] designations)
{
return cluster(dataSet, false, designations);
}
/**
* Performs clustering on the given data set. Parameters may be estimated by the method, or other heuristics performed.
*
* @param dataSet the data set to perform clustering on
* @param parallel {@code true} if multiple threads should be used to
* perform clustering. {@code false} if it should be done in a single
* threaded manner.
* @return a list of lists, where each inner list contains the data points belonging to one cluster
*/
default public List<List<DataPoint>> cluster(DataSet dataSet, boolean parallel)
{
int[] assignments = cluster(dataSet, parallel, (int[]) null);
return createClusterListFromAssignmentArray(assignments, dataSet);
}
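/*
* Usage sketch (illustrative): any concrete implementation can be driven through this interface.
*
* Clusterer clusterer = new DBSCAN(); // or any other implementation in this package
* int[] assignments = clusterer.cluster(dataSet, true, null); // parallel, hard assignments
* List<List<DataPoint>> clusters = clusterer.cluster(dataSet); // clustered and grouped into lists
*/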
/**
* Performs clustering on the given data set. Parameters may be estimated by the method, or other heuristics performed.
*
* @param dataSet the data set to perform clustering on
* @param parallel {@code true} if multiple threads should be used to
* perform clustering. {@code false} if it should be done in a single
* threaded manner.
* @param designations the array which will contain the designated values.
* The array will be altered and returned by the function. If <tt>null</tt> is given, a new array will be created and returned.
* @return an array indicating for each value the cluster designation. This is the same array as
* <tt>designations</tt>, or a new one if the input array was <tt>null</tt>
*/
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations);
/**
* Indicates whether the model knows how to cluster using weighted data
* points. If it does, the model will train assuming the weights. The values
* returned by this method may change depending on the parameters set for
* the model.
*
* @return <tt>true</tt> if the model supports weighted data, <tt>false</tt>
* otherwise
*/
default public boolean supportsWeightedData()
{
return false;
}
public Clusterer clone();
}
| 3,464 | 38.375 | 131 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/ClustererBase.java |
package jsat.clustering;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
/**
* A base foundation that provides an implementation of {@link #cluster(jsat.DataSet) }
* and {@link #cluster(jsat.DataSet, boolean) } using
* their int array counterparts. <br>
* <br>
* By default it is assumed that a cluster does not support weighted data. If
* this is incorrect, you need to overwrite the {@link #supportsWeightedData() }
* method.
*
* @author Edward Raff
*/
public abstract class ClustererBase implements Clusterer
{
private static final long serialVersionUID = 4359554809306681680L;
/**
* Convenient helper method. A list of lists to represent a cluster may be desirable. In
* such a case, this method will take in an array of cluster assignments, and return a
* list of lists.
*
* @param assignments the array containing cluster assignments
* @param dataSet the original data set, with data in the same order as was used to create the assignments array
* @return a List of lists where each list contains the data points for one cluster, and the lists are in order by cluster id.
*/
public static List<List<DataPoint>> createClusterListFromAssignmentArray(int[] assignments, DataSet dataSet)
{
List<List<DataPoint>> clusterings = new ArrayList<>();
for(int i = 0; i < dataSet.size(); i++)
{
while(clusterings.size() <= assignments[i])
clusterings.add(new ArrayList<>());
if(assignments[i] >= 0)
clusterings.get(assignments[i]).add(dataSet.getDataPoint(i));
}
return clusterings;
}
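/*
* Small illustration: for assignments = {0, 1, 0, -1, 1} the result has
* clusterings.get(0) = {dp0, dp2} and clusterings.get(1) = {dp1, dp4}; the point assigned -1
* (noise / unassigned) is not placed in any cluster's list.
*/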
/**
* Gets a list of the datapoints in a data set that belong to the indicated cluster
* @param c the cluster ID to get the datapoints for
* @param assignments the array containing cluster assignments
* @param dataSet the data set to get the points from
* @param indexFrom stores the index from the original dataset that the
* datapoint is from, such that the item at index {@code i} in the returned
* list can be found in the original dataset at index {@code indexFrom[i]}.
* May be {@code null}
* @return a list of datapoints that were assignment to the designated cluster
*/
public static List<DataPoint> getDatapointsFromCluster(int c, int[] assignments, DataSet dataSet, int[] indexFrom)
{
List<DataPoint> list = new ArrayList<>();
int pos = 0;
for(int i = 0; i < dataSet.size(); i++)
if(assignments[i] == c)
{
list.add(dataSet.getDataPoint(i));
if(indexFrom != null)
indexFrom[pos++] = i;
}
return list;
}
@Override
abstract public Clusterer clone();
}
| 2,975 | 36.670886 | 131 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/DBSCAN.java | package jsat.clustering;
import java.util.*;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.Vec;
import jsat.linear.VecPaired;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.linear.distancemetrics.TrainableDistanceMetric;
import jsat.linear.vectorcollection.DefaultVectorCollection;
import jsat.linear.vectorcollection.VectorCollection;
import jsat.linear.vectorcollection.VectorCollectionUtils;
import jsat.math.OnLineStatistics;
import jsat.utils.concurrent.ParallelUtils;
/**
* A density-based algorithm for discovering clusters in large spatial databases
* with noise (1996) by Martin Ester, Hans-Peter Kriegel, Jörg Sander, and Xiaowei Xu
*
* @author Edward Raff
*/
public class DBSCAN extends ClustererBase
{
private static final long serialVersionUID = 1627963360642560455L;
/**
* Used by {@link #cluster(DataSet, double, int, VectorCollection,int[]) }
* to mark that a data point has not yet been visited. <br>
* Points that have been visited have a value >= 0, which indicates their cluster, or have the value {@link #NOISE}
*/
private static final int UNCLASSIFIED = -1;
/**
* Used by {@link #expandCluster(int[], DataSet, int, int, double, int, VectorCollection) }
* to mark that a data point has been visited, but was considered noise.
*/
private static final int NOISE = -2;
/**
* Vector collection used to index the input vectors for neighbor searches.
* The paired Integer is the vector's index in the original dataset
*/
private VectorCollection<VecPaired<Vec, Integer> > vc;
private DistanceMetric dm;
private double stndDevs = 2.0;
public DBSCAN(DistanceMetric dm, VectorCollection<VecPaired<Vec, Integer>> vc)
{
this.dm = dm;
this.vc = vc;
}
public DBSCAN()
{
this(new EuclideanDistance());
}
public DBSCAN(DistanceMetric dm)
{
this(dm ,new DefaultVectorCollection<VecPaired<Vec, Integer>>());
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public DBSCAN(DBSCAN toCopy)
{
this.vc = toCopy.vc.clone();
this.dm = toCopy.dm.clone();
this.stndDevs = toCopy.stndDevs;
}
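/*
* Usage sketch (values are illustrative): with the constructors above DBSCAN defaults to the
* Euclidean distance, and the eps radius is estimated from the data when not given explicitly.
*
* DBSCAN dbscan = new DBSCAN();
* int[] assignments = dbscan.cluster(dataSet, 5, true, null); // minPts = 5, run in parallel
* // negative assignments mark points that were left as noise
*/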
public List<List<DataPoint>> cluster(DataSet dataSet, int minPts)
{
return createClusterListFromAssignmentArray(cluster(dataSet, minPts, (int[])null), dataSet);
}
public int[] cluster(DataSet dataSet, int minPts, int[] designations)
{
return cluster(dataSet, minPts, false, designations);
}
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
return cluster(dataSet, 3, parallel, designations);
}
@Override
public DBSCAN clone()
{
return new DBSCAN(this);
}
public List<List<DataPoint>> cluster(DataSet dataSet, int minPts, boolean parallel)
{
return createClusterListFromAssignmentArray(cluster(dataSet, minPts, parallel, null), dataSet);
}
public int[] cluster(DataSet dataSet, int minPts, boolean parallel, int[] designations)
{
TrainableDistanceMetric.trainIfNeeded(dm, dataSet, parallel);
vc.build(parallel, getVecIndexPairs(dataSet), dm);
List<List<Integer>> neighbors = new ArrayList<>();
List<List<Double>> distances = new ArrayList<>();
vc.search(vc, minPts+1, neighbors, distances, parallel);
OnLineStatistics stats = ParallelUtils.run(parallel, dataSet.size(), (start, end)->
{
OnLineStatistics s = new OnLineStatistics();
for(int i = start; i < end; i++)
s.add(distances.get(i).get(minPts));
return s;
}, (t, u)->t.apply(t, u));
double eps = stats.getMean() + stats.getStandardDeviation()*stndDevs;
return cluster(dataSet, eps, minPts, vc, parallel, designations);
}
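/*
* The heuristic above picks the radius from the distribution of k-nearest-neighbor distances:
* eps = mean(d_minPts) + stndDevs * standardDeviation(d_minPts)
* where d_minPts is each point's distance to its minPts-th nearest neighbor and stndDevs
* defaults to 2.0. Larger stndDevs values give a larger radius and typically fewer, larger clusters.
*/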
private List<VecPaired<Vec, Integer>> getVecIndexPairs(DataSet dataSet)
{
List<VecPaired<Vec, Integer>> vecs = new ArrayList<>(dataSet.size());
for(int i = 0; i < dataSet.size(); i++)
vecs.add(new VecPaired<>(dataSet.getDataPoint(i).getNumericalValues(), i));
return vecs;
}
public List<List<DataPoint>> cluster(DataSet dataSet, double eps, int minPts)
{
return createClusterListFromAssignmentArray(cluster(dataSet, eps, minPts, (int[]) null), dataSet);
}
public int[] cluster(DataSet dataSet, double eps, int minPts, int[] designations)
{
TrainableDistanceMetric.trainIfNeeded(dm, dataSet);
return cluster(dataSet, eps, minPts, vc, false, designations);
}
public List<List<DataPoint>> cluster(DataSet dataSet, double eps, int minPts, boolean parallel)
{
return createClusterListFromAssignmentArray(cluster(dataSet, eps, minPts, parallel, null), dataSet);
}
public int[] cluster(DataSet dataSet, double eps, int minPts, boolean parallel, int[] designations)
{
TrainableDistanceMetric.trainIfNeeded(dm, dataSet, parallel);
return cluster(dataSet, eps, minPts, vc, parallel, designations);
}
private int[] cluster(DataSet dataSet, double eps, int minPts, VectorCollection<VecPaired<Vec, Integer>> vc, boolean parallel, int[] pointCats)
{
if (pointCats == null)
pointCats = new int[dataSet.size()];
Arrays.fill(pointCats, UNCLASSIFIED);
vc.build(parallel, getVecIndexPairs(dataSet), dm);
List<List<Integer>> neighbors = new ArrayList<>();
List<List<Double>> distances = new ArrayList<>();
vc.search(vc, 0, eps, neighbors, distances, parallel);
int curClusterID = 0;
for(int i = 0; i < pointCats.length; i++)
{
if(pointCats[i] == UNCLASSIFIED)
{
//All assignments are done by expandCluster
if(expandCluster(pointCats, dataSet, i, curClusterID, eps, minPts, neighbors))
curClusterID++;
}
}
return pointCats;
}
/**
*
* @param pointCats the array to store the cluster assignments in
* @param dataSet the data set
* @param point the current data point we are working on
* @param clId the current cluster we are working on
* @param eps the search radius
* @param minPts the minimum number of points to create a new cluster
* @param vc the collection to use to search with
* @return true if a cluster was expanded, false if the point was marked as noise
*/
private boolean expandCluster(int[] pointCats, DataSet dataSet, int point, int clId, double eps, int minPts, List<List<Integer>> neighbors)
{
List<Integer> seeds = neighbors.get(point);
if(seeds.size() < minPts)// no core point
{
pointCats[point] = NOISE;
return false;
}
//Else, all points in seeds are density-reachable from Point
List<Integer> results;
pointCats[point] = clId;
Queue<Integer> workQue = new ArrayDeque<>(seeds);
while(!workQue.isEmpty())
{
int currentP = workQue.poll();
results = neighbors.get(currentP);
if(results.size() >= minPts)
for(int resultPIndx : results)
{
if(pointCats[resultPIndx] < 0)// is UNCLASSIFIED or NOISE
{
if(pointCats[resultPIndx] == UNCLASSIFIED)
workQue.add(resultPIndx);
pointCats[resultPIndx] = clId;
}
}
}
return true;
}
}
| 7,921 | 34.053097 | 147 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/EMGaussianMixture.java | package jsat.clustering;
import static java.lang.Math.log;
import java.util.*;
import java.util.concurrent.*;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.DataSet;
import jsat.SimpleDataSet;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.DataPoint;
import jsat.clustering.SeedSelectionMethods.SeedSelection;
import jsat.distributions.multivariate.MultivariateDistribution;
import jsat.distributions.multivariate.NormalM;
import jsat.linear.*;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
/**
* An implementation of Gaussian Mixture models that learns the specified number of Gaussians using the Expectation-Maximization algorithm.
*
* @author Edward Raff
*/
public class EMGaussianMixture implements KClusterer, MultivariateDistribution
{
private SeedSelection seedSelection;
private static final long serialVersionUID = 2606159815670221662L;
private List<NormalM> gaussians;
/**
* The coefficients for the gaussians
*/
private double[] a_k;
private double tolerance = 1e-3;
/**
* Control the maximum number of iterations to perform.
*/
protected int MaxIterLimit = Integer.MAX_VALUE;
public EMGaussianMixture(SeedSelection seedSelection)
{
setSeedSelection(seedSelection);
}
public EMGaussianMixture()
{
this(SeedSelection.KPP);
}
/**
* Sets the method of seed selection to use for this algorithm.
* {@link SeedSelection#KPP} is recommended for this algorithm in
* particular.
*
* @param seedSelection the method of seed selection to use
*/
public void setSeedSelection(SeedSelectionMethods.SeedSelection seedSelection)
{
this.seedSelection = seedSelection;
}
/**
*
* @return the method of seed selection used
*/
public SeedSelectionMethods.SeedSelection getSeedSelection()
{
return seedSelection;
}
/**
* Sets the maximum number of iterations allowed
* @param iterLimit the maximum number of iterations of the EM algorithm
*/
public void setIterationLimit(int iterLimit)
{
if(iterLimit < 1)
throw new IllegalArgumentException("Iterations must be a positive value, not " + iterLimit);
this.MaxIterLimit = iterLimit;
}
/**
* Returns the maximum number of iterations of the EM algorithm that will be performed.
* @return the maximum number of iterations of the EM algorithm that will be performed.
*/
public int getIterationLimit()
{
return MaxIterLimit;
}
/**
* Copy constructor. The new Gaussian Mixture can be altered without affecting <tt>gm</tt>
* @param gm the Gaussian Mixture to duplicate
*/
public EMGaussianMixture(EMGaussianMixture gm)
{
if(gm.gaussians != null && !gm.gaussians.isEmpty())
{
this.gaussians = new ArrayList<>(gm.gaussians.size());
for(NormalM gaussian : gm.gaussians)
this.gaussians.add(gaussian.clone());
}
if(gm.a_k != null)
this.a_k = Arrays.copyOf(gm.a_k, gm.a_k.length);
this.MaxIterLimit = gm.MaxIterLimit;
this.tolerance = gm.tolerance;
}
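/*
* Usage sketch (values are illustrative): the model acts both as a KClusterer and, once fit,
* as a MultivariateDistribution.
*
* EMGaussianMixture gmm = new EMGaussianMixture(SeedSelection.KPP);
* int[] assignments = gmm.cluster(dataSet, 3, true, null); // fit a 3 component mixture
* double density = gmm.pdf(x); // mixture density at a point x
* List<Vec> draws = gmm.sample(100, new Random()); // draw new points from the fitted mixture
*/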
/**
* Copy constructor
* @param gaussians value to copy
* @param a_k value to copy
* @param tolerance value to copy
*/
@SuppressWarnings("unused")
private EMGaussianMixture(List<NormalM> gaussians, double[] a_k, double tolerance)
{
this.gaussians = new ArrayList<NormalM>(a_k.length);
this.a_k = new double[a_k.length];
for (int i = 0; i < a_k.length; i++)
{
this.gaussians.add(gaussians.get(i).clone());
this.a_k[i] = a_k[i];
}
}
protected double cluster(final DataSet dataSet, final List<Double> accelCache, final int K, final List<Vec> means, final int[] assignment, boolean exactTotal, boolean parallel, boolean returnError)
{
EuclideanDistance dm = new EuclideanDistance();
List<List<Double>> means_qi = new ArrayList<>();
//Pick some initial centers
if(means.size() < K)
{
means.clear();
means.addAll(SeedSelectionMethods.selectIntialPoints(dataSet, K, dm, accelCache, RandomUtil.getRandom(), seedSelection, parallel));
for(Vec v : means)
means_qi.add(dm.getQueryInfo(v));
}
//Use the initial result to initialize the Gaussian Mixture
List<Matrix> covariances = new ArrayList<>(K);
int dimension = dataSet.getNumNumericalVars();
for(int k = 0; k < means.size(); k++)
covariances.add(new DenseMatrix(dimension, dimension));
a_k = new double[K];
double sum = dataSet.size();
//Compute initial covariances
Vec scratch = new DenseVector(dimension);
List<Vec> X = dataSet.getDataVectors();
for(int i = 0; i < dataSet.size(); i++)
{
Vec x = dataSet.getDataPoint(i).getNumericalValues();
//find out which cluster this point belongs to
double closest = dm.dist(i, means.get(0), means_qi.get(0), X, accelCache);
int k = 0;
for(int j = 1; j < K; j++)//TODO move out and make parallel
{
double d_ij = dm.dist(i, means.get(j), means_qi.get(j), X, accelCache);
if(d_ij < closest)
{
closest = d_ij;
k = j;
}
}
assignment[i] = k;
a_k[k]++;
x.copyTo(scratch);
scratch.mutableSubtract(means.get(k));
Matrix.OuterProductUpdate(covariances.get(k), scratch, scratch, 1.0);
}
for(int k = 0; k < means.size(); k++)
{
covariances.get(k).mutableMultiply(1.0 / a_k[k]);
a_k[k] /= sum;
}
return clusterCompute(K, dataSet, assignment, means, covariances, parallel);
}
protected double clusterCompute(int K, DataSet dataSet, int[] assignment, List<Vec> means, List<Matrix> covs, boolean parallel)
{
List<DataPoint> dataPoints = dataSet.getDataPoints();
int N = dataPoints.size();
double currentLogLike = -Double.MAX_VALUE;
gaussians = new ArrayList<>(K);
//Set up initial covariance matrices
for(int k = 0; k < means.size(); k++)
gaussians.add(new NormalM(means.get(k), covs.get(k)));
double[][] p_ik = new double[dataPoints.size()][K];
while(true)
{
try
{
//E-Step:
double logLike = eStep(N, dataPoints, K, p_ik, parallel);
//Convergence check!
double logDifference = Math.abs(currentLogLike - logLike);
if(logDifference < tolerance)
break;//We accept this as converged. Probabilities could be refined, but no one should be changing class anymore
else
currentLogLike = logLike;
mStep(means, N, dataPoints, K, p_ik, covs, parallel);
}
catch (ExecutionException | InterruptedException ex)
{
Logger.getLogger(EMGaussianMixture.class.getName()).log(Level.SEVERE, null, ex);
}
}
//Hard assignments based on most probable outcome
for(int i = 0; i < p_ik.length; i++)
for(int k = 0; k < K; k++)
if(p_ik[i][k] > p_ik[i][assignment[i]])
assignment[i] = k;
return -currentLogLike;
}
private void mStep(final List<Vec> means,final int N,final List<DataPoint> dataPoints, final int K, final double[][] p_ik, final List<Matrix> covs, final boolean parallel) throws InterruptedException
{
/**
* Dimensions
*/
final int D = means.get(0).length();
//M-Step
/*
* Mixture coefficient update:
*
* a_k^(p+1) = (1/n) * sum_{i=1}^{n} p_{i,k}
*/
//Recompute a_k and update means in the same loop
for(Vec mean : means)
mean.zeroOut();
Arrays.fill(a_k, 0.0);
ThreadLocal<Vec> localMean = ThreadLocal.withInitial(()->new DenseVector(dataPoints.get(0).numNumericalValues()));
ParallelUtils.run(parallel, N, (start, end)->
{
//doing k loop first so that I only need one local tmp vector for each thread
for(int k = 0; k < K; k++)
{
//local copies
Vec mean_k_l = localMean.get();
mean_k_l.zeroOut();
double a_k_l = 0;
for(int i = start; i < end; i++)
{
Vec x_i = dataPoints.get(i).getNumericalValues();
a_k_l += p_ik[i][k];
mean_k_l.mutableAdd(p_ik[i][k], x_i);
}
//store updates in globals
synchronized(means.get(k))
{
means.get(k).mutableAdd(mean_k_l);
a_k[k] += a_k_l;
}
}
});
//We can now divide all the means by their sums, which are stored in a_k, and then normalize a_k after
for(int k = 0; k < a_k.length; k++)
means.get(k).mutableDivide(a_k[k]);
//We hold off on normalizing a_k, because we will use its values to update the covariances
for(Matrix cov : covs)
cov.zeroOut();
ParallelUtils.run(parallel, N, (start, end)->
{
Vec scratch = new DenseVector(means.get(0).length());
Matrix cov_local = covs.get(0).clone();
for(int k = 0; k < K; k++)
{
final Vec mean = means.get(k);
scratch.zeroOut();
cov_local.zeroOut();
for(int i = start; i < end; i++)
{
DataPoint dp = dataPoints.get(i);
Vec x = dp.getNumericalValues();
x.copyTo(scratch);
scratch.mutableSubtract(mean);
Matrix.OuterProductUpdate(cov_local, scratch, scratch, p_ik[i][k]);
}
synchronized(covs.get(k))
{
covs.get(k).mutableAdd(cov_local);
}
}
});
//clean up covs
for(int k = 0; k < K; k++)
covs.get(k).mutableMultiply(1.0 / (a_k[k]));
//Finally, normalize the coefficients
for(int k = 0; k < K; k++)
a_k[k] /= N;
//And update the Normals
for(int k = 0; k < means.size(); k++)
gaussians.get(k).setMeanCovariance(means.get(k), covs.get(k));
}
private double eStep(final int N, final List<DataPoint> dataPoints, final int K, final double[][] p_ik, final boolean parallel) throws InterruptedException, ExecutionException
{
double logLike = 0;
/*
* E-Step responsibilities:
*
* p_{i,k} = a_k * P(x_i | mean_k, Sigma_k) / sum_{k'=1}^{K} a_{k'} * P(x_i | mean_{k'}, Sigma_{k'})
*/
/*
* We will piggyback off the E-Step to compute the log likelihood:
*
* L(x, Theta) = sum_{i=1}^{N} log( sum_{k=1}^{K} a_k * P(x_i | mean_k, Sigma_k) )
*/
logLike = ParallelUtils.run(parallel, N, (start, end)->
{
double logLikeLocal = 0;
for(int i = start; i < end; i++)
{
Vec x_i = dataPoints.get(i).getNumericalValues();
double p_ikNormalizer = 0.0;
for(int k = 0; k < K; k++)
{
double tmp = a_k[k] * gaussians.get(k).pdf(x_i);
p_ik[i][k] = tmp;
p_ikNormalizer += tmp;
}
//Normalize previous values
for(int k = 0; k < K; k++)
p_ik[i][k] /= p_ikNormalizer;
//Add to part of the log likelihood
logLikeLocal += Math.log(p_ikNormalizer);
}
return logLikeLocal;
}, (t,u)->t+u);
return logLike;
}
@Override
public double logPdf(Vec x)
{
double pdf = pdf(x);
if(pdf == 0)
return -Double.MAX_VALUE;
return log(pdf);
}
@Override
public double pdf(Vec x)
{
double PDF = 0.0;
for(int i = 0; i < a_k.length; i++)
PDF += a_k[i] * gaussians.get(i).pdf(x);
return PDF;
}
@Override
public <V extends Vec> boolean setUsingData(List<V> dataSet, boolean parallel)
{
List<DataPoint> dataPoints = new ArrayList<>(dataSet.size());
for(Vec x : dataSet)
dataPoints.add(new DataPoint(x, new int[0], new CategoricalData[0]));
return setUsingData(new SimpleDataSet(dataPoints), parallel);
}
@Override
public boolean setUsingData(DataSet dataSet, boolean parallel)
{
try
{
cluster(dataSet, parallel);
return true;
}
catch (ArithmeticException ex)
{
return false;
}
}
@Override
public EMGaussianMixture clone()
{
return new EMGaussianMixture(this);
}
@Override
public List<Vec> sample(int count, Random rand)
{
List<Vec> samples = new ArrayList<>(count);
//First we need the figure out which of the mixtures to sample from
//So generate [0,1] uniform values to determine
double[] priorTargets = new double[count];
for(int i = 0; i < count; i++)
priorTargets[i] = rand.nextDouble();
Arrays.sort(priorTargets);
int subSampleSize = 0;
int currentGaussian = 0;
int pos = 0;
double a_kSum = 0.0;
while(currentGaussian < a_k.length)
{
a_kSum += a_k[currentGaussian];
while(pos < count && priorTargets[pos++] < a_kSum)
subSampleSize++;
samples.addAll(gaussians.get(currentGaussian++).sample(subSampleSize, rand));
}
return samples;
}
@Override
public int[] cluster(DataSet dataSet, int[] designations)
{
return cluster(dataSet, 2, (int)Math.sqrt(dataSet.size()/2), designations);
}
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
return cluster(dataSet, 2, (int)Math.sqrt(dataSet.size()/2), parallel, designations);
}
@Override
public int[] cluster(DataSet dataSet, int clusters, boolean parallel, int[] designations)
{
if(designations == null)
designations = new int[dataSet.size()];
if(dataSet.size() < clusters)
throw new ClusterFailureException("Fewer data points then desired clusters, decrease cluster size");
List<Vec> means = new ArrayList<>(clusters);
cluster(dataSet, null, clusters, means, designations, false, parallel, false);
return designations;
}
@Override
public int[] cluster(DataSet dataSet, int lowK, int highK, boolean parallel, int[] designations)
{
throw new UnsupportedOperationException("EMGaussianMixture does not supported determining the number of clusters");
}
@Override
public int[] cluster(DataSet dataSet, int lowK, int highK, int[] designations)
{
throw new UnsupportedOperationException("EMGaussianMixture does not supported determining the number of clusters");
}
}
| 16,834 | 32.009804 | 203 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/FLAME.java | package jsat.clustering;
import java.util.*;
import java.util.concurrent.atomic.DoubleAdder;
import jsat.DataSet;
import jsat.exceptions.FailedToFitException;
import jsat.linear.Vec;
import jsat.linear.VecPaired;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.TrainableDistanceMetric;
import jsat.linear.vectorcollection.DefaultVectorCollection;
import jsat.linear.vectorcollection.VectorCollection;
import jsat.linear.vectorcollection.VectorCollectionUtils;
import jsat.math.OnLineStatistics;
import jsat.parameters.Parameterized;
import jsat.utils.IntSet;
import jsat.utils.concurrent.ParallelUtils;
/**
* Provides an implementation of the FLAME clustering algorithm. The original
* FLAME paper does not describe all necessary details for an implementation, so
* results may differ between implementations. <br><br>
* FLAME is highly sensitive to the number of neighbors chosen. Increasing the
* neighbors tends to reduce the number of clusters formed.
* <br><br>
* See: Fu, L.,&Medico, E. (2007). <i>FLAME, a novel fuzzy clustering method
* for the analysis of DNA microarray data</i>. BMC Bioinformatics, 8(1), 3.
* Retrieved from <a href="http://www.ncbi.nlm.nih.gov/pubmed/17204155">here</a>
*
* @author Edward Raff
*/
public class FLAME extends ClustererBase implements Parameterized
{
private static final long serialVersionUID = 2393091020100706517L;
private DistanceMetric dm;
private int k;
private int maxIterations;
private VectorCollection<VecPaired<Vec, Integer>> vc = new DefaultVectorCollection<>();
private double stndDevs = 2.5;
private double eps = 1e-6;
/**
* Creates a new FLAME clustering object
* @param dm the distance metric to use
* @param k the number of neighbors to consider
* @param maxIterations the maximum number of iterations to perform
*/
public FLAME(DistanceMetric dm, int k, int maxIterations)
{
setDistanceMetric(dm);
setK(k);
setMaxIterations(maxIterations);
}
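/*
* Usage sketch (values are illustrative): k is the main knob to tune, and increasing it tends
* to reduce the number of clusters that form.
*
* FLAME flame = new FLAME(new EuclideanDistance(), 10, 800); // 10 neighbors, at most 800 iterations
* int[] assignments = flame.cluster(dataSet, true, null); // -1 marks outliers
*/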
/**
* Copy constructor
* @param toCopy the object to copy
*/
public FLAME(FLAME toCopy)
{
this.dm = toCopy.dm.clone();
this.maxIterations = toCopy.maxIterations;
this.vc = toCopy.vc.clone();
this.k = toCopy.k;
this.stndDevs = toCopy.stndDevs;
this.eps = toCopy.eps;
}
/**
* Sets the maximum number of iterations to perform. FLAME can require far
* more iterations to converge than necessary to get the same hard
* clustering result.
*
* @param maxIterations the maximum number of iterations to perform
*/
public void setMaxIterations(int maxIterations)
{
if(maxIterations < 1)
throw new IllegalArgumentException("Must perform a positive number of iterations, not " + maxIterations);
this.maxIterations = maxIterations;
}
/**
* Returns the maximum number of iterations to perform
* @return the maximum number of iterations to perform
*/
public int getMaxIterations()
{
return maxIterations;
}
/**
* Sets the number of neighbors that will be considered in determining
* Cluster Supporting Points and assignment contributions.
* @param k the number of neighbors to consider
*/
public void setK(int k)
{
this.k = k;
}
/**
* Returns the number of neighbors used
* @return the number of neighbors used
*/
public int getK()
{
return k;
}
/**
* Sets the convergence goal for the minimum difference in score between
* rounds. Negative values are allowed to force all iterations to occur
* @param eps the minimum difference in scores for convergence
*/
public void setEps(double eps)
{
if(Double.isNaN(eps))
throw new IllegalArgumentException("Eps can not be NaN");
this.eps = eps;
}
/**
* Returns the minimum difference in scores to consider FLAME converged
* @return the minimum difference in scores to consider FLAME converged
*/
public double getEps()
{
return eps;
}
/**
* Sets the number of standard deviations away from the mean density a
* candidate outlier must be to be confirmed as an outlier.
* @param stndDevs the number of standard deviations away from the mean
* density an outlier must be
*/
public void setStndDevs(double stndDevs)
{
if(stndDevs < 0 || Double.isInfinite(stndDevs) || Double.isNaN(stndDevs))
throw new IllegalArgumentException("Standard Deviations must be non negative");
this.stndDevs = stndDevs;
}
/**
* Returns the number of standard deviations away from the mean
* density an outlier must be
* @return the number of standard deviations away from the mean
* density an outlier must be
*/
public double getStndDevs()
{
return stndDevs;
}
/**
* Sets the distance metric to use for the nearest neighbor search
* @param dm the distance metric to use
*/
public void setDistanceMetric(DistanceMetric dm)
{
this.dm = dm;
}
/**
* Returns the distance metric to use for the nearest neighbor search
* @return the distance metric to use
*/
public DistanceMetric getDistanceMetric()
{
return dm;
}
/**
* Sets the vector collection used to accelerate the nearest
* neighbor search. The nearest neighbor only needs to be done once for each
* point, so the collection should be faster than the naive method when
* considering both construction and search time.
*
* @param vc the vector collection to use
*/
public void setVectorCollectionFactory(VectorCollection<VecPaired<Vec, Integer>> vc)
{
this.vc = vc;
}
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
if(k >= dataSet.size())
throw new FailedToFitException("Number of k-neighbors (" + k + ") can not be larger than the number of datapoints (" + dataSet.size() + ")");
final int n = dataSet.size();
if (designations == null || designations.length < dataSet.size())
designations = new int[n];
List<VecPaired<Vec, Integer>> vecs = new ArrayList<>(n);
for (int i = 0; i < dataSet.size(); i++)
vecs.add(new VecPaired<>(dataSet.getDataPoint(i).getNumericalValues(), i));
TrainableDistanceMetric.trainIfNeeded(dm, dataSet, parallel);
final List<List<? extends VecPaired<VecPaired<Vec, Integer>, Double>>> allNNs;
vc.build(parallel, vecs, dm);
allNNs = VectorCollectionUtils.allNearestNeighbors(vc, vecs, k + 1, parallel);
//NOTE: Density is done in reverse, so large values indicate low density, small values indicate high density.
//mark density as the sum of distances
final double[] density = new double[vecs.size()];
final double[][] weights = new double[n][k];
OnLineStatistics densityStats = new OnLineStatistics();
for (int i = 0; i < density.length; i++)
{
List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> knns = allNNs.get(i);
for (int j = 1; j < knns.size(); j++)
density[i] += (weights[i][j - 1] = knns.get(j).getPair());
densityStats.add(density[i]);
double sum = 0;
for (int j = 0; j < k; j++)
sum += (weights[i][j] = Math.min(1.0 / Math.pow(weights[i][j], 2), Double.MAX_VALUE / (k + 1)));
for (int j = 0; j < k; j++)
weights[i][j] /= sum;
}
final Map<Integer, Integer> CSOs = new HashMap<>();
final Set<Integer> outliers = new IntSet();
Arrays.fill(designations, -1);
final double threshold = densityStats.getMean() + densityStats.getStandardDeviation() * stndDevs;
for (int i = 0; i < density.length; i++)
{
List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> knns = allNNs.get(i);
boolean lowest = true;//if my density score is lower than all neighbors, then I am a CSO
boolean highest = true;//if higher, then I am an outlier
for (int j = 1; j < knns.size() && (highest || lowest); j++)
{
int jNN = knns.get(j).getVector().getPair();
if (density[i] > density[jNN])
lowest = false;
else
highest = false;
}
if (lowest)
CSOs.put(i, CSOs.size());
else if (highest && density[i] > threshold)
outliers.add(i);
}
//remove CSOs that occur near outliers
{
int origSize = CSOs.size();
Iterator<Integer> iter = CSOs.keySet().iterator();
while (iter.hasNext())
{
int i = iter.next();
List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> knns = allNNs.get(i);
for (int j = 1; j < knns.size(); j++)
if (outliers.contains(knns.get(j).getVector().getPair()))
{
iter.remove();
break;
}
}
if(origSize != CSOs.size())//we did a removal, re-order clusters
{
Set<Integer> keys = new IntSet(CSOs.keySet());
CSOs.clear();
for(int i : keys)
CSOs.put(i, CSOs.size());
}
//May have gaps, will be fixed in final step
for (int i : CSOs.keySet())
designations[i] = CSOs.get(i);
}
//outlier is implicit extra term
double[][] fuzzy = new double[n][CSOs.size() + 1];
for (int i = 0; i < n; i++)
if (CSOs.containsKey(i))
fuzzy[i][CSOs.get(i)] = 1.0;//each CSO belongs fully to itself
else if (outliers.contains(i))
fuzzy[i][CSOs.size()] = 1.0;
else
Arrays.fill(fuzzy[i], 1.0 / (CSOs.size() + 1));
//iterate
double[][] fuzzy2 = new double[n][CSOs.size() + 1];
double prevScore = Double.POSITIVE_INFINITY;
for (int iter = 0; iter < maxIterations; iter++)
{
final double[][] FROM = fuzzy, TO = fuzzy2;
final DoubleAdder score = new DoubleAdder();
//Single index loop b/c of uneven workloads
ParallelUtils.run(parallel, FROM.length, (i)->
{
if (outliers.contains(i) || CSOs.containsKey(i))
return;
final double[] fuzzy2_i = TO[i];
Arrays.fill(fuzzy2_i, 0);
List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> knns = allNNs.get(i);
double sum = 0;
for (int j = 1; j < weights[i].length; j++)
{
int jNN = knns.get(j).getVector().getPair();
final double[] fuzzy_jNN = FROM[jNN];
double weight = weights[i][j - 1];
for (int z = 0; z < FROM[jNN].length; z++)
fuzzy2_i[z] += weight * fuzzy_jNN[z];
}
for (int z = 0; z < fuzzy2_i.length; z++)
sum += fuzzy2_i[z];
double localScore = 0;
for (int z = 0; z < fuzzy2_i.length; z++)
{
fuzzy2_i[z] /= sum + 1e-6;
localScore += Math.abs(FROM[i][z] - fuzzy2_i[z]);
}
score.add(localScore);
});
if (Math.abs(prevScore - score.doubleValue()) < eps)
break;
prevScore = score.doubleValue();
double[][] tmp = fuzzy;
fuzzy = fuzzy2;
fuzzy2 = tmp;
}
//Figure out final clustering
int[] clusterCounts = new int[n];
for (int i = 0; i < fuzzy.length; i++)
{
int pos = -1;
double maxVal = 0;
for (int j = 0; j < fuzzy[i].length; j++)
{
if (fuzzy[i][j] > maxVal)
{
maxVal = fuzzy[i][j];
pos = j;
}
}
if(pos == -1)//TODO how did this happen? Mark it as an outlier. Somehow your whole row became zeros to cause this
pos = CSOs.size();
clusterCounts[pos]++;
if (pos == CSOs.size())//outlier
pos = -1;
designations[i] = pos;
}
/* Transform clusterCounts to indicate the new cluster ID. If
* everyone gets their own id, no clusters are removed. Else, points
* with a negative value know they need to remove themselves
*/
int newCCount = 0;
for(int i = 0; i < clusterCounts.length; i++)
if(clusterCounts[i] > 1)
clusterCounts[i] = newCCount++;
else
clusterCounts[i] = -1;
//Go through and remove clusters with a count of 1
if(newCCount != clusterCounts.length)
{
double[] tmp = new double[CSOs.size()+1];
for (int i = 0; i < fuzzy.length; i++)
{
int d = designations[i];
if(d > 0)//not outlier
{
if (clusterCounts[d] == -1)//remove self
{
List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> knns = allNNs.get(i);
for (int j = 1; j < weights[i].length; j++)
{
int jNN = knns.get(j).getVector().getPair();
final double[] fuzzy_jNN = fuzzy[jNN];
double weight = weights[i][j - 1];
for (int z = 0; z < fuzzy[jNN].length; z++)
tmp[z] += weight * fuzzy_jNN[z];
}
double maxVal = -1;
int maxIndx = -1;
for(int z = 0; z < tmp.length; z++)
if(tmp[z] > maxVal)
{
maxVal =tmp[z];
maxIndx = z;
}
if(maxIndx == CSOs.size())
designations[i] = -1;
else
designations[i] = clusterCounts[maxIndx];
}
else
{
designations[i] = clusterCounts[d];
}
}
}
}
return designations;
}
@Override
public FLAME clone()
{
return new FLAME(this);
}
}
| 15,223 | 35.772947 | 153 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/GapStatistic.java | package jsat.clustering;
import java.util.*;
import jsat.DataSet;
import jsat.SimpleDataSet;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.DataPoint;
import jsat.clustering.evaluation.IntraClusterSumEvaluation;
import jsat.clustering.evaluation.intra.SumOfSqrdPairwiseDistances;
import jsat.clustering.kmeans.HamerlyKMeans;
import jsat.clustering.kmeans.KMeans;
import jsat.linear.*;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.math.OnLineStatistics;
import jsat.parameters.Parameter.ParameterHolder;
import jsat.parameters.Parameterized;
import jsat.utils.random.RandomUtil;
/**
* This class implements a method for estimating the number of clusters in a
* data set called the Gap Statistic. It works by sampling new datasets from a
* uniform random space, and comparing the sum of squared pairwise distances
* between the sampled data and the real data. The number of samples has a
* significant impact on runtime, and is controlled via {@link #setSamples(int)
* }. <br>
* The Gap method can be applied to any distance metric and any clustering
* algorithm. However, it is significantly faster for the
* {@link EuclideanDistance} and was developed with the {@link KMeans}
* algorithm. Thus that combination is the default when using the no argument
* constructor. <br>
* <br>
* A slight deviation in the implementation from the original paper exists. The
* original paper specifies that the smallest {@code K} satisfying
* {@link #getGap() Gap}(K) ≥ Gap(K+1) - {@link #getElogWkStndDev() sd}(K+1)
* is the value of {@code K} to use. Instead the condition used is the
* smallest {@code K} such that Gap(K) ≥ Gap(K+1)- sd(K+1) and Gap(K) > 0.
* <br>
* In addition, if no value of {@code K} satisfies the condition, the largest
* value of Gap(K) will be used. <br>
* <br>
* Note, by default this implementation uses a heuristic for the max value of
* {@code K} that is capped at 100 when using the
* {@link #cluster(jsat.DataSet) } type methods.<br>
* Note: when called with the desired number of clusters, the result of the base
* clustering algorithm will be returned directly. <br>
* <br>
* See: Tibshirani, R., Walther, G.,&Hastie, T. (2001). <i>Estimating the
* number of clusters in a data set via the gap statistic</i>. Journal of the
* Royal Statistical Society: Series B (Statistical Methodology), 63(2),
* 411–423. doi:10.1111/1467-9868.00293
*
* @author Edward Raff
*/
public class GapStatistic extends KClustererBase implements Parameterized
{
private static final long serialVersionUID = 8893929177942856618L;
@ParameterHolder
private KClusterer base;
private int B;
private DistanceMetric dm;
private boolean PCSampling;
private double[] ElogW;
private double[] logW;
private double[] gap;
private double[] s_k;
/**
* Creates a new Gap clusterer using k-means as the base clustering algorithm
*/
public GapStatistic()
{
this(new HamerlyKMeans());
}
/**
* Creates a new Gap clusterer using the base clustering algorithm given.
* @param base the base clustering method to use for any individual number
* of clusters
*/
public GapStatistic(KClusterer base)
{
this(base, false);
}
/**
* Creates a new Gap clusterer using the base clustering algorithm given.
* @param base the base clustering method to use for any individual number
* of clusters
* @param PCSampling {@code true} if the Gap statistic should be computed
* from a PCA transformed space, or {@code false} to go with the uniform
* bounding hyper cube.
*/
public GapStatistic(KClusterer base, boolean PCSampling)
{
this(base, PCSampling, 10, new EuclideanDistance());
}
/**
* Creates a new Gap clusterer using the base clustering algorithm given.
* @param base the base clustering method to use for any individual number
* of clusters
* @param PCSampling {@code true} if the Gap statistic should be computed
* from a PCA transformed space, or {@code false} to go with the uniform
* bounding hyper cube.
* @param B the number of datasets to sample
* @param dm the distance metric to evaluate with
*/
public GapStatistic(KClusterer base, boolean PCSampling, int B, DistanceMetric dm )
{
this.base = base;
setSamples(B);
setDistanceMetric(dm);
setPCSampling(PCSampling);
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public GapStatistic(GapStatistic toCopy)
{
this.base = toCopy.base.clone();
this.B = toCopy.B;
this.dm = toCopy.dm.clone();
this.PCSampling = toCopy.PCSampling;
if(toCopy.ElogW != null)
this.ElogW = Arrays.copyOf(toCopy.ElogW, toCopy.ElogW.length);
if(toCopy.logW != null)
this.logW = Arrays.copyOf(toCopy.logW, toCopy.logW.length);
if(toCopy.gap != null)
this.gap = Arrays.copyOf(toCopy.gap, toCopy.gap.length);
if(toCopy.s_k != null)
this.s_k = Arrays.copyOf(toCopy.s_k, toCopy.s_k.length);
}
/**
* Sets the distance metric to use when evaluating a clustering algorithm
* @param dm the distance metric to use
*/
public void setDistanceMetric(DistanceMetric dm)
{
this.dm = dm;
}
/**
*
* @return the distance metric used for evaluation
*/
public DistanceMetric getDistanceMetric()
{
return dm;
}
/**
* By default the null distribution is sampled from the bounding hyper-cube
* of the dataset. The sampling can be made more accurate
* (and invariant) by sampling the null distribution based on the principal
* components of the dataset. This will also increase the runtime of the
* algorithm.
* @param PCSampling {@code true} to sample from the projected data, {@code
* false} to do the default and sample from the bounding hyper-cube.
*/
public void setPCSampling(boolean PCSampling)
{
this.PCSampling = PCSampling;
}
/**
*
* @return {@code true} to sample from the projected data, {@code
* false} to do the default and sample from the bounding hyper-cube.
*/
public boolean isPCSampling()
{
return PCSampling;
}
/**
* The Gap statistic is measured by sampling from a reference distribution
* and comparing with the given data set. This controls the number of sample
* datasets to draw and evaluate.
*
* @param B the number of data sets to sample
*/
public void setSamples(int B)
{
if(B <= 0)
throw new IllegalArgumentException("sample size must be positive, not " + B);
this.B = B;
}
/**
*
* @return the number of data sets sampled
*/
public int getSamples()
{
return B;
}
/**
* Returns the array of gap statistic values. Index {@code i} of the
* returned array indicates the gap score for using {@code i+1} clusters. A
* value of {@link Double#NaN} if the score was not computed for that value
* of {@code K}
* @return the array of gap statistic values computed, or {@code null} if
* the algorithm hasn't been run yet.
*/
public double[] getGap()
{
return gap;
}
/**
* Returns the array of empirical <i>log(W<sub>k</sub>)</i> scores computed
* from the data set last clustered. <br>
* Index {@code i} of the returned array indicates the gap score for using
* {@code i+1} clusters. A value of {@link Double#NaN} if the score was not
* computed for that value of {@code K}
* @return the array of empirical scores from the last run, or {@code null}
* if the algorithm hasn't been run yet
*/
public double[] getLogW()
{
return logW;
}
/**
* Returns the array of expected <i>E[log(W<sub>k</sub>)]</i> scores
* computed from sampling new data sets. <br>
* Index {@code i} of the returned array indicates the gap score for using
* {@code i+1} clusters. A value of {@link Double#NaN} if the score was not
* computed for that value of {@code K}
* @return the array of sampled expected scores from the last run, or
* {@code null} if the algorithm hasn't been run yet
*/
public double[] getElogW()
{
return ElogW;
}
/**
* Returns the array of standard deviations from the samplings used to compute
* {@link #getElogW() }, multiplied by <i>sqrt(1+1/B)</i>. <br>
* Index {@code i} of the returned array indicates the gap score for using
* {@code i+1} clusters. A value of {@link Double#NaN} if the score was not
* computed for that value of {@code K}
* @return the array of standard deviations from the last run, or
* {@code null} if the algorithm hasn't been run yet
*/
public double[] getElogWkStndDev()
{
return s_k;
}
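/*
* Illustrative reading of these diagnostics: index i of each array corresponds to K = i + 1,
* and the K returned by the range-search cluster methods is the smallest K in [lowK, highK] with
*
* Gap(K) >= Gap(K+1) - sd(K+1) and Gap(K) > 0
*
* falling back to the K with the largest Gap value when no K satisfies the condition.
*
* GapStatistic gap = new GapStatistic(); // k-means base, Euclidean distance
* int[] assignments = gap.cluster(dataSet, 1, 20, true, null); // search K in [1, 20]
* double[] gaps = gap.getGap(); // gaps[i] is the Gap score for K = i + 1
*/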
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
return cluster(dataSet, 1, (int) Math.min(Math.max(Math.sqrt(dataSet.size()), 10), 100), parallel, designations);
}
@Override
public int[] cluster(DataSet dataSet, int clusters, boolean parallel, int[] designations)
{
return base.cluster(dataSet, clusters, parallel, designations);
}
@Override
public int[] cluster(DataSet dataSet, int lowK, int highK, boolean parallel, int[] designations)
{
final int D = dataSet.getNumNumericalVars();
final int N = dataSet.size();
if(designations == null || designations.length < N)
designations = new int[N];
//TODO we don't need all values in [1, lowK-1) in order to get the gap statistic for [lowK, highK]. So let's not do that extra work.
logW = new double[highK-1];
ElogW = new double[highK-1];
gap = new double[highK-1];
s_k = new double[highK-1];
IntraClusterSumEvaluation ssd = new IntraClusterSumEvaluation(new SumOfSqrdPairwiseDistances(dm));
//Step 1: Cluster the observed data
Arrays.fill(designations, 0);
logW[0] = Math.log(ssd.evaluate(designations, dataSet));//base case
for(int k = 2; k < highK; k++)
{
designations = base.cluster(dataSet, k, parallel, designations);
logW[k-1] = Math.log(ssd.evaluate(designations, dataSet));
}
//Step 2:
//use online statistics and run through all K for each B, so that we minimize the memory use
OnLineStatistics[] expected = new OnLineStatistics[highK-1];
for(int i = 0; i < expected.length; i++)
expected[i] = new OnLineStatistics();
//dataset object we will reuse
SimpleDataSet Xp = new SimpleDataSet(D, new CategoricalData[0]);
for(int i = 0; i < N; i++)
Xp.add(new DataPoint(new DenseVector(D)));
Random rand = RandomUtil.getRandom();
//info needed for sampling
//min/max for each row/col to smaple uniformly from
double[] min = new double[D];
double[] max = new double[D];
Arrays.fill(min, Double.POSITIVE_INFINITY);
Arrays.fill(max, Double.NEGATIVE_INFINITY);
final Matrix V_T;//the V^T from [U, D, V] of SVD decomposition
if(PCSampling)
{
SingularValueDecomposition svd = new SingularValueDecomposition(dataSet.getDataMatrix());
//X' = X V , from generation strategy (b)
Matrix tmp = dataSet.getDataMatrixView().multiply(svd.getV());
for(int i = 0; i < tmp.rows(); i++)
for(int j = 0; j < tmp.cols(); j++)
{
min[j] = Math.min(tmp.get(i, j), min[j]);
max[j] = Math.max(tmp.get(i, j), max[j]);
}
V_T = svd.getV().transpose();
}
else
{
V_T = null;
OnLineStatistics[] columnStats = dataSet.getOnlineColumnStats(false);
for(int i = 0; i < D; i++)
{
min[i] = columnStats[i].getMin();
max[i] = columnStats[i].getMax();
}
}
//generate B reference datasets
for(int b = 0; b < B; b++)
{
for (int i = 0; i < N; i++)//sample
{
Vec xp = Xp.getDataPoint(i).getNumericalValues();
for (int j = 0; j < D; j++)
xp.set(j, (max[j] - min[j]) * rand.nextDouble() + min[j]);
}
if(PCSampling)//project if wanted
{
//Finally we back-transform via Z = Z' V^T to give reference data Z
//TODO batch as a matrix matrix op would be faster, but use more memory
Vec tmp = new DenseVector(D);
for (int i = 0; i < N; i++)
{
Vec xp = Xp.getDataPoint(i).getNumericalValues();
tmp.zeroOut();
xp.multiply(V_T, tmp);
tmp.copyTo(xp);
}
}
//cluster each one
Arrays.fill(designations, 0);
expected[0].add(Math.log(ssd.evaluate(designations, Xp)));//base case
for(int k = 2; k < highK; k++)
{
designations = base.cluster(Xp, k, parallel, designations);
expected[k-1].add(Math.log(ssd.evaluate(designations, Xp)));
}
}
        //go through and compute the gap
int k_first = -1;
        int biggestGap = 0;//used as a fallback in case the original condition can't be satisfied in the specified range
for (int i = 0; i < gap.length; i++)
{
gap[i] = (ElogW[i] = expected[i].getMean()) - logW[i];
s_k[i] = expected[i].getStandardDeviation() * Math.sqrt(1 + 1.0 / B);
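            //gap[i] holds Gap(k) for k = i+1 clusters; the selection rule (from the gap statistic
            //paper) picks the smallest k satisfying Gap(k) >= Gap(k+1) - s_{k+1}, with Gap(k) > 0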
//check original condition first
int k = i + 1;
if (i > 0 && lowK <= k && k <= highK)
if (k_first == -1 && gap[i - 1] >= gap[i] - s_k[i] && gap[i-1] > 0)
k_first = k - 1;
//check backup
            if(gap[i] > gap[biggestGap] && lowK <= k && k <= highK)
biggestGap = i;
}
if(k_first == -1)//never satisfied our conditions?
k_first = biggestGap+1;//Maybe we should go back and pick the best gap k we can find?
if(k_first == 1)//easy case
{
Arrays.fill(designations, 0);
return designations;
}
return base.cluster(dataSet, k_first, parallel, designations);
}
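    /*
     * Usage sketch (illustrative, not part of the original source): given an already
     * constructed GapStatistic named "gs" and a numeric DataSet named "data",
     *
     *   int[] labels = gs.cluster(data, 1, 20, true, null); // searches k in [1, 20]
     *   double[] logW = gs.getLogW(); // per-k log(W_k) scores from that run
     *
     * would cluster the data and expose the scores used to select k.
     */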
@Override
public GapStatistic clone()
{
return new GapStatistic(this);
}
}
| 15,151 | 35.86618 | 140 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/HDBSCAN.java | /*
* Copyright (C) 2016 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.clustering;
import java.util.*;
import jsat.DataSet;
import jsat.linear.Vec;
import jsat.linear.VecPaired;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.linear.vectorcollection.*;
import jsat.utils.FibHeap;
import static java.lang.Math.max;
import jsat.exceptions.FailedToFitException;
import jsat.parameters.Parameterized;
import jsat.utils.*;
/**
* HDBSCAN is a density based clustering algorithm that is an improvement over
* {@link DBSCAN}. Unlike its predecessor, HDBSCAN works with variable density
* datasets and does not need a search radius to be specified. The original
* paper presents HDBSCAN with two parameters
* {@link #setMinPoints(int) m<sub>pts</sub>} and
* {@link #setMinClusterSize(int) m<sub>clSize</sub>}, but recomends that they
* can be set to the same value and effectively behave as if only one parameter
* exists. This implementation allows for setting both independtly, but the
* single parameter constructors will use the same value for both parameters.
* <br>
* NOTE: The current implementation has O(N<sup>2</sup>) run time, though
* this may be improved in the future with more advanced algorithms.<br>
* <br>
* See: Campello, R. J. G. B., Moulavi, D., & Sander, J. (2013). Density-Based
* Clustering Based on Hierarchical Density Estimates. In J. Pei, V. Tseng, L.
* Cao, H. Motoda, & G. Xu (Eds.), Advances in Knowledge Discovery and Data
* Mining (pp. 160–172). Springer Berlin Heidelberg.
* doi:10.1007/978-3-642-37456-2_14
* @author Edward Raff
*/
public class HDBSCAN implements Clusterer, Parameterized
{
private DistanceMetric dm;
/**
     * minimum number of points
*/
private int m_pts;
private int m_clSize;
private VectorCollection<Vec> vc;
/**
* Creates a new HDBSCAN object using a threshold of 15 points to form a
* cluster.
*/
public HDBSCAN()
{
this(15);
}
/**
* Creates a new HDBSCAN using the simplified form, where the only parameter
* is a single value.
*
* @param m_pts the minimum number of points needed to form a cluster and
* the number of neighbors to consider
*/
public HDBSCAN(int m_pts)
{
this(new EuclideanDistance(), m_pts);
}
/**
* Creates a new HDBSCAN using the simplified form, where the only parameter
* is a single value.
*
* @param dm the distance metric to use for finding nearest neighbors
* @param m_pts the minimum number of points needed to form a cluster and
* the number of neighbors to consider
*/
public HDBSCAN(DistanceMetric dm, int m_pts)
{
this(dm, m_pts, m_pts, new DefaultVectorCollection<>());
}
/**
* Creates a new HDBSCAN using the simplified form, where the only parameter
* is a single value.
*
* @param dm the distance metric to use for finding nearest neighbors
* @param m_pts the minimum number of points needed to form a cluster and
* the number of neighbors to consider
* @param vcf the vector collection to use for accelerating nearest neighbor
* queries
*/
public HDBSCAN(DistanceMetric dm, int m_pts, VectorCollection<Vec> vcf)
{
this(dm, m_pts, m_pts, vcf);
}
/**
* Creates a new HDBSCAN using the full specification of the algorithm,
* where two parameters may be altered. In the simplified version both
* parameters always have the same value.
*
* @param dm the distance metric to use for finding nearest neighbors
* @param m_pts the number of neighbors to consider, acts as a smoothing
* over the density estimate
* @param m_clSize the minimum number of data points needed to form a
* cluster
* @param vc the vector collection to use for accelerating nearest neighbor
* queries
*/
public HDBSCAN(DistanceMetric dm, int m_pts, int m_clSize, VectorCollection<Vec> vc)
{
this.dm = dm;
this.m_pts = m_pts;
this.m_clSize = m_clSize;
this.vc = vc;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public HDBSCAN(HDBSCAN toCopy)
{
        this.dm = toCopy.dm.clone();
this.m_pts = toCopy.m_pts;
this.m_clSize = toCopy.m_clSize;
this.vc = toCopy.vc.clone();
}
/**
*
* @param m_clSize the minimum number of data points needed to form a
* cluster
*/
public void setMinClusterSize(int m_clSize)
{
this.m_clSize = m_clSize;
}
/**
*
* @return the minimum number of data points needed to form a
* cluster
*/
public int getMinClusterSize()
{
return m_clSize;
}
/**
* Sets the distance metric to use for determining closeness between data points
* @param dm the distance metric to determine nearest neighbors with
*/
public void setDistanceMetrics(DistanceMetric dm)
{
this.dm = dm;
}
/**
*
* @return the distance metric to determine nearest neighbors with
*/
public DistanceMetric getDistanceMetrics()
{
return dm;
}
/**
*
* @param m_pts the number of neighbors to consider, acts as a smoothing
* over the density estimate
*/
public void setMinPoints(int m_pts)
{
this.m_pts = m_pts;
}
/**
*
* @return the number of neighbors to consider, acts as a smoothing
* over the density estimate
*/
public int getMinPoints()
{
return m_pts;
}
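    /*
     * Usage sketch (illustrative, not part of the original source): assuming a numeric
     * DataSet named "data",
     *
     *   HDBSCAN hdbscan = new HDBSCAN(new EuclideanDistance(), 25);
     *   int[] labels = hdbscan.cluster(data, true, null); // entries left as -1 are noise
     *
     * clusters the data with m_pts = m_clSize = 25.
     */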
@Override
public HDBSCAN clone()
{
return new HDBSCAN(this);
}
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
if(designations == null)
designations = new int[dataSet.size()];
@SuppressWarnings("unchecked")
final List<Vec> X = dataSet.getDataVectors();
final int N = X.size();
List<Double> cache = dm.getAccelerationCache(X, parallel);
VectorCollection<Vec> X_vc = vc.clone();
X_vc.build(parallel, X, dm);
//1. Compute the core distance w.r.t. m_pts for all data objects in X.
/*
* (Core Distance): The core distance of an object x_p ∈ X w.r.t. m_pts,
* d_core(x_p), is the distance from x_p to its m_pts-nearest neighbor (incl. x_p)
*/
List<List<? extends VecPaired<Vec, Double>>> allNearestNeighbors = VectorCollectionUtils.allNearestNeighbors(X_vc, X, m_pts, parallel);
double[] core = new double[N];
for(int i = 0; i < N; i++)
core[i] = allNearestNeighbors.get(i).get(m_pts-1).getPair();
//2. Compute an MST of G_{m_pts}, the Mutual Reachability Graph.
        //Prim's algorithm from Wikipedia
double[] C = new double[N];
Arrays.fill(C, Double.MAX_VALUE);
int[] E = new int[N];
Arrays.fill(E, -1);//-1 "a special flag value indicating that there is no edge connecting v to earlier vertices"
FibHeap<Integer> Q = new FibHeap<>();
List<FibHeap.FibNode<Integer>> q_nodes = new ArrayList<>(N);
for(int i = 0; i < N; i++)
q_nodes.add(Q.insert(i, C[i]));
Set<Integer> F = new HashSet<>();
/**
         * The first two values are the edge's endpoints, the third value is its weight
*/
List<Tuple3<Integer, Integer, Double>> mst_edges = new ArrayList<>(N*2);
while(Q.size() > 0)
{
//a. Find and remove a vertex v from Q having the minimum possible value of C[v]
FibHeap.FibNode<Integer> node = Q.removeMin();
int v = node.getValue();
q_nodes.set(v, null);
//b. Add v to F and, if E[v] is not the special flag value, also add E[v] to F
F.add(v);
if(E[v] >= 0)
mst_edges.add(new Tuple3<>(v, E[v], C[v]));
/*
* c. Loop over the edges vw connecting v to other vertices w. For
* each such edge, if w still belongs to Q and vw has smaller weight
* than C[w]:
* Set C[w] to the cost of edge vw
* Set E[w] to point to edge vw.
*/
for(int w = 0; w < N; w++)
{
FibHeap.FibNode<Integer> w_node = q_nodes.get(w);
if (w_node == null)//this node is already in F
continue;
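                //mutual reachability distance: d_mreach(v, w) = max(core(v), core(w), d(v, w))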
double mutual_reach_dist_vw = max(core[v], max(core[w], dm.dist(v, w, X, cache)));
if (mutual_reach_dist_vw < C[w])
{
Q.decreaseKey(w_node, mutual_reach_dist_vw);
C[w] = mutual_reach_dist_vw;
E[w] = v;
}
}
}
//prim is done, we have the MST!
/*
* 3. Extend the MST to obtain MSText, by adding for each vertex a “self
* edge” with the core distance of the corresponding object as weight
*/
for(int i = 0; i < N; i++)
mst_edges.add(new Tuple3<>(i, i, core[i]));
//4. Extract the HDBSCAN hierarchy as a dendrogram from MSText:
List<UnionFind<Integer>> ufs = new ArrayList<>(N);
for(int i = 0; i < N; i++)
ufs.add(new UnionFind<>(i));
//sort edges from smallest weight to largest
PriorityQueue<Tuple3<Integer, Integer, Double>> edgeQ = new PriorityQueue<>(2*N,
(o1, o2) -> o1.getZ().compareTo(o2.getZ())
);
edgeQ.addAll(mst_edges);
//everyone starts in their own cluster!
List<List<Integer>> currentGroups = new ArrayList<>();
for(int i = 0; i < N; i++)
{
IntList il = new IntList(1);
il.add(i);
currentGroups.add(il);
}
int next_cluster_label = 0;
/**
* List of all the cluster options we have found
*/
List<List<Integer>> cluster_options = new ArrayList<>();
/**
* Stores a list for each cluster. Each value in the sub list is the
* weight at which that data point was added to the cluster
*/
List<DoubleList> entry_size = new ArrayList<>();
DoubleList birthSize = new DoubleList();
DoubleList deathSize = new DoubleList();
List<Pair<Integer, Integer>> children = new ArrayList<>();
int[] map_to_cluster_label = new int[N];
Arrays.fill(map_to_cluster_label, -1);
while(!edgeQ.isEmpty())
{
Tuple3<Integer, Integer, Double> edge = edgeQ.poll();
double weight = edge.getZ();
int from = edge.getX();
int to = edge.getY();
if(to == from)
continue;
UnionFind<Integer> union_from = ufs.get(from);
UnionFind<Integer> union_to = ufs.get(to);
int clust_A = union_from.find().getItem();
int clust_B = union_to.find().getItem();
UnionFind<Integer> clust_A_tmp = union_from.find();
UnionFind<Integer> clust_B_tmp = union_to.find();
union_from.union(union_to);
int a_size = currentGroups.get(clust_A).size();
int b_size = currentGroups.get(clust_B).size();
int new_size = a_size+b_size;
int mergedClust;
int otherClust;
if(union_from.find().getItem() == clust_A)
{
mergedClust = clust_A;
otherClust = clust_B;
}
else//other way around
{
mergedClust = clust_B;
otherClust = clust_A;
}
if(new_size >= m_clSize && a_size < m_clSize && b_size < m_clSize)
{//birth of a new cluster!
cluster_options.add(currentGroups.get(mergedClust));
DoubleList dl = new DoubleList(new_size);
for(int i = 0; i < new_size; i++)
dl.add(weight);
entry_size.add(dl);
                children.add(null);//we have no children!
birthSize.add(weight);
deathSize.add(Double.MAX_VALUE);//we don't know yet
map_to_cluster_label[mergedClust] = next_cluster_label;
next_cluster_label++;
}
else if(new_size >= m_clSize && a_size >= m_clSize && b_size >= m_clSize)
{//birth of a new cluster from the death of two others!
//record the weight that the other two died at
deathSize.set(map_to_cluster_label[mergedClust], weight);
deathSize.set(map_to_cluster_label[otherClust], weight);
//replace with new object so that old references in cluster_options are not altered further
currentGroups.set(mergedClust, new IntList(currentGroups.get(mergedClust)));
cluster_options.add(currentGroups.get(mergedClust));
DoubleList dl = new DoubleList(new_size);
for(int i = 0; i < new_size; i++)
dl.add(weight);
entry_size.add(dl);
children.add(new Pair<>(map_to_cluster_label[mergedClust], map_to_cluster_label[otherClust]));
birthSize.add(weight);
deathSize.add(Double.MAX_VALUE);//we don't know yet
map_to_cluster_label[mergedClust] = next_cluster_label;
next_cluster_label++;
}
else if(new_size >= m_clSize)
{//existing cluster has grown in size, so add the points and record their weight for use later
//index may change, so book keeping update
if(map_to_cluster_label[mergedClust] == -1)//the people being added are the new owners
{
                    //set to avoid index out of bounds below
int c = map_to_cluster_label[mergedClust] = map_to_cluster_label[otherClust];
//make sure we keep track of the correct list
cluster_options.set(c, currentGroups.get(mergedClust));
map_to_cluster_label[otherClust] = -1;
}
for(int indx : currentGroups.get(otherClust))
try
{
entry_size.get(map_to_cluster_label[mergedClust]).add(weight);
}
catch(IndexOutOfBoundsException ex)
{
ex.printStackTrace();
throw new FailedToFitException(ex);
}
}
currentGroups.get(mergedClust).addAll(currentGroups.get(otherClust));
currentGroups.set(otherClust, null);
}
        //Remove the last "cluster" because it's the trivial one that contains everything
cluster_options.remove(cluster_options.size()-1);
entry_size.remove(entry_size.size()-1);
birthSize.remove(birthSize.size()-1);
deathSize.remove(deathSize.size()-1);
children.remove(children.size()-1);
/**
* See equation (3) in paper
*/
double[] S = new double[cluster_options.size()];
for(int c = 0; c < S.length; c++)
{
double lambda_min = birthSize.getD(c);
double lambda_max = deathSize.getD(c);
double s = 0;
for(double f_x : entry_size.get(c))
s += Math.min(f_x, lambda_max) - lambda_min;
S[c] = s;
}
boolean[] toKeep = new boolean[S.length];
double[] S_hat = new double[cluster_options.size()];
Arrays.fill(toKeep, true);
Queue<Integer> notKeeping = new ArrayDeque<>();
for(int i = 0; i < S.length; i++)
{
Pair<Integer, Integer> child = children.get(i);
if(child == null)//I'm a leaf!
{
//for all leaf nodes, set ˆS(C_h)= S(C_h)
S_hat[i] = S[i];
continue;
}
int il = child.getFirstItem();
int ir = child.getSecondItem();
//If S(C_i) < ˆS(C_il)+ ˆ S(C_ir ), set ˆS(C_i)= ˆS(C_il)+ ˆS(C_ir )and set δi =0.
if(S[i] < S_hat[il] + S_hat[ir])
{
S_hat[i] = S_hat[il] + S_hat[ir];
toKeep[i] = false;
}
else//Else: set ˆS(C_i)= S(C_i)and set δ(·) = 0 for all clusters in C_i’s subtrees.
{
S_hat[i] = S[i];
//place children in q to process and set all sub children as not keeping
notKeeping.add(il);
notKeeping.add(ir);
while(!notKeeping.isEmpty())
{
int c = notKeeping.poll();
toKeep[c] = false;
Pair<Integer, Integer> c_children = children.get(c);
if(c_children == null)
continue;
notKeeping.add(c_children.getFirstItem());
notKeeping.add(c_children.getSecondItem());
}
}
}
//initially fill with -1 indicating it was noise
Arrays.fill(designations, 0, N, -1);
int clusters = 0;
for(int c = 0; c < toKeep.length; c++)
if(toKeep[c])
{
for(int indx : cluster_options.get(c))
designations[indx] = clusters;
clusters++;
}
return designations;
}
}
| 18,802 | 35.229287 | 143 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/KClusterer.java |
package jsat.clustering;
import java.util.List;
import java.util.concurrent.ExecutorService;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import static jsat.clustering.ClustererBase.createClusterListFromAssignmentArray;
/**
 * Defines a clustering method that requires the number of clusters in the data set to be known beforehand.
*
* @author Edward Raff
*/
public interface KClusterer extends Clusterer
{
/**
* Performs clustering on the given data set.
*
* @param dataSet the data points to perform clustering on
* @param clusters the number of clusters to assume
     * @param parallel {@code true} if the clustering should be done using multiple threads
     * @return a list of lists, where each inner list contains the data points of one cluster
*/
default public List<List<DataPoint>> cluster(DataSet dataSet, int clusters, boolean parallel)
{
int[] assignments = cluster(dataSet, clusters, parallel, (int[]) null);
return createClusterListFromAssignmentArray(assignments, dataSet);
}
public int[] cluster(DataSet dataSet, int clusters, boolean parallel, int[] designations);
/**
* Performs clustering on the given data set.
*
* @param dataSet the data points to perform clustering on
* @param clusters the number of clusters to assume
* @return A list of DataSets, where each DataSet contains the data
* points for one cluster in the group
*/
default public List<List<DataPoint>> cluster(DataSet dataSet, int clusters)
{
return cluster(dataSet, clusters, false);
}
default public int[] cluster(DataSet dataSet, int clusters, int[] designations)
{
return cluster(dataSet, clusters, false, designations);
}
/**
* Performs clustering on the given data set. The implementation will
* attempt to determine the best number of clusters for the given data.
*
* @param dataSet the data points to perform clustering on
* @param lowK the lower bound, inclusive, of the range to search
* @param highK the upper bound, inclusive, of the range to search
     * @param parallel {@code true} if the clustering should be done using multiple threads
     * @return a list of lists, where each inner list contains the data points of one cluster
*/
default public List<List<DataPoint>> cluster(DataSet dataSet, int lowK, int highK, boolean parallel)
{
int[] assignments = cluster(dataSet, lowK, highK, parallel, (int[]) null);
return createClusterListFromAssignmentArray(assignments, dataSet);
}
public int[] cluster(DataSet dataSet, int lowK, int highK, boolean parallel, int[] designations);
/**
* Performs clustering on the given data set. The implementation will
* attempt to determine the best number of clusters for the given data.
*
* @param dataSet the data points to perform clustering on
* @param lowK the lower bound, inclusive, of the range to search
* @param highK the upper bound, inclusive, of the range to search
* @return A list of DataSets, where each DataSet contains the data
* points for one cluster in the group
*/
default public List<List<DataPoint>> cluster(DataSet dataSet, int lowK, int highK)
{
return cluster(dataSet, lowK, highK, false);
}
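    /*
     * Usage sketch (illustrative, not part of the original source): any KClusterer
     * implementation, here referred to as "kc", can be asked for an exact number of
     * clusters or for a search over a range:
     *
     *   int[] exact = kc.cluster(data, 5, true, null); // exactly 5 clusters
     *   int[] ranged = kc.cluster(data, 2, 10, true, null); // best k found in [2, 10]
     */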
default public int[] cluster(DataSet dataSet, int lowK, int highK, int[] designations)
{
return cluster(dataSet, lowK, highK, false, designations);
}
@Override
public KClusterer clone();
}
| 3,525 | 37.326087 | 109 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/KClustererBase.java | package jsat.clustering;
import java.util.List;
import java.util.concurrent.ExecutorService;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
/**
 * A base class that provides implementations of the clustering methods that return a list of lists
 * in terms of their int array counterparts.
* @author Edward Raff
*/
public abstract class KClustererBase extends ClustererBase implements KClusterer
{
private static final long serialVersionUID = 2542432122353325407L;
@Override
abstract public KClusterer clone();
}
| 556 | 24.318182 | 122 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/LSDBC.java | package jsat.clustering;
import java.util.*;
import jsat.DataSet;
import jsat.linear.Vec;
import jsat.linear.VecPaired;
import jsat.linear.distancemetrics.*;
import jsat.linear.vectorcollection.*;
import jsat.parameters.*;
import jsat.utils.*;
/**
* A parallel implementation of <i>Locally Scaled Density Based Clustering</i>.
* <br><br>
* See paper:<br>
* <a href="http://www.springerlink.com/index/0116171485446868.pdf">Biçici, E.,
 * & Yuret, D. (2007). Locally scaled density based clustering.
 * In B. Beliczynski, A. Dzielinski, M. Iwanowski, & B. Ribeiro (Eds.),
* Adaptive and Natural Computing Algorithms (pp. 739–748).
* Warsaw, Poland: Springer-Verlag. </a>
* @author Edward Raff
*/
public class LSDBC extends ClustererBase implements Parameterized
{
private static final long serialVersionUID = 6626217924334267681L;
/**
* {@value #DEFAULT_NEIGHBORS} is the default number of neighbors used when
* performing clustering
*
* @see #setNeighbors(int)
*/
public static final int DEFAULT_NEIGHBORS = 15;
/**
* {@value #DEFAULT_ALPHA} is the default scale value used when performing clustering.
* @see #setAlpha(double)
*/
public static final double DEFAULT_ALPHA = 4;
private static final int UNCLASSIFIED = -1;
private DistanceMetric dm;
private VectorCollection<VecPaired<Vec, Integer>> vc = new DefaultVectorCollection<>();
/**
* The weight parameter for forming new clusters
*/
private double alpha;
/**
* The number of neighbors to use
*/
private int k;
/**
* Creates a new LSDBC clustering object using the given distance metric
* @param dm the distance metric to use
* @param alpha the scale factor to use when forming clusters
* @param neighbors the number of neighbors to consider when determining clusters
*/
public LSDBC(DistanceMetric dm, double alpha, int neighbors)
{
setDistanceMetric(dm);
setAlpha(alpha);
setNeighbors(neighbors);
}
/**
* Creates a new LSDBC clustering object using the given distance metric
* @param dm the distance metric to use
* @param alpha the scale factor to use when forming clusters
*/
public LSDBC(DistanceMetric dm, double alpha)
{
this(dm, alpha, DEFAULT_NEIGHBORS);
}
/**
* Creates a new LSDBC clustering object using the given distance metric
* @param dm the distance metric to use
*/
public LSDBC(DistanceMetric dm)
{
this(dm, DEFAULT_ALPHA);
}
/**
* Creates a new LSDBC clustering object using the {@link EuclideanDistance}
* and default parameter values.
*/
public LSDBC()
{
this(new EuclideanDistance());
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public LSDBC(LSDBC toCopy)
{
this.alpha = toCopy.alpha;
this.dm = toCopy.dm.clone();
this.k = toCopy.k;
this.vc = toCopy.vc.clone();
}
/**
* Sets the vector collection factory used for acceleration of neighbor searches.
* @param vc the vector collection to use
*/
public void setVectorCollectionFactory(VectorCollection<VecPaired<Vec, Integer>> vc)
{
this.vc = vc;
}
/**
* Sets the distance metric used when performing clustering.
* @param dm the distance metric to use.
*/
public void setDistanceMetric(DistanceMetric dm)
{
if(dm != null)
this.dm = dm;
}
/**
* Returns the distance metric used when performing clustering.
* @return the distance metric used
*/
@SuppressWarnings("unused")
private DistanceMetric getDistanceMetric()
{
return dm;
}
/**
* Sets the number of neighbors that will be considered when clustering
* data points
* @param neighbors the number of neighbors the algorithm will use
*/
public void setNeighbors(int neighbors)
{
if(neighbors <= 0)
throw new ArithmeticException("Can not use a non positive number of neighbors");
this.k = neighbors;
}
/**
* Returns the number of neighbors that will be considered when clustering
* data points
* @return the number of neighbors the algorithm will use
*/
public int getNeighbors()
{
return k;
}
/**
* Sets the scale value that will control how many points are added to a
* cluster. Smaller values will create more, smaller clusters - and more
     * points will be labeled as noise. Larger values cause larger and fewer
* clusters.
*
* @param alpha the scale value to use
*/
public void setAlpha(double alpha)
{
if(alpha <= 0 || Double.isNaN(alpha) || Double.isInfinite(alpha))
throw new ArithmeticException("Can not use the non positive scale value " + alpha );
this.alpha = alpha;
}
/**
* Returns the scale value that will control how many points are added to a
* cluster. Smaller values will create more, smaller clusters - and more
     * points will be labeled as noise. Larger values cause larger and fewer
* clusters.
*
* @return the scale value to use
*/
public double getAlpha()
{
return alpha;
}
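    /*
     * Usage sketch (illustrative, not part of the original source): assuming a numeric
     * DataSet named "data",
     *
     *   LSDBC lsdbc = new LSDBC(new EuclideanDistance(), 4.0, 15);
     *   int[] labels = lsdbc.cluster(data, true, null); // points never expanded stay -1 (noise)
     */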
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
if(designations == null)
designations = new int[dataSet.size()];
//Compute all k-NN
List<List<? extends VecPaired<VecPaired<Vec, Integer>, Double>>> knnVecList;
//Set up
List<VecPaired<Vec, Integer>> vecs = new ArrayList<>(dataSet.size());
for (int i = 0; i < dataSet.size(); i++)
vecs.add(new VecPaired<>(dataSet.getDataPoint(i).getNumericalValues(), i));
TrainableDistanceMetric.trainIfNeeded(dm, dataSet, parallel);
vc.build(parallel, vecs, dm);
knnVecList = VectorCollectionUtils.allNearestNeighbors(vc, vecs, k+1, parallel);
//Sort
IndexTable indexTable = new IndexTable(knnVecList, new Comparator()
{
@Override
public int compare(Object o1, Object o2)
{
List<VecPaired<VecPaired<Vec, Integer>, Double>> l1 =
(List<VecPaired<VecPaired<Vec, Integer>, Double>>) o1;
List<VecPaired<VecPaired<Vec, Integer>, Double>> l2 =
(List<VecPaired<VecPaired<Vec, Integer>, Double>>) o2;
return Double.compare(getEps(l1), getEps(l2));
}
});
//Assign clusters, does very little computation. No need to parallelize expandCluster
Arrays.fill(designations, UNCLASSIFIED);
int clusterID = 0;
for(int i = 0; i < indexTable.length(); i++)
{
int p = indexTable.index(i);
if(designations[p] == UNCLASSIFIED && localMax(p, knnVecList))
expandCluster(p, clusterID++, knnVecList, designations);
}
return designations;
}
/**
* Performs the main clustering loop of expandCluster.
* @param neighbors the list of neighbors
* @param i the index of <tt>neighbors</tt> to consider
* @param designations the array of cluster designations
* @param clusterID the current clusterID to assign
* @param seeds the stack to hold all seeds in
*/
private void addSeed(List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> neighbors, int i, int[] designations, int clusterID, Stack<Integer> seeds)
{
int index = neighbors.get(i).getVector().getPair();
if (designations[index] != UNCLASSIFIED)
return;
designations[index] = clusterID;
seeds.add(index);
}
/**
* Convenience method. Gets the eps value for the given set of neighbors
* @param neighbors the set of neighbors, with index 0 being the point itself
* @return the eps of the <tt>k</tt><sup>th</sup> neighbor
*/
private double getEps(List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> neighbors)
{
return neighbors.get(k).getPair();
}
/**
     * Returns true if the given point is a local maximum, meaning it is denser than all of its neighbors
* @param p the index of the data point in question
* @param knnVecList the neighbor list
* @return <tt>true</tt> if it is a local max, <tt> false</tt> otherwise.
*/
private boolean localMax(int p, List<List<? extends VecPaired<VecPaired<Vec, Integer>, Double>>> knnVecList)
{
List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> neighbors = knnVecList.get(p);
double myEps = getEps(neighbors);
for(int i = 1; i < neighbors.size(); i++)
{
int neighborP = neighbors.get(i).getVector().getPair();
if(getEps(knnVecList.get(neighborP)) < myEps)
return false;
}
return true;
}
/**
* Does the cluster assignment
* @param p the current index of a data point to assign to a cluster
* @param clusterID the current cluster ID to assign
* @param knnVecList the in order list of every index and its nearest neighbors
* @param designations the array to store cluster designations in
*/
private void expandCluster(int p, int clusterID, List<List<? extends VecPaired<VecPaired<Vec, Integer>, Double>>> knnVecList, int[] designations)
{
designations[p] = clusterID;
double pointEps;
int n;
Stack<Integer> seeds = new Stack<>();
{
List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> neighbors = knnVecList.get(p);
for (int i = 1; i < neighbors.size(); i++)
addSeed(neighbors, i, designations, clusterID, seeds);
pointEps = getEps(neighbors);
n = neighbors.get(k).length();
}
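        //grow the cluster only while a point's local eps stays within a factor of
        //2^(alpha/n) of the seed's eps, where n is the dimensionality of the data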
final double scale = Math.pow(2, alpha / n);
while (!seeds.isEmpty())
{
int currentP = seeds.pop();
List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> neighbors = knnVecList.get(currentP);
double currentPEps = getEps(neighbors);
if (currentPEps <= scale * pointEps)
{
for (int i = 1; i < neighbors.size(); i++)
addSeed(neighbors, i, designations, clusterID, seeds);
}
}
}
@Override
public LSDBC clone()
{
        return new LSDBC(this);
}
}
| 10,750 | 31.978528 | 158 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/MEDDIT.java | /*
* Copyright (C) 2018 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.clustering;
import java.util.*;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.atomic.AtomicIntegerArray;
import java.util.concurrent.atomic.DoubleAdder;
import java.util.concurrent.atomic.LongAdder;
import jsat.DataSet;
import static jsat.clustering.SeedSelectionMethods.selectIntialPoints;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.TrainableDistanceMetric;
import jsat.math.OnLineStatistics;
import jsat.utils.IntList;
import jsat.utils.ListUtils;
import jsat.utils.SystemInfo;
import jsat.utils.concurrent.AtomicDoubleArray;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
/**
*
* @author Edward Raff <[email protected]>
*/
public class MEDDIT extends PAM
{
private double tolerance = 0.01;
public MEDDIT(DistanceMetric dm, Random rand, SeedSelectionMethods.SeedSelection seedSelection)
{
super(dm, rand, seedSelection);
}
public MEDDIT(DistanceMetric dm, Random rand)
{
super(dm, rand);
}
public MEDDIT(DistanceMetric dm)
{
super(dm);
}
public MEDDIT()
{
super();
}
public void setTolerance(double tolerance)
{
this.tolerance = tolerance;
}
public double getTolerance()
{
return tolerance;
}
@Override
protected double cluster(DataSet data, boolean doInit, int[] medioids, int[] assignments, List<Double> cacheAccel, boolean parallel)
{
DoubleAdder totalDistance =new DoubleAdder();
LongAdder changes = new LongAdder();
Arrays.fill(assignments, -1);//-1, invalid category!
List<Vec> X = data.getDataVectors();
final List<Double> accel;
final int N = data.size();
if(doInit)
{
TrainableDistanceMetric.trainIfNeeded(dm, data);
accel = dm.getAccelerationCache(X);
selectIntialPoints(data, medioids, dm, accel, rand, seedSelection);
}
else
accel = cacheAccel;
double tol;
if(tolerance < 0)
tol = 1.0/data.size();
else
tol = tolerance;
int iter = 0;
do
{
changes.reset();
totalDistance.reset();
ParallelUtils.run(parallel, N, (start, end)->
{
for(int i = start; i < end; i++)
{
int assignment = 0;
double minDist = dm.dist(medioids[0], i, X, accel);
for (int k = 1; k < medioids.length; k++)
{
double dist = dm.dist(medioids[k], i, X, accel);
if (dist < minDist)
{
minDist = dist;
assignment = k;
}
}
//Update which cluster it is in
if (assignments[i] != assignment)
{
changes.increment();
assignments[i] = assignment;
}
totalDistance.add(minDist * minDist);
}
});
//Update the medoids
IntList owned_by_k = new IntList(N);
for(int k = 0; k < medioids.length; k++)
{
owned_by_k.clear();
for(int i = 0; i < N; i++)
if(assignments[i] == k)
owned_by_k.add(i);
if(owned_by_k.isEmpty())
continue;
medioids[k] = medoid(parallel, owned_by_k, tol, X, dm, accel);
}
}
while( changes.sum() > 0 && iter++ < iterLimit);
return totalDistance.sum();
}
/**
* Computes the medoid of the data
* @param parallel whether or not the computation should be done using multiple cores
* @param X the list of all data
* @param dm the distance metric to get the medoid with respect to
* @return the index of the point in <tt>X</tt> that is the medoid
*/
public static int medoid(boolean parallel, List<? extends Vec> X, DistanceMetric dm)
{
return medoid(parallel, X, 1.0/X.size(), dm);
}
/**
* Computes the medoid of the data
* @param parallel whether or not the computation should be done using multiple cores
* @param X the list of all data
     * @param tol the error tolerance used when estimating the medoid; intuitively, smaller values demand higher confidence (and thus more distance computations) before a point is accepted as the medoid
* @param dm the distance metric to get the medoid with respect to
* @return the index of the point in <tt>X</tt> that is the medoid
*/
public static int medoid(boolean parallel, List<? extends Vec> X, double tol, DistanceMetric dm)
{
IntList order = new IntList(X.size());
ListUtils.addRange(order, 0, X.size(), 1);
List<Double> accel = dm.getAccelerationCache(X, parallel);
return medoid(parallel, order, tol, X, dm, accel);
}
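    /*
     * Usage sketch (illustrative, not part of the original source): find the medoid of a
     * dataset's vectors under the Euclidean distance with the default tolerance of 1/N,
     * assuming a DataSet named "data":
     *
     *   int medoidIndex = MEDDIT.medoid(true, data.getDataVectors(), new EuclideanDistance());
     */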
/**
* Computes the medoid of a sub-set of data
* @param parallel whether or not the computation should be done using multiple cores
* @param indecies the indexes of the points to get the medoid of
* @param X the list of all data
* @param dm the distance metric to get the medoid with respect to
* @param accel the acceleration cache for the distance metric
* @return the index value contained within indecies that is the medoid
*/
public static int medoid(boolean parallel, Collection<Integer> indecies, List<? extends Vec> X, DistanceMetric dm, List<Double> accel)
{
return medoid(parallel, indecies, 1.0/indecies.size(), X, dm, accel);
}
/**
* Computes the medoid of a sub-set of data
* @param parallel whether or not the computation should be done using multiple cores
* @param indecies the indexes of the points to get the medoid of
     * @param tol the error tolerance used when estimating the medoid; intuitively, smaller values demand higher confidence (and thus more distance computations) before a point is accepted as the medoid
* @param X the list of all data
* @param dm the distance metric to get the medoid with respect to
* @param accel the acceleration cache for the distance metric
* @return the index value contained within indecies that is the medoid
*/
public static int medoid(boolean parallel, Collection<Integer> indecies, double tol, List<? extends Vec> X, DistanceMetric dm, List<Double> accel)
{
final int N = indecies.size();
        if(tol <= 0 || N < SystemInfo.LogicalCores)//Really just not enough points, let's simplify
return PAM.medoid(parallel, indecies, X, dm, accel);
final double log2d = Math.log(1)-Math.log(tol);
/**
* Online estimate of the standard deviation that will be used
*/
final OnLineStatistics distanceStats;
/**
* This array contains the current sum of all distance computations done
* for each index. Corresponds to mu in the paper.
*/
AtomicDoubleArray totalDistSum = new AtomicDoubleArray(N);
/**
* This array contains the current number of distance computations that
* have been done for each feature index. Corresponds to T_i in the
* paper.
*/
AtomicIntegerArray totalDistCount = new AtomicIntegerArray(N);
final int[] indx_map = indecies.stream().mapToInt(i->i).toArray();
final boolean symetric = dm.isSymmetric();
final double[] lower_bound_est = new double[N];
final double[] upper_bound_est = new double[N];
ThreadLocal<Random> localRand = ThreadLocal.withInitial(RandomUtil::getRandom);
        //First pass: let's pull every "arm" (compute a distance) for each datum at least once, so that we have estimates to work with.
distanceStats = ParallelUtils.run(parallel, N, (start, end)->
{
Random rand = localRand.get();
OnLineStatistics localStats = new OnLineStatistics();
for(int i = start; i < end; i++)
{
int j = rand.nextInt(N);
while(j == i)
j = rand.nextInt(N);
double d_ij = dm.dist(indx_map[i], indx_map[j], X, accel);
localStats.add(d_ij);
totalDistSum.addAndGet(i, d_ij);
totalDistCount.incrementAndGet(i);
if(symetric)
{
totalDistSum.addAndGet(j, d_ij);
totalDistCount.incrementAndGet(j);
}
}
return localStats;
}, (a,b)-> OnLineStatistics.add(a, b));
        //Now let's prepare the lower and upper bound estimates
ConcurrentSkipListSet<Integer> lowerQ = new ConcurrentSkipListSet<>((Integer o1, Integer o2) ->
{
int cmp = Double.compare(lower_bound_est[o1], lower_bound_est[o2]);
if(cmp == 0)//same bounds, but sort by identity to avoid issues
cmp = o1.compareTo(o2);
return cmp;
});
ConcurrentSkipListSet<Integer> upperQ = new ConcurrentSkipListSet<>((Integer o1, Integer o2) ->
{
int cmp = Double.compare(upper_bound_est[o1], upper_bound_est[o2]);
if(cmp == 0)//same bounds, but sort by identity to avoid issues
cmp = o1.compareTo(o2);
return cmp;
});
ParallelUtils.run(parallel, N, (start, end)->
{
double v = distanceStats.getVarance();
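            //confidence radius for arm i: c_i = sqrt(2 * v * log(1/tol) / T_i), giving the
            //interval [mean_i - c_i, mean_i + c_i] used as the lower/upper bound estimates below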
for(int i = start; i < end; i++)
{
int T_i = totalDistCount.get(i);
double c_i = Math.sqrt(2*v*log2d/T_i);
lower_bound_est[i] = totalDistSum.get(i)/T_i - c_i;
upper_bound_est[i] = totalDistSum.get(i)/T_i + c_i;
lowerQ.add(i);
upperQ.add(i);
}
});
        //Now let's start sampling!
        //how many points should we pick and sample? Not really discussed in the paper - but a good idea for efficiency (we don't want to pay that Q cost any more than necessary)
/**
* to-pull is how many arms we will select per iteration
*/
int num_to_pull;
/**
* to sample is how many random pairs we will pick for each pulled arm
*/
int samples;
if(parallel)
{
num_to_pull = Math.max(SystemInfo.LogicalCores, 32);
samples = Math.min(32, N-1);
}
else
{
num_to_pull = Math.min(32, N);
samples = Math.min(32, N-1);
}
/**
* The levers we will pull this iteration, and then add back in
*/
IntList to_pull = new IntList();
/**
* the levers we must add back in but not update b/c they hit max evaluations and the confidence bound is tight
*/
IntList toAddBack = new IntList();
boolean[] isExact = new boolean[N];
Arrays.fill(isExact, false);
int numExact = 0;
while(numExact < N)//loop should break out before this ever happens
{
to_pull.clear();
toAddBack.clear();
            //CONVERGENCE CHECK
if(upper_bound_est[upperQ.first()] < lower_bound_est[lowerQ.first()])
{
//WE are done!
return indx_map[upperQ.first()];
}
while(to_pull.size() < num_to_pull)
{
if(lowerQ.isEmpty())
break;//we've basically evaluated everyone
int i = lowerQ.pollFirst();
                if(totalDistCount.get(i) >= N-1 && !isExact[i])//Let's just replace with the exact value
{
double avg_d_i = ParallelUtils.run(parallel, N, (start, end)->
{
double d = 0;
for (int j = start; j < end; j++)
if (i != j)
d += dm.dist(indx_map[i], indx_map[j], X, accel);
return d;
}, (a, b)->a+b);
avg_d_i /= N-1;
upperQ.remove(i);
lower_bound_est[i] = upper_bound_est[i] = avg_d_i;
totalDistSum.set(i, avg_d_i);
totalDistCount.set(i, N);
isExact[i] = true;
numExact++;
// System.out.println("Num Exact: " + numExact);
                    //OK, the exact value for datum i is set.
toAddBack.add(i);
}
if(!isExact[i])
to_pull.add(i);
}
            //OK, let's now pull a bunch of levers / measure distances
OnLineStatistics changeInStats = ParallelUtils.run(parallel, to_pull.size(), (start, end)->
{
Random rand = localRand.get();
OnLineStatistics localStats = new OnLineStatistics();
for(int i_count = start; i_count < end; i_count++)
{
int i = to_pull.get(i_count);
for(int j_count = 0; j_count < samples; j_count++)
{
int j = rand.nextInt(N);
while(j == i)
j = rand.nextInt(N);
double d_ij = dm.dist(indx_map[i], indx_map[j], X, accel);
localStats.add(d_ij);
totalDistSum.addAndGet(i, d_ij);
totalDistCount.incrementAndGet(i);
if(symetric && !isExact[j])
{
totalDistSum.addAndGet(j, d_ij);
totalDistCount.incrementAndGet(j);
}
}
}
return localStats;
}, (a,b) -> OnLineStatistics.add(a, b));
if(!to_pull.isEmpty())//might be empty if everyone went over the threshold
distanceStats.add(changeInStats);
//update bounds and re-insert
double v = distanceStats.getVarance();
//we are only updating the bounds on the levers we pulled
//that may mean some old bounds are stale
//these values are exact
lowerQ.addAll(toAddBack);
upperQ.addAll(toAddBack);
upperQ.removeAll(to_pull);
for(int i : to_pull)
{
int T_i = totalDistCount.get(i);
double c_i = Math.sqrt(2*v*log2d/T_i);
lower_bound_est[i] = totalDistSum.get(i)/T_i - c_i;
upper_bound_est[i] = totalDistSum.get(i)/T_i + c_i;
lowerQ.add(i);
upperQ.add(i);
}
}
//We can reach this point on small N or low D datasets. Iterate and return the correct value
int bestIndex = 0;
for(int i = 1; i < N; i++)
if(lower_bound_est[i] < lower_bound_est[bestIndex])
bestIndex = i;
        return indx_map[bestIndex];
}
}
| 16,297 | 35.542601 | 164 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/MeanShift.java | package jsat.clustering;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.DataSet;
import jsat.distributions.empirical.kernelfunc.GaussKF;
import jsat.distributions.empirical.kernelfunc.KernelFunction;
import jsat.distributions.multivariate.MetricKDE;
import jsat.distributions.multivariate.MultivariateKDE;
import jsat.distributions.multivariate.ProductKDE;
import jsat.exceptions.FailedToFitException;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
import jsat.linear.VecPaired;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import static jsat.utils.SystemInfo.LogicalCores;
import jsat.utils.concurrent.ParallelUtils;
/**
* The MeanShift algorithms performs clustering on a data set by letting the
* data speak for itself and performing a mode search amongst the data set,
* returning a cluster for each discovered mode. <br>
* <br>
* While not normally discussed in the context of Mean Shift, this
 * implementation has rudimentary outlier removal; outliers will not be included
* in the clustering. <br>
* <br>
* The mean shift requires a {@link MultivariateKDE} to run. Contrary to use in
* density estimation, where the {@link KernelFunction} used has only a minor
* impact on the results, it is highly recommended you use the {@link GaussKF}
 * for the MeanShift method. This is because of its large support and better
 * behaved derivative, which helps avoid oscillating convergence.
 * <br><br>
 * Implementation Note: This implementation does not snap the values to a grid,
 * which is the cause of the previously noted oscillation in convergence.
*
* @author Edward Raff
*/
public class MeanShift implements Clusterer
{
private static final long serialVersionUID = 4061491342362690455L;
/**
* The default number of {@link #getMaxIterations() } is
* {@value #DefaultMaxIterations}
*/
public static final int DefaultMaxIterations = 1000;
/**
* The default value of {@link #getScaleBandwidthFactor() } is {@value #DefaultScaleBandwidthFactor}
*/
public static final double DefaultScaleBandwidthFactor = 1.0;
private MultivariateKDE mkde;
private int maxIterations = DefaultMaxIterations;
private double scaleBandwidthFactor = DefaultScaleBandwidthFactor;
/**
* Creates a new MeanShift clustering object using a {@link MetricKDE},
* the {@link GaussKF}, and the {@link EuclideanDistance}.
*/
public MeanShift()
{
this(new EuclideanDistance());
}
/**
* Creates a new MeanShift clustering object using a {@link MetricKDE} and
* the {@link GaussKF}.
* @param dm the distance metric to use
*/
public MeanShift(DistanceMetric dm)
{
this(new MetricKDE(GaussKF.getInstance(), dm));
}
/**
* Creates a new MeanShift clustering object.
* <br>
* NOTE: {@link ProductKDE} does not currently support the functions needed to work with MeanShift.
* @param mkde the KDE to use in the clustering process.
*/
public MeanShift(MultivariateKDE mkde)
{
this.mkde = mkde;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public MeanShift(MeanShift toCopy)
{
this.mkde = toCopy.mkde.clone();
this.maxIterations = toCopy.maxIterations;
this.scaleBandwidthFactor = toCopy.scaleBandwidthFactor;
}
/**
     * Sets the maximum number of iterations the algorithm will go through; the
     * algorithm terminates at this limit even if convergence has not occurred.
* @param maxIterations the maximum number of iterations
* @throws ArithmeticException if a value less than 1 is given
*/
public void setMaxIterations(int maxIterations)
{
if(maxIterations <= 0)
throw new ArithmeticException("Invalid iteration count, " + maxIterations);
this.maxIterations = maxIterations;
}
/**
     * Returns the maximum number of iterations the algorithm will go through; the
     * algorithm terminates at this limit even if convergence has not occurred.
* @return the maximum number of iterations
*/
public int getMaxIterations()
{
return maxIterations;
}
/**
* Sets the value by which the bandwidth of the {@link MultivariateKDE} will
     * be scaled.
* @param scaleBandwidthFactor the value to scale bandwidth by
* @throws ArithmeticException if the value given is {@link Double#NaN NaN }
* or {@link Double#POSITIVE_INFINITY infinity}
*/
public void setScaleBandwidthFactor(double scaleBandwidthFactor)
{
if(Double.isNaN(scaleBandwidthFactor) || Double.isInfinite(scaleBandwidthFactor))
throw new ArithmeticException("Invalid scale factor, " + scaleBandwidthFactor);
this.scaleBandwidthFactor = scaleBandwidthFactor;
}
/**
* Returns the value by which the bandwidth of the {@link MultivariateKDE} will
     * be scaled.
* @return the value to scale bandwidth by
*/
public double getScaleBandwidthFactor()
{
return scaleBandwidthFactor;
}
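    /*
     * Usage sketch (illustrative, not part of the original source): assuming a numeric
     * DataSet named "data",
     *
     *   MeanShift ms = new MeanShift(new EuclideanDistance());
     *   int[] labels = ms.cluster(data, true, null); // points with no neighbors are marked -1
     *
     * discovers the number of clusters automatically from the modes of the KDE.
     */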
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
try
{
if(designations == null || designations.length < dataSet.size())
designations = new int[dataSet.size()];
boolean[] converged = new boolean[dataSet.size()];
Arrays.fill(converged, false);
final KernelFunction k = mkde.getKernelFunction();
mkde.setUsingData(dataSet, parallel);
mkde.scaleBandwidth(scaleBandwidthFactor);
Vec[] xit = new Vec[converged.length];
for(int i = 0; i < xit.length; i++)
xit[i] = dataSet.getDataPoint(i).getNumericalValues().clone();
mainLoop(converged, xit, designations, k, parallel);
assignmentStep(converged, xit, designations);
return designations;
}
catch (InterruptedException ex)
{
Logger.getLogger(MeanShift.class.getName()).log(Level.SEVERE, null, ex);
throw new FailedToFitException(ex);
}
catch (BrokenBarrierException ex)
{
Logger.getLogger(MeanShift.class.getName()).log(Level.SEVERE, null, ex);
throw new FailedToFitException(ex);
}
}
private void assignmentStep(boolean[] converged, Vec[] xit, int[] designations)
{
        //We now repurpose the 'converged' array to indicate if the point has not yet been assigned to a cluster
        //Loop through and assign clusters
int curClusterID = 0;
boolean progress = true;
while(progress)
{
progress = false;
int basePos = 0;//This will be the mode of our cluster
while(basePos < converged.length && !converged[basePos])
basePos++;
for(int i = basePos; i < converged.length; i++)
{
if(!converged[i] || designations[i] == -1)
continue;//Already assigned
progress = true;
if(Math.abs(xit[basePos].pNormDist(2, xit[i])) < 1e-3)
{
converged[i] = false;
designations[i] = curClusterID;
}
}
curClusterID++;
}
}
private void mainLoop(final boolean[] converged, final Vec[] xit, final int[] designations, final KernelFunction k, boolean parallel) throws InterruptedException, BrokenBarrierException
{
AtomicBoolean progress = new AtomicBoolean(true);
int count = 0;
/*
* +1 b/c we have to wait for the worker threads, but we also want this
* calling thread to wait with them. Hence, +1
*/
CyclicBarrier barrier = new CyclicBarrier(LogicalCores+1);
final ThreadLocal<Vec> localScratch = ThreadLocal.withInitial(()->new DenseVector(xit[0].length()));
while(progress.get() && count++ < maxIterations)
{
progress.set(false);
ParallelUtils.run(parallel, converged.length, (i)->
{
if(converged[i])
return;
progress.lazySet(true);
convergenceStep(xit, i, converged, designations, localScratch.get(), k);
});
}
//Fill b/c we may have bailed out due to maxIterations
Arrays.fill(converged, true);
}
/**
* Computes the meanShift of the point at the given index, and then updates the vector array to indicate the movement towards the mode for the data point.
* @param xit the array of the current data point's positions
* @param i the index of the data point being considered
* @param converged the array used to indicate the convergence to a mode
* @param designations the array to store value designations in
* @param scratch the vector to compute work with
* @param k the kernel function to use
*/
private void convergenceStep(final Vec[] xit, int i, final boolean[] converged, final int[] designations, final Vec scratch, final KernelFunction k)
{
double denom = 0.0;
Vec xCur = xit[i];
List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> contrib = mkde.getNearbyRaw(xCur);
if(contrib.size() == 1)
{
            //If a point has no neighbors, it cannot shift and is its own mode - so we mark it as noise
converged[i] = true;
designations[i] = -1;
}
else
{
scratch.zeroOut();
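            //mean shift update: the new position is sum_j g_j * x_j / sum_j g_j, where
            //g_j = -k'(d_j) is the negated kernel derivative at the distance term returned for neighbor j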
for(VecPaired<VecPaired<Vec, Integer>, Double> v : contrib)
{
double g = - k.kPrime(v.getPair());
denom += g;
scratch.mutableAdd(g, v);
}
scratch.mutableDivide(denom);
if( Math.abs(scratch.pNormDist(2, xCur)) < 1e-5)
converged[i] = true;
scratch.copyTo(xCur);
}
}
@Override
public MeanShift clone()
{
return new MeanShift(this);
}
}
| 10,478 | 35.010309 | 189 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/OPTICS.java | package jsat.clustering;
import java.util.*;
import jsat.DataSet;
import jsat.linear.Vec;
import jsat.linear.VecPaired;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.linear.vectorcollection.*;
import jsat.math.OnLineStatistics;
import jsat.parameters.*;
import jsat.utils.IntList;
import jsat.utils.IntSet;
/**
* An Implementation of the OPTICS algorithm, which is a generalization of
* {@link DBSCAN}. OPTICS creates an ordering of the ports, and then clusters
* can be extracted from this ordering in numerous different ways.
* <br>
* NOTE: The original clustering method proposed in the paper is fairly
* complicated, and its implementation is not yet complete. Though it does
* perform some amount of clustering, it may not return the expected results.
* <br><br>
* See original paper<br>
 * Ankerst, M., Breunig, M., Kriegel, H.-P., & Sander, J. (1999).
* <a href="http://dl.acm.org/citation.cfm?id=304187"><i>OPTICS: ordering points
* to identify the clustering structure</i></a>. Proceedings of the
* 1999 ACM SIGMOD international conference on Management of data
* (Vol. 28, pp. 49–60). Philadelphia, Pennsylvania: ACM.
*
* @author Edward Raff
*/
public class OPTICS extends ClustererBase implements Parameterized
{
private static final long serialVersionUID = -1093772096278544211L;
private static final int NOISE = -1;
private static double UNDEFINED = Double.POSITIVE_INFINITY;
/**
* The default value for xi is {@value #DEFAULT_XI}
*/
public static final double DEFAULT_XI = 0.005;
/**
* The default number of points to consider is {@value #DEFAULT_MIN_POINTS}.
*/
public static final int DEFAULT_MIN_POINTS = 10;
/**
* The default method used to extract clusters in OPTICS
*/
public static final ExtractionMethod DEFAULT_EXTRACTION_METHOD = ExtractionMethod.THRESHHOLD_FIXUP;
private DistanceMetric dm;
private VectorCollection<VecPaired<Vec, Integer>> vc = new DefaultVectorCollection<>();
private double radius = 1;
private int minPts;
private double[] core_distance;
/**
* Stores the reachability distance of each point in the order they were
* first observed in the data set. After clustering is finished, it is
* altered to be in the reachability order used in clustering
*/
private double[] reach_d;
/**
* Whether or not the given data point has been processed
*/
private boolean[] processed;
private Vec[] allVecs;
private double xi;
//XXX useless assignment
private double one_min_xi;// = 1.0-xi;
private ExtractionMethod extractionMethod = DEFAULT_EXTRACTION_METHOD;
/**
* The objects contained in OrderSeeds are sorted by their
* reachability-distance to the closest core object from which they have
* been directly density reachable.
*
     * The paired double is their distance, the paired integer is the vector's
     * index in the data set
*
* This is only used during building. We should probably refactor this out
*/
private PriorityQueue<Integer> orderdSeeds;
@Override
public OPTICS clone()
{
return new OPTICS(this);
}
/**
* Enum to indicate which method of extracting clusters should be used on
* the reachability plot.
*
*/
public enum ExtractionMethod
{
/**
* Uses the original clustering method proposed in the OPTICS paper.<br>
* NOTE: Implementation not yet complete
*/
XI_STEEP_ORIGINAL,
/**
* Forms clusters in the reachability plot by drawing a line across it,
* and using the separations to mark clusters
*/
THRESHHOLD,
/**
* Forms clusters in the reachability plot by drawing a line across it,
* and using the separations to mark clusters. It then de-noises points
* by checking their nearest neighbors for consensus
*/
THRESHHOLD_FIXUP
}
/**
     * Creates a new OPTICS cluster object. Because OPTICS is not sensitive to the
     * radius, it is estimated from the data and set to a sufficiently large
* value. The {@link EuclideanDistance} will be used as the metric.
*/
public OPTICS()
{
this(DEFAULT_MIN_POINTS);
}
/**
     * Creates a new OPTICS cluster object. Because OPTICS is not sensitive to the
     * radius, it is estimated from the data and set to a sufficiently large
* value. The {@link EuclideanDistance} will be used as the metric.
*
* @param minPts the minimum number of points for reachability
*/
public OPTICS(int minPts)
{
this(new EuclideanDistance(), minPts);
}
/**
     * Creates a new OPTICS cluster object. Because OPTICS is not sensitive to the
     * radius, it is estimated from the data and set to a sufficiently large
* value.
*
* @param dm the distance metric to use
* @param minPts the minimum number of points for reachability
*/
public OPTICS(DistanceMetric dm, int minPts)
{
this(dm, minPts, DEFAULT_XI);
}
/**
     * Creates a new OPTICS cluster object. Because OPTICS is not sensitive to the
     * radius, it is estimated from the data and set to a sufficiently large
* value.
*
* @param dm the distance metric to use
* @param minPts the minimum number of points for reachability
* @param xi the xi value
*/
public OPTICS(DistanceMetric dm, int minPts, double xi)
{
setDistanceMetric(dm);
setMinPts(minPts);
setXi(xi);
}
public OPTICS(OPTICS toCopy)
{
this.dm = toCopy.dm.clone();
this.vc = toCopy.vc.clone();
this.minPts = toCopy.minPts;
if(toCopy.core_distance != null )
this.core_distance = Arrays.copyOf(toCopy.core_distance, toCopy.core_distance.length);
if(toCopy.reach_d != null )
this.reach_d = Arrays.copyOf(toCopy.reach_d, toCopy.reach_d.length);
if(toCopy.processed != null )
this.processed = Arrays.copyOf(toCopy.processed, toCopy.processed.length);
if(toCopy.allVecs != null )
{
this.allVecs = new Vec[toCopy.allVecs.length];
for(int i = 0; i < toCopy.allVecs.length; i++)
this.allVecs[i] = toCopy.allVecs[i].clone();
}
this.xi = toCopy.xi;
this.orderdSeeds = toCopy.orderdSeeds;
this.radius = toCopy.radius;
}
/**
* Sets the distance metric used to compute distances in the algorithm.
*
* @param dm the distance metric to use
*/
public void setDistanceMetric(DistanceMetric dm)
{
this.dm = dm;
}
/**
* Returns the distance metric used to compute distances in the algorithm.
* @return the distance metric used
*/
public DistanceMetric getDistanceMetric()
{
return dm;
}
/**
* Sets the xi value used in {@link ExtractionMethod#XI_STEEP_ORIGINAL} to
* produce cluster results.
*
* @param xi the value in the range (0, 1)
* @throws ArithmeticException if the value is not in the appropriate range
*/
public void setXi(double xi)
{
if(xi <= 0 || xi >= 1 || Double.isNaN(xi))
throw new ArithmeticException("xi must be in the range (0, 1) not " + xi);
this.xi = xi;
this.one_min_xi = 1.0 - xi;
}
/**
* Returns the xi value used in {@link ExtractionMethod#XI_STEEP_ORIGINAL} to
* produce cluster results.
*
* @return the xi value used
*/
public double getXi()
{
return xi;
}
/**
* Sets the method used to extract clusters from the reachability plot.
*
* @param extractionMethod the clustering method
*/
public void setExtractionMethod(ExtractionMethod extractionMethod)
{
this.extractionMethod = extractionMethod;
}
/**
* Returns the method used to extract clusters from the reachability plot.
*
* @return the clustering method used
*/
public ExtractionMethod getExtractionMethod()
{
return extractionMethod;
}
/**
* Sets the minimum number of points needed to compute the core distance.
* Higher values tend to smooth out the reachability plot.
*
* @param minPts the number of points to compute reachability and core distance
*/
public void setMinPts(int minPts)
{
this.minPts = minPts;
}
/**
     * Returns the minimum number of points needed to compute the core distance.
*
* @return the number of points to compute reachability and core distance
*/
public int getMinPts()
{
return minPts;
}
/**
* Sets the {@link VectorCollection} used to produce acceleration
* structures for the OPTICS computation.
*
* @param vc the vector collection to use
*/
public void setVCF(VectorCollection<VecPaired<Vec, Integer>> vc)
{
this.vc = vc;
}
private int threshHoldFixExtractCluster(List<Integer> orderedFile, int[] designations)
{
int clustersFound = threshHoldExtractCluster(orderedFile, designations);
for(int i = 0; i < orderedFile.size(); i++)
{
if(designations[i] != NOISE)
continue;
//Check if all the neighbors have a consensus on the cluster class (ignoring noise)
List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> neighbors = vc.search(allVecs[i], minPts/2+1);
            int CLASS = -1;//-1 for not set, -2 for conflict
for(VecPaired<VecPaired<Vec, Integer>, Double> v : neighbors)
{
int subC = designations[v.getVector().getPair()];
if(subC == NOISE)//ignore
continue;
else if(CLASS == -1)//First class set
CLASS = subC;
else if (CLASS != subC)//Conflict
                    CLASS = -2;//No consensus, we won't change the noise label
}
if(CLASS != -2)
designations[i]= CLASS;
}
return clustersFound;
}
/**
     * Finds clusters by segmenting the reachability plot with a horizontal line placed at the mean reachability distance plus one standard deviation
* @param orderedFile the ordering of the data points
* @param designations the storage array for their cluster assignment
* @return the number of clusters found
*/
private int threshHoldExtractCluster(List<Integer> orderedFile, int[] designations)
{
int clustersFound = 0;
OnLineStatistics stats = new OnLineStatistics();
for(double r : reach_d)
if(!Double.isInfinite(r))
stats.add(r);
double thresh = stats.getMean()+stats.getStandardDeviation();
for(int i = 0; i < orderedFile.size(); i++)
{
if(reach_d[orderedFile.get(i)] >= thresh)
continue;
//Everything in between is part of the cluster
while(i < orderedFile.size() && reach_d[orderedFile.get(i)] < thresh)
designations[i++] = clustersFound;
//Climb up to the top of the hill, everything we climbed over is part of the cluster
while(i+1 < orderedFile.size() && reach_d[orderedFile.get(i)] < reach_d[orderedFile.get(i+1)])
designations[i++] = clustersFound;
clustersFound++;
}
return clustersFound;
}
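    /*
     * Illustrative sketch, a toy illustration of the threshold used above:
     * runs of consecutive points whose reachability falls below
     * mean + one standard deviation form the clusters (the code above
     * additionally climbs the following up-hill section).
     *
     * double[] reach = {0.2, 0.3, 5.0, 0.4, 0.3, 4.8}; //tiny reachability plot
     * OnLineStatistics st = new OnLineStatistics();
     * for(double r : reach)
     *     st.add(r);
     * double thresh = st.getMean() + st.getStandardDeviation();
     * //positions 0-1 and 3-4 fall below thresh, giving two clusters
     */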
/**
* Extracts clusters using the original xi steep algorithm from the OPTICS paper
* @param n original number of data points
* @param orderedFile the correct order of the points in the reachability plot
* @param designations the array to store the final class designations
* @return the number of clusters found
*/
private int xiSteepClusterExtract(final int n, List<Integer> orderedFile, int[] designations)
{
///Now obtain clustering
        ///Extract clustering
int clustersFound = 0;
Set<Integer> sdaSet = new IntSet();
int orderIndex = 0;
double mib = 0;
double[] mibVals = new double[n];
List<OPTICSCluster> clusters = new ArrayList<OPTICSCluster>();
List<Integer> allSteepUp = new IntList();
List<Integer> allSDA = new IntList();
/*
         * Ugly else if to increment orderIndex counter and avoid getting stuck
* in infinite loops.
* Can I write that a better way?
*/
while(orderIndex < orderedFile.size()-1)
{
int curIndex = orderedFile.get(orderIndex);
mib = Math.max(mib, reach_d[curIndex]);
if(orderIndex +1 < orderedFile.size())
{
int nextIndex = orderedFile.get(orderIndex+1);
if(!downPoint(curIndex, nextIndex))//IF(start of a steep down area D at index)
{
filterSDASet(sdaSet, mib, mibVals, orderedFile);
sdaSet.add(orderIndex);
allSDA.add(orderIndex);
while(orderIndex+1 < orderedFile.size())
{
orderIndex++;
curIndex = nextIndex;
if(orderIndex+1 >= orderedFile.size())
break;
nextIndex = orderedFile.get(orderIndex+1);
if(downPoint(curIndex, nextIndex))
break;
}
mib = reach_d[curIndex];
}
else if(!upPoint(curIndex, nextIndex))//ELSE IF(start of steep up area U at index)
{
filterSDASet(sdaSet, mib, mibVals, orderedFile);
if(!sdaSet.isEmpty())
allSteepUp.add(orderIndex);
while(orderIndex+1 < orderedFile.size())
{
orderIndex++;
curIndex = nextIndex;
if(orderIndex+1 >= orderedFile.size())
break;
nextIndex = orderedFile.get(orderIndex+1);
if(upPoint(curIndex, nextIndex))
break;
}
mib = reach_d[curIndex];
for(Iterator<Integer> iter = sdaSet.iterator(); iter.hasNext(); )
{
int sdaOrdered = iter.next();
int sdaIndx = orderedFile.get(sdaOrdered);
if(!(orderIndex-sdaOrdered >= minPts))//Fail 3a
continue;
else if(mib * one_min_xi < mibVals[sdaIndx])
{
continue;
}
if(sdaOrdered > orderIndex)
continue;
OPTICSCluster newClust = new OPTICSCluster(sdaOrdered, orderIndex+1);
OPTICSCluster tmp;
for(Iterator<OPTICSCluster> clustIter = clusters.iterator(); clustIter.hasNext();)
{
if(newClust.contains((tmp = clustIter.next())))
{
clustIter.remove();
newClust.subClusters.add(tmp);
}
}
clusters.add(newClust);
}
}
else
orderIndex++;
}
else
orderIndex++;
}
for(OPTICSCluster oc : clusters)
{
for(int i : orderedFile.subList(oc.start, oc.end))
if(designations[i] < 0)
designations[i] = clustersFound;
clustersFound++;
}
return clustersFound;
}
/**
     * Private class for keeping track of hierarchies of clusters
*/
private class OPTICSCluster
{
int start, end;
List<OPTICSCluster> subClusters;
public OPTICSCluster(int start, int end)
{
this.start = start;
this.end = end;
this.subClusters = new ArrayList<OPTICSCluster>(5);
}
        /**
         * Checks if this cluster's range fully contains another cluster's range.
         * @param other the cluster to check against
         * @return {@code true} if the other cluster lies entirely within this one
         */
public boolean contains(OPTICSCluster other)
{
return this.start <= other.start && other.end <= this.end;
}
@Override
public String toString()
{
return "{" + start + "," + end + "}";
}
}
@Override
public int[] cluster(DataSet dataSet, int[] designations)
{
if(dataSet.getNumNumericalVars() < 1)
            throw new ClusterFailureException("OPTICS requires numeric features, and none are present.");
final int n = dataSet.size();
if(designations == null)
designations = new int[n];
Arrays.fill(designations, NOISE);
orderdSeeds = new PriorityQueue<>(n, (Integer o1, Integer o2) -> Double.compare(reach_d[o1], reach_d[o2]));
core_distance = new double[n];
reach_d = new double[n];
Arrays.fill(reach_d, UNDEFINED);
processed = new boolean[n];
allVecs = new Vec[n];
List<VecPaired<Vec, Integer>> pairedVecs = new ArrayList<>(n);
for(int i = 0; i < allVecs.length; i++)
{
allVecs[i] = dataSet.getDataPoint(i).getNumericalValues();
pairedVecs.add(new VecPaired<>(allVecs[i], i));
}
vc.build(false, pairedVecs, dm);
//Estimate radius value
OnLineStatistics stats = VectorCollectionUtils.getKthNeighborStats(vc, allVecs, minPts+1);
radius = stats.getMean() + stats.getStandardDeviation() * 3;
List<Integer> orderedFile = new IntList(n);
//Main clustering loop
for(int i = 0; i < dataSet.size(); i++)
{
if(processed[i])
continue;
Vec vec = dataSet.getDataPoint(i).getNumericalValues();
expandClusterOrder(i, vec, orderedFile);
}
int clustersFound;
if(extractionMethod == ExtractionMethod.THRESHHOLD)
clustersFound = threshHoldExtractCluster(orderedFile, designations);
else if(extractionMethod == ExtractionMethod.THRESHHOLD_FIXUP)
clustersFound = threshHoldFixExtractCluster(orderedFile, designations);
else if(extractionMethod == ExtractionMethod.XI_STEEP_ORIGINAL)
clustersFound = xiSteepClusterExtract(n, orderedFile, designations);
        //Re-order the reachability values into reachability-plot order
double[] newReach = new double[reach_d.length];
Arrays.fill(newReach, Double.POSITIVE_INFINITY);
for(int i = 0; i < orderedFile.size(); i++)
newReach[i] = reach_d[orderedFile.get(i)];
reach_d = newReach;
return designations;
}
private void filterSDASet(Set<Integer> sdaSet, double mib, double[] mibVals, List<Integer> orderedFile)
{
for(Iterator<Integer> iter = sdaSet.iterator(); iter.hasNext(); )
{
int sdaIndx = orderedFile.get(iter.next());
if(reach_d[sdaIndx]*one_min_xi <= mib)
iter.remove();
else
mibVals[sdaIndx] = Math.max(mib, mibVals[sdaIndx]);//TODO mibFill?
}
}
private boolean upPoint(int index1, int index2)
{
return reach_d[index1] <= reach_d[index2]*one_min_xi;
}
private boolean downPoint(int index1, int index2)
{
return reach_d[index1]*one_min_xi <= reach_d[index2];
}
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
return cluster(dataSet, designations);
}
private void expandClusterOrder(int curIndex, Vec vec, List<Integer> orderedFile)
{
List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> neighbors = vc.search(vec, radius);
VecPaired<Vec, Integer> object = new VecPaired<>(vec, curIndex);
        reach_d[curIndex] = UNDEFINED;//sentinel for an undefined reachability
processed[curIndex] = true;
setCoreDistance(neighbors, curIndex);
orderedFile.add(curIndex);
if(!Double.isInfinite(core_distance[curIndex]))
{
orderedSeedsUpdate(neighbors, curIndex);
while(!orderdSeeds.isEmpty())
{
int curObjectIndex = orderdSeeds.poll();
neighbors = vc.search(allVecs[curObjectIndex], radius);
processed[curObjectIndex] = true;
setCoreDistance(neighbors, curObjectIndex);
orderedFile.add(curObjectIndex);
if(!Double.isInfinite(core_distance[curObjectIndex]))
orderedSeedsUpdate(neighbors, curObjectIndex);
}
}
}
private void setCoreDistance(List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> neighbors, int curIndex)
{
        if(neighbors.size() < minPts+1)//+1 because we don't count ourselves, which will be returned by the search
core_distance[curIndex] = UNDEFINED;
else//0 is us, 1 is the nearest neighbor
core_distance[curIndex] = neighbors.get(minPts).getPair();
}
private void orderedSeedsUpdate(List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> neighbors, int centerObjectIndex)
{
double c_dist = core_distance[centerObjectIndex];
for(int i = 1; i < neighbors.size(); i++)//'0' index is a self reference, skip it
{
VecPaired<VecPaired<Vec, Integer>, Double> neighbor = neighbors.get(i);
int objIndex = neighbor.getVector().getPair();
if(processed[objIndex])
continue;
double new_r_dist = Math.max(c_dist, neighbor.getPair());
if(Double.isInfinite(reach_d[objIndex]))
{
reach_d[objIndex] = new_r_dist;
// r_dists[objIndex] = new_r_dist;
orderdSeeds.add(objIndex);
}
else if(new_r_dist < reach_d[objIndex])// Object already in OrderSeeds, but we can do better
{
reach_d[objIndex] = new_r_dist;
// r_dists[objIndex] = new_r_dist;
orderdSeeds.remove(objIndex);
orderdSeeds.add(objIndex);
}
}
}
@SuppressWarnings("unused")
private void extractClusteringDBSCAN(List<Integer> orderedFile, double e, int[] designations)
{
int clusterID = NOISE;
for(int i = 0; i < orderedFile.size(); i++)
{
int trueObjIndex = orderedFile.get(i);
if( Double.isInfinite(reach_d[trueObjIndex]) || reach_d[trueObjIndex] > e)
{
if(core_distance[trueObjIndex] <= e)
{
clusterID++;
designations[trueObjIndex] = clusterID;
}
else
designations[trueObjIndex] = NOISE;
}
else
designations[trueObjIndex] = clusterID;
}
throw new UnsupportedOperationException("Not yet implemented");
}
/**
* Returns a copy of the reachability array in correct reachability order.
     * Some values that are not density reachable could have a value of
* {@link Double#POSITIVE_INFINITY}.
*
* @return an array of the reachability values
*/
public double[] getReachabilityArray()
{
return Arrays.copyOf(reach_d, reach_d.length);
}
}
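/*
 * Illustrative usage sketch, assuming a numeric DataSet named "data" has
 * already been loaded (the calls below are the public API defined in this
 * class):
 *
 * OPTICS optics = new OPTICS(new EuclideanDistance(), 10);
 * optics.setExtractionMethod(OPTICS.ExtractionMethod.THRESHHOLD);
 * int[] labels = optics.cluster(data, new int[data.size()]);
 * double[] reachPlot = optics.getReachabilityArray();
 */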
| 24,445 | 33.922857 | 128 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/PAM.java |
package jsat.clustering;
import jsat.linear.distancemetrics.TrainableDistanceMetric;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Random;
import java.util.concurrent.atomic.DoubleAdder;
import java.util.concurrent.atomic.LongAdder;
import jsat.DataSet;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.math.OnLineStatistics;
import static jsat.clustering.SeedSelectionMethods.*;
import jsat.utils.IntList;
import jsat.utils.ListUtils;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
/**
*
* @author Edward Raff
*/
public class PAM implements KClusterer
{
private static final long serialVersionUID = 4787649180692115514L;
protected DistanceMetric dm;
protected Random rand;
protected SeedSelection seedSelection;
protected int iterLimit = 100;
protected int[] medoids;
protected boolean storeMedoids = true;
public PAM(DistanceMetric dm, Random rand, SeedSelection seedSelection)
{
this.dm = dm;
this.rand = rand;
this.seedSelection = seedSelection;
}
public PAM(DistanceMetric dm, Random rand)
{
this(dm, rand, SeedSelection.KPP);
}
public PAM(DistanceMetric dm)
{
this(dm, RandomUtil.getRandom());
}
public PAM()
{
this(new EuclideanDistance());
}
/**
*
* @param iterLimit the maximum number of iterations of the algorithm to perform
*/
public void setMaxIterations(int iterLimit)
{
this.iterLimit = iterLimit;
}
/**
*
* @return the maximum number of iterations of the algorithm to perform
*/
public int getMaxIterations()
{
return iterLimit;
}
/**
* Sets the distance metric used by this clustering algorithm
* @param dm the distance metric to use
*/
public void setDistanceMetric(DistanceMetric dm)
{
this.dm = dm;
}
/**
*
* @return the distance metric to be used by this algorithm
*/
public DistanceMetric getDistanceMetric()
{
return dm;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public PAM(PAM toCopy)
{
this.dm = toCopy.dm.clone();
this.rand = RandomUtil.getRandom();
this.seedSelection = toCopy.seedSelection;
if(toCopy.medoids != null)
this.medoids = Arrays.copyOf(toCopy.medoids, toCopy.medoids.length);
this.storeMedoids = toCopy.storeMedoids;
this.iterLimit = toCopy.iterLimit;
}
/**
* If set to {@code true} the computed medoids will be stored after clustering
* is completed, and can then be retrieved using {@link #getMedoids() }.
* @param storeMedoids {@code true} if the medoids should be stored for
* later, {@code false} to discard them once clustering is complete.
*/
public void setStoreMedoids(boolean storeMedoids)
{
this.storeMedoids = storeMedoids;
}
/**
* Returns the raw array of indices that indicate which data point acted as
* the center for each cluster.
     * @return the array of medoid indices
*/
public int[] getMedoids()
{
return medoids;
}
/**
* Sets the method of seed selection used by this algorithm
     * @param seedSelection the method of seed selection to use
*/
public void setSeedSelection(SeedSelection seedSelection)
{
this.seedSelection = seedSelection;
}
/**
*
* @return the method of seed selection used by this algorithm
*/
public SeedSelection getSeedSelection()
{
return seedSelection;
}
/**
* Performs the actual work of PAM.
*
* @param data the data set to apply PAM to
     * @param doInit {@code true} if the initialization procedure of training the distance metric, initializing its cache, and selecting the seeds, should be done.
* @param medioids the array to store the indices that get chosen as the medoids. The length of the array indicates how many medoids should be obtained.
* @param assignments an array of the same length as <tt>data</tt>, each value indicating what cluster that point belongs to.
* @param cacheAccel the pre-computed distance acceleration cache. May be {@code null}.
     * @param parallel {@code true} if multiple threads should be used to perform clustering
     * @return the sum of squared distances from each point to its assigned medoid
*/
protected double cluster(DataSet data, boolean doInit, int[] medioids, int[] assignments, List<Double> cacheAccel, boolean parallel)
{
DoubleAdder totalDistance =new DoubleAdder();
LongAdder changes = new LongAdder();
Arrays.fill(assignments, -1);//-1, invalid category!
int[] bestMedCand = new int[medioids.length];
double[] bestMedCandDist = new double[medioids.length];
List<Vec> X = data.getDataVectors();
final List<Double> accel;
if(doInit)
{
TrainableDistanceMetric.trainIfNeeded(dm, data);
accel = dm.getAccelerationCache(X);
selectIntialPoints(data, medioids, dm, accel, rand, seedSelection);
}
else
accel = cacheAccel;
int iter = 0;
do
{
changes.reset();
totalDistance.reset();
ParallelUtils.run(parallel, data.size(), (start, end)->
{
for(int i = start; i < end; i++)
{
int assignment = 0;
double minDist = dm.dist(medioids[0], i, X, accel);
for (int k = 1; k < medioids.length; k++)
{
double dist = dm.dist(medioids[k], i, X, accel);
if (dist < minDist)
{
minDist = dist;
assignment = k;
}
}
//Update which cluster it is in
if (assignments[i] != assignment)
{
changes.increment();
assignments[i] = assignment;
}
totalDistance.add(minDist * minDist);
}
});
            //TODO this update may be faster by using more memory, and actually moving points around in the assignment loop above
//Update the medoids
Arrays.fill(bestMedCandDist, Double.MAX_VALUE);
for(int i = 0; i < data.size(); i++)
{
double thisCandidateDistance;
final int clusterID = assignments[i];
final int medCandadate = i;
final int ii = i;
thisCandidateDistance = ParallelUtils.range(data.size(), parallel)
.filter(j -> j != ii && assignments[j] == clusterID)
.mapToDouble(j -> Math.pow(dm.dist(medCandadate, j, X, accel), 2))
.sum();
if(thisCandidateDistance < bestMedCandDist[clusterID])
{
bestMedCand[clusterID] = i;
bestMedCandDist[clusterID] = thisCandidateDistance;
}
}
System.arraycopy(bestMedCand, 0, medioids, 0, medioids.length);
}
while( changes.sum() > 0 && iter++ < iterLimit);
return totalDistance.sum();
}
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
return cluster(dataSet, 2, (int)Math.sqrt(dataSet.size()/2), parallel, designations);
}
@Override
public int[] cluster(DataSet dataSet, int clusters, boolean parallel, int[] designations)
{
if(designations == null)
designations = new int[dataSet.size()];
medoids = new int[clusters];
cluster(dataSet, true, medoids, designations, null, parallel);
if(!storeMedoids)
medoids = null;
return designations;
}
@Override
public PAM clone()
{
return new PAM(this);
}
@Override
public int[] cluster(DataSet dataSet, int lowK, int highK, boolean parallel, int[] designations)
{
if(designations == null)
designations = new int[dataSet.size()];
double[] totDistances = new double[highK-lowK+1];
for(int k = lowK; k <= highK; k++)
{
totDistances[k-lowK] = cluster(dataSet, true, new int[k], designations, null, parallel);
}
//Now we process the distance changes
/**
* Keep track of the changes
*/
OnLineStatistics stats = new OnLineStatistics();
double maxChange = Double.MIN_VALUE;
int maxChangeK = lowK;
for(int i = 1; i < totDistances.length; i++)
{
double change = Math.abs(totDistances[i] - totDistances[i-1]);
stats.add(change);
if (change > maxChange)
{
maxChange = change;
maxChangeK = i+lowK;
}
}
if(maxChange < stats.getStandardDeviation()*2+stats.getMean())
maxChangeK = lowK;
return cluster(dataSet, maxChangeK, parallel, designations);
}
/**
* Computes the medoid of the data
* @param parallel whether or not the computation should be done using multiple cores
* @param X the list of all data
* @param dm the distance metric to get the medoid with respect to
* @return the index of the point in <tt>X</tt> that is the medoid
*/
public static int medoid(boolean parallel, List<? extends Vec> X, DistanceMetric dm)
{
IntList order = new IntList(X.size());
ListUtils.addRange(order, 0, X.size(), 1);
List<Double> accel = dm.getAccelerationCache(X, parallel);
return medoid(parallel, order, X, dm, accel);
}
/**
* Computes the medoid of a sub-set of data
* @param parallel whether or not the computation should be done using multiple cores
* @param indecies the indexes of the points to get the medoid of
* @param X the list of all data
* @param dm the distance metric to get the medoid with respect to
* @param accel the acceleration cache for the distance metric
* @return the index value contained within indecies that is the medoid
*/
public static int medoid(boolean parallel, Collection<Integer> indecies, List<? extends Vec> X, DistanceMetric dm, List<Double> accel)
{
double bestDist = Double.POSITIVE_INFINITY;
int bestIndex = -1;
for (int i : indecies)
{
double thisCandidateDistance;
final int medCandadate = i;
thisCandidateDistance = ParallelUtils.range(indecies.size(), parallel)
.filter(j -> j != i)
.mapToDouble(j -> dm.dist(medCandadate, j, X, accel))
.sum();
if (thisCandidateDistance < bestDist)
{
bestIndex = i;
bestDist = thisCandidateDistance;
}
}
return bestIndex;
}
}
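/*
 * Illustrative usage sketch, assuming a DataSet named "data" has already been
 * loaded (the calls below are the public API defined in this class):
 *
 * PAM pam = new PAM(new EuclideanDistance());
 * pam.setMaxIterations(50);
 * int[] assignments = pam.cluster(data, 5, false, null); //5 clusters, single threaded
 * int[] medoidIndices = pam.getMedoids(); //indices of the chosen medoids
 *
 * //the static helper can also be used on its own to find the medoid of a list of vectors
 * int med = PAM.medoid(false, data.getDataVectors(), new EuclideanDistance());
 */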
| 11,527 | 30.845304 | 161 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/SeedSelectionMethods.java |
package jsat.clustering;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import jsat.DataSet;
import jsat.SimpleDataSet;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.DataPoint;
import jsat.linear.DenseVector;
import jsat.linear.MatrixStatistics;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.linear.vectorcollection.VPTreeMV;
import jsat.utils.*;
import jsat.utils.concurrent.ParallelUtils;
/**
* This class provides methods for sampling a data set for a set of initial points to act as the seeds for a clustering algorithm.
*
* @author Edward Raff
*/
public class SeedSelectionMethods
{
private SeedSelectionMethods()
{
}
static public enum SeedSelection
{
/**
* The seed values will be randomly selected from the data set
*/
RANDOM,
/**
* The k-means++ seeding algo: <br>
* The seed values will be probabilistically selected from the
* data set. <br>
* The solution is O(log(k)) competitive with the
* optimal k clustering when using {@link EuclideanDistance}.
* <br><br>
* See k-means++: The Advantages of Careful Seeding
*/
KPP,
/**
* Faster version of the k-means++ seeding algorithm. <br>
* <br>
         * See: "Exact Acceleration of K-Means++ and K-Means‖" IJCAI 2021
*/
KPP_TIA,
/**
* The K-Means|| algorithm <br>
* <br>
* See: Bahmani, B., Moseley, B., Vattani, A., Kumar, R., and
* Vassilvitskii, S. (2012). Scalable K-means++. Proc. VLDB Endow.,
* 5(7), 622–633.
*/
KBB,
/**
* Faster version of the K-Means|| seeding algorithm. <br>
* <br>
         * See: "Exact Acceleration of K-Means++ and K-Means‖" IJCAI 2021
*/
KBB_TIA,
/**
* The first seed is chosen randomly, and then all others are chosen
* to be the farthest away from all other seeds
*/
FARTHEST_FIRST,
/**
* Selects the seeds in one pass by selecting points as evenly
* distributed quantiles for the distance of each point from the mean
* of the whole data set. This makes the seed selection deterministic
* <br><br>
* See: J. A. Hartigan and M. A. Wong, "A k-means clustering algorithm",
* Applied Statistics, vol. 28, pp. 100–108, 1979.
*/
MEAN_QUANTILES
};
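    /*
     * Illustrative usage sketch, assuming a DataSet named "data" has already
     * been loaded. The call below fills the seeds array with k-means++ style
     * seed indices using one of the overloads defined in this class:
     *
     * int[] seeds = new int[5];
     * SeedSelectionMethods.selectIntialPoints(data, seeds, new EuclideanDistance(),
     *         new Random(42), SeedSelectionMethods.SeedSelection.KPP);
     */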
/**
* Selects seeds from a data set to use for a clustering algorithm. Copies of the vectors chosen will be returned.
*
* @param d the data set to perform select from
* @param k the number of seeds to choose
     * @param dm the distance metric to use when selecting points
* @param rand a source of randomness
* @param selectionMethod The method of seed selection to use.
* @return a list of the copies of the chosen vectors.
*/
static public List<Vec> selectIntialPoints(DataSet d, int k, DistanceMetric dm, Random rand, SeedSelection selectionMethod)
{
return selectIntialPoints(d, k, dm, null, rand, selectionMethod);
}
/**
*
* @param d the data set to perform select from
* @param k the number of seeds to choose
     * @param dm the distance metric to use when selecting points
* @param accelCache the cache of pre-generated acceleration information for the distance metric. May be null
* @param rand a source of randomness
* @param selectionMethod The method of seed selection to use.
* @return a list of the copies of the chosen vectors.
*/
static public List<Vec> selectIntialPoints(DataSet d, int k, DistanceMetric dm, List<Double> accelCache, Random rand, SeedSelection selectionMethod)
{
int[] indicies = new int[k];
selectIntialPoints(d, indicies, dm, accelCache, rand, selectionMethod, false);
List<Vec> vecs = new ArrayList<>(k);
for(Integer i : indicies)
vecs.add(d.getDataPoint(i).getNumericalValues().clone());
return vecs;
}
/**
* Selects seeds from a data set to use for a clustering algorithm. Copies of the vectors chosen will be returned.
*
* @param d the data set to perform select from
* @param k the number of seeds to choose
     * @param dm the distance metric to use when selecting points
* @param rand a source of randomness
* @param selectionMethod The method of seed selection to use.
* @param parallel {@code true} if multiple threads should be used to
* perform clustering. {@code false} if it should be done in a single
* threaded manner.
* @return a list of the copies of the chosen vectors.
*/
static public List<Vec> selectIntialPoints(DataSet d, int k, DistanceMetric dm, Random rand, SeedSelection selectionMethod, boolean parallel)
{
return selectIntialPoints(d, k, dm, null, rand, selectionMethod, parallel);
}
/**
* Selects seeds from a data set to use for a clustering algorithm. Copies of the vectors chosen will be returned.
*
* @param d the data set to perform select from
* @param k the number of seeds to choose
     * @param dm the distance metric to use when selecting points
* @param accelCache the cache of pre-generated acceleration information for the distance metric. May be null
* @param rand a source of randomness
* @param selectionMethod The method of seed selection to use.
* @param parallel {@code true} if multiple threads should be used to
* perform clustering. {@code false} if it should be done in a single
* threaded manner.
* @return a list of the copies of the chosen vectors.
*/
static public List<Vec> selectIntialPoints(DataSet d, int k, DistanceMetric dm, List<Double> accelCache, Random rand, SeedSelection selectionMethod, boolean parallel)
{
int[] indicies = new int[k];
selectIntialPoints(d, indicies, dm, accelCache, rand, selectionMethod, parallel);
List<Vec> vecs = new ArrayList<Vec>(k);
for(Integer i : indicies)
vecs.add(d.getDataPoint(i).getNumericalValues().clone());
return vecs;
}
/**
* Selects seeds from a data set to use for a clustering algorithm. The indices of the chosen points will be placed in the <tt>indices</tt> array.
*
* @param d the data set to perform select from
* @param indices a storage place to note the indices that were chosen as seed. The length of the array indicates how many seeds to select.
     * @param dm the distance metric to use when selecting points
* @param rand a source of randomness
* @param selectionMethod The method of seed selection to use.
*/
static public void selectIntialPoints(DataSet d, int[] indices, DistanceMetric dm, Random rand, SeedSelection selectionMethod)
{
selectIntialPoints(d, indices, dm, null, rand, selectionMethod);
}
/**
* Selects seeds from a data set to use for a clustering algorithm. The indices of the chosen points will be placed in the <tt>indices</tt> array.
*
* @param d the data set to perform select from
* @param indices a storage place to note the indices that were chosen as seed. The length of the array indicates how many seeds to select.
     * @param dm the distance metric to use when selecting points
* @param accelCache the cache of pre-generated acceleration information for the distance metric. May be null
* @param rand a source of randomness
* @param selectionMethod The method of seed selection to use.
*/
static public void selectIntialPoints(DataSet d, int[] indices, DistanceMetric dm, List<Double> accelCache, Random rand, SeedSelection selectionMethod)
{
selectIntialPoints(d, indices, dm, accelCache, rand, selectionMethod, false);
}
/**
* Selects seeds from a data set to use for a clustering algorithm. The indices of the chosen points will be placed in the <tt>indices</tt> array.
*
* @param d the data set to perform select from
* @param indices a storage place to note the indices that were chosen as seed. The length of the array indicates how many seeds to select.
     * @param dm the distance metric to use when selecting points
* @param rand a source of randomness
* @param selectionMethod The method of seed selection to use.
* @param parallel {@code true} if multiple threads should be used to
* perform clustering. {@code false} if it should be done in a single
* threaded manner.
*/
static public void selectIntialPoints(DataSet d, int[] indices, DistanceMetric dm, Random rand, SeedSelection selectionMethod, boolean parallel)
{
selectIntialPoints(d, indices, dm, null, rand, selectionMethod, parallel);
}
/**
* Selects seeds from a data set to use for a clustering algorithm. The indices of the chosen points will be placed in the <tt>indices</tt> array.
*
* @param d the data set to perform select from
* @param indices a storage place to note the indices that were chosen as seed. The length of the array indicates how many seeds to select.
     * @param dm the distance metric to use when selecting points
* @param accelCache the cache of pre-generated acceleration information for the distance metric. May be null
* @param rand a source of randomness
* @param selectionMethod The method of seed selection to use.
* @param parallel {@code true} if multiple threads should be used to
* perform clustering. {@code false} if it should be done in a single
* threaded manner.
*/
static public void selectIntialPoints(DataSet d, int[] indices, DistanceMetric dm, List<Double> accelCache, Random rand, SeedSelection selectionMethod, boolean parallel)
{
int k = indices.length;
if (null != selectionMethod)
switch (selectionMethod)
{
case RANDOM:
Set<Integer> indecies = new IntSet(k);
                while (indecies.size() != k)//Keep sampling, we can't use the same point twice.
                    indecies.add(rand.nextInt(d.size()));//TODO create method to do uniform sampling for a select range
int j = 0;
for (Integer i : indecies)
indices[j++] = i;
break;
case KPP_TIA:
kppSelectionTIA(indices, rand, d, k, dm, accelCache, parallel);
break;
case KPP:
kppSelection(indices, rand, d, k, dm, accelCache, parallel);
break;
case KBB_TIA:
kbbSelectionTIA(indices, rand, d, k, dm, accelCache, parallel);
break;
case KBB:
kbbSelection(indices, rand, d, k, dm, accelCache, parallel);
break;
case FARTHEST_FIRST:
ffSelection(indices, rand, d, k, dm, accelCache, parallel);
break;
case MEAN_QUANTILES:
mqSelection(indices, d, k, dm, accelCache, parallel);
break;
default:
break;
}
}
private static void kppSelection(int[] indices, Random rand, DataSet d, int k, DistanceMetric dm, List<Double> accelCache)
{
kppSelection(indices, rand, d, k, dm, accelCache, false);
}
private static void kppSelection(final int[] indices, Random rand, final DataSet d, final int k, final DistanceMetric dm, final List<Double> accelCache, boolean parallel)
{
/*
* http://www.stanford.edu/~darthur/kMeansPlusPlus.pdf : k-means++: The Advantages of Careful Seeding
*
*/
//Initial random point
indices[0] = rand.nextInt(d.size());
Vec w = d.getDataWeights();
final double[] closestDist = new double[d.size()];
Arrays.fill(closestDist, Double.POSITIVE_INFINITY);
final List<Vec> X = d.getDataVectors();
for (int j = 1; j < k; j++)
{
//Compute the distance from each data point to the closest mean
final int newMeanIndx = indices[j - 1];//Only the most recently added mean needs to get distances computed.
double sqrdDistSum = ParallelUtils.run(parallel, X.size(), (start, end) ->
{
double partial_sqrd_dist = 0.0;
for (int i = start; i < end; i++)
{
double newDist = dm.dist(newMeanIndx, i, X, accelCache);
newDist *= newDist;
if (newDist < closestDist[i])
closestDist[i] = newDist;
partial_sqrd_dist += closestDist[i]*w.get(i);
}
return partial_sqrd_dist;
},
(t, u) -> t + u);
if(sqrdDistSum <= 1e-6)//everyone is too close, randomly fill rest
{
Set<Integer> ind = new IntSet();
for(int i = 0;i <j; i++)
ind.add(indices[i]);
while(ind.size() < k)
ind.add(rand.nextInt(closestDist.length));
int pos = 0;
for(int i : ind)
indices[pos++] = i;
return;
}
            //Choose the new x with probability weighted by the squared distances
double rndX = rand.nextDouble() * sqrdDistSum;
double searchSum = closestDist[0]*w.get(0);
int i = 0;
while(searchSum < rndX && i < d.size()-1)
searchSum += closestDist[++i]*w.get(i);
indices[j] = i;
}
}
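    /*
     * The loop above implements the D^2 sampling rule of k-means++: with D(x)
     * the distance from x to its closest already-chosen seed and w(x) the
     * point's weight, the next seed is drawn with probability
     *
     *     p(x) = w(x) * D(x)^2 / sum_j w(j) * D(j)^2
     *
     * which is what drawing rndX uniformly from [0, sqrdDistSum) and walking
     * the cumulative sum of closestDist[i]*w.get(i) accomplishes.
     */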
//accelerated variant from Exact Acceleration of K-Means++ and K-Means‖
private static void kppSelectionTIA(final int[] indices, Random rand, final DataSet d, final int k, final DistanceMetric dm, final List<Double> accelCache, boolean parallel)
{
/*
* http://www.stanford.edu/~darthur/kMeansPlusPlus.pdf : k-means++: The Advantages of Careful Seeding
*
*/
final double[] closestDist = new double[d.size()];
Arrays.fill(closestDist, Double.POSITIVE_INFINITY);
final int[] closest_mean = new int[d.size()];
Arrays.fill(closest_mean, 0);
Vec w = d.getDataWeights();
final double[] expo_sample = new double[d.size()];
indices[0] = 0;//First initial seed
for(int i = 0; i < d.size(); i++)
{
double p = rand.nextDouble();
            expo_sample[i] = -Math.log(1-p)/w.get(i);//don't use FastMath b/c we need to make sure all values are strictly positive
if(expo_sample[i] < expo_sample[indices[0]])
indices[0] = i;
}
final double[] sample_weight = new double[d.size()];
PriorityQueue<Integer> nextSample = new PriorityQueue<>(expo_sample.length, (a, b) -> Double.compare(sample_weight[a], sample_weight[b]));
IntList dirtyItemsToFix = new IntList();
boolean[] dirty = new boolean[d.size()];
Arrays.fill(dirty, false);
//Initial random point
closestDist[indices[0]] = 0.0;
final List<Vec> X = d.getDataVectors();
double[] gamma = new double[k];
Arrays.fill(gamma, Double.MAX_VALUE);
double prev_partial = 0;
for (int j = 1; j < k; j++)
{
final int jj = j;
//Compute the distance from each data point to the closest mean
final int newMeanIndx = indices[j - 1];//Only the most recently added mean needs to get distances computed.
double sqrdDistSum = ParallelUtils.run(parallel, X.size(), (start, end) ->
{
double partial_sqrd_dist = 0.0;
for (int i = start; i < end; i++)
{
//mul by 4 b/c gamma and closestDist are the _squared_ distances, not raw.
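                    //The skip is justified by the triangle inequality: let m_c be x_i's current
                    //closest seed and m_new the newest seed. If dist(m_c, m_new) >= 2*dist(x_i, m_c)
                    //then dist(x_i, m_new) >= dist(m_c, m_new) - dist(x_i, m_c) >= dist(x_i, m_c),
                    //so m_new cannot be closer and the distance computation can be skipped. In
                    //squared form that test is gamma[closest_mean[i]] >= 4*closestDist[i].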
if(gamma[closest_mean[i]] < 4* closestDist[i])
{
double newDist = dm.dist(newMeanIndx, i, X, accelCache);
newDist *= newDist;
if (newDist < closestDist[i])
{
if(jj > 1)
{
partial_sqrd_dist -= closestDist[i]*w.get(i);
dirty[i] = true;
}
else
{
sample_weight[i] = expo_sample[i]/(newDist);
nextSample.add(i);
}
closest_mean[i] = jj-1;
closestDist[i] = newDist;
partial_sqrd_dist += closestDist[i]*w.get(i);
}
}
}
return partial_sqrd_dist;
},
(t, u) -> t + u);
if(prev_partial != 0)
{
sqrdDistSum = prev_partial + sqrdDistSum;
}
prev_partial = sqrdDistSum;
if(sqrdDistSum <= 1e-6)//everyone is too close, randomly fill rest
{
// System.out.println("BAILL");
Set<Integer> ind = new IntSet();
for(int i = 0;i <j; i++)
ind.add(indices[i]);
while(ind.size() < k)
ind.add(rand.nextInt(closestDist.length));
int pos = 0;
for(int i : ind)
indices[pos++] = i;
return;
}
int tries = 0;//for debugging
//Search till we find first clean item
while(!nextSample.isEmpty() && dirty[nextSample.peek()])
dirtyItemsToFix.add(nextSample.poll());
for(int i : dirtyItemsToFix)//fix all the dirty items!
sample_weight[i] = expo_sample[i]/(closestDist[i]);
nextSample.addAll(dirtyItemsToFix);//put them back in the Q
dirtyItemsToFix.clear();//done, clean up
while(true)//this should only happen once, kept for debugging purposes
{
tries++;
int next_indx = nextSample.poll();
if(dirty[next_indx])//this should not enter, kept for debugging purposes
{
sample_weight[next_indx] = expo_sample[next_indx]/(closestDist[next_indx]);
dirty[next_indx] = false;
nextSample.add(next_indx);
}
else
{
indices[j] = next_indx;
break;
}
}
//now we have new index, determine dists to prev means
if(j+1 < k)
{
//for(k_prev = 0; k_prev < j; k_prev++)
ParallelUtils.run(parallel, j, (k_prev, end) ->
{
for(; k_prev < end; k_prev++)
{
gamma[k_prev] = Math.pow(dm.dist(indices[k_prev], indices[jj], X, accelCache), 2);
}
});
}
}
}
private static void kbbSelection(final int[] indices, Random rand, final DataSet d, final int k, final DistanceMetric dm, final List<Double> accelCache, boolean parallel)
{
int trials = 5;
int oversample = 2*k;
//Initial random point
// indices[0] = rand.nextInt(d.size());
int[] assigned_too = new int[d.size()];
IntList C = new IntList(trials*oversample);
C.add(rand.nextInt(d.size()));//Initial random point
Vec w = d.getDataWeights();
final double[] closestDist = new double[d.size()];
Arrays.fill(closestDist, Double.POSITIVE_INFINITY);
final List<Vec> X = d.getDataVectors();
        //init points to the initial center
double sqrdDistSum = ParallelUtils.run(parallel, X.size(), (start, end) ->
{
double partial_sqrd_dist = 0.0;
for (int i = start; i < end; i++)
{
double newDist = dm.dist(C.getI(0), i, X, accelCache);
newDist *= newDist;
if (newDist < closestDist[i])
closestDist[i] = newDist;
partial_sqrd_dist += closestDist[i]*w.get(i);
}
return partial_sqrd_dist;
},
(z, u) -> z + u);
for(int t = 0; t < trials; t++)
{
            //Let's sample some new points
int orig_size = C.size();
for(int i = 0; i < X.size(); i++)
if(w.get(i)*oversample*closestDist[i]/sqrdDistSum > rand.nextDouble())//sample!
C.add(i);
sqrdDistSum = ParallelUtils.run(parallel, X.size(), (start, end) ->
{
double partial_sqrd_dist = 0.0;
for (int i = start; i < end; i++)
{
if(closestDist[i] == 0)
continue;
for(int j = orig_size; j < C.size(); j++)
{
double newDist = dm.dist(C.get(j), i, X, accelCache);
newDist *= newDist;
if (newDist < closestDist[i])
{
closestDist[i] = newDist;
assigned_too[i] = j;
}
}
partial_sqrd_dist += closestDist[i]*w.get(i);
}
return partial_sqrd_dist;
},
(z, u) -> z + u);
}
Vec weights = new DenseVector(C.size());
for(int i = 0; i < X.size(); i++)
weights.increment(assigned_too[i], w.get(i));
SimpleDataSet sds = new SimpleDataSet(d.getNumNumericalVars(), new CategoricalData[0]);
for(int j : C)
{
sds.add(new DataPoint(X.get(j)));
sds.setWeight(sds.size()-1, weights.get(sds.size()-1));
}
//run k-means++ on the weighted set of selected over-samples
kppSelection(indices, rand, sds, k, dm, dm.getAccelerationCache(sds.getDataVectors(), parallel), parallel);
//map final seeds back to original vectors
for(int i = 0; i < k; i++)
indices[i] = C.getI(indices[i]);
}
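    /*
     * The method above follows the k-means|| scheme: for a fixed number of
     * rounds (trials), every point is sampled independently with probability
     * proportional to oversample * w(x) * D(x)^2 / phi, where D(x) is the
     * distance to the closest candidate chosen so far and phi is the current
     * weighted sum of squared distances. The sampled candidates are then
     * weighted by the total weight of the points closest to them, and
     * k-means++ is run on that small weighted set to pick the final k seeds.
     */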
//accelerated variant from Exact Acceleration of K-Means++ and K-Means‖
private static void kbbSelectionTIA(final int[] indices, Random rand, final DataSet d, final int k, final DistanceMetric dm, final List<Double> accelCache, boolean parallel)
{
int trials = 5;
int oversample = 2*k;
//Initial random point
int[] assigned_too = new int[d.size()];
IntList C = new IntList(trials*oversample);
C.add(rand.nextInt(d.size()));//Initial random point
Vec w = d.getDataWeights();
final double[] closestDist = new double[d.size()];
Arrays.fill(closestDist, Double.POSITIVE_INFINITY);
final List<Vec> X = d.getDataVectors();
        //init points to the initial center
double sqrdDistSum = ParallelUtils.run(parallel, X.size(), (start, end) ->
{
double partial_sqrd_dist = 0.0;
for (int i = start; i < end; i++)
{
double newDist = dm.dist(C.getI(0), i, X, accelCache);
newDist *= newDist;
if (newDist < closestDist[i])
closestDist[i] = newDist;
partial_sqrd_dist += closestDist[i]*w.get(i);
}
return partial_sqrd_dist;
},
(z, u) -> z + u);
for(int t = 0; t < trials; t++)
{
            //Let's sample some new points
int orig_size = C.size();
for(int i = 0; i < X.size(); i++)
if(w.get(i)*oversample*closestDist[i]/sqrdDistSum > rand.nextDouble())//sample!
C.add(i);
List<Integer> to_assign = C.subList(orig_size, C.size());
List<Vec> X_new_means = new ArrayList<>(to_assign.size());
for(int j : to_assign)
X_new_means.add(X.get(j));
VPTreeMV<Vec> vp = new VPTreeMV<>(X_new_means, dm, parallel);
sqrdDistSum = ParallelUtils.run(parallel, X.size(), (start, end) ->
{
double partial_sqrd_dist = 0.0;
IntList neighbors = new IntList();
DoubleList distances = new DoubleList();
for (int i = start; i < end; i++)
{
if(closestDist[i] == 0)
continue;
neighbors.clear();
distances.clear();
vp.search(X.get(i), 1, Math.sqrt(closestDist[i]), neighbors, distances);
if(distances.isEmpty())//no one within radius!
continue;
double newDist = distances.getD(0);
newDist *= newDist;
if (newDist < closestDist[i])
{
closestDist[i] = newDist;
assigned_too[i] = orig_size + neighbors.getI(0);
}
partial_sqrd_dist += closestDist[i]*w.get(i);
}
return partial_sqrd_dist;
},
(z, u) -> z + u);
}
Vec weights = new DenseVector(C.size());
for(int i = 0; i < X.size(); i++)
weights.increment(assigned_too[i], w.get(i));
SimpleDataSet sds = new SimpleDataSet(d.getNumNumericalVars(), new CategoricalData[0]);
for(int j : C)
{
sds.add(new DataPoint(X.get(j)));
sds.setWeight(sds.size()-1, weights.get(sds.size()-1));
}
//run k-means++ on the weighted set of selected over-samples
kppSelectionTIA(indices, rand, sds, k, dm, dm.getAccelerationCache(sds.getDataVectors(), parallel), parallel);
// kppSelection(indices, rand, sds, k, dm, dm.getAccelerationCache(sds.getDataVectors(), parallel), parallel);
//map final seeds back to original vectors
for(int i = 0; i < k; i++)
indices[i] = C.getI(indices[i]);
}
private static void ffSelection(final int[] indices, Random rand, final DataSet d, final int k, final DistanceMetric dm, final List<Double> accelCache, boolean parallel)
{
//Initial random point
indices[0] = rand.nextInt(d.size());
final double[] closestDist = new double[d.size()];
Arrays.fill(closestDist, Double.POSITIVE_INFINITY);
final List<Vec> X = d.getDataVectors();
for (int j = 1; j < k; j++)
{
//Compute the distance from each data point to the closest mean
final int newMeanIndx = indices[j - 1];//Only the most recently added mean needs to get distances computed.
            //Atomic integer stores the index of the vector with the current maximum minimum distance to a selected centroid
final AtomicInteger maxDistIndx = new AtomicInteger(0);
ParallelUtils.run(parallel, d.size(), (start, end)->
{
double maxDist = Double.NEGATIVE_INFINITY;
int max = indices[0];//set to some lazy value, it will be changed
for (int i = start; i < end; i++)
{
double newDist = dm.dist(newMeanIndx, i, X, accelCache);
closestDist[i] = Math.min(newDist, closestDist[i]);
if (closestDist[i] > maxDist)
{
maxDist = closestDist[i];
max = i;
}
}
synchronized(maxDistIndx)
{
if(closestDist[max] > closestDist[maxDistIndx.get()])
maxDistIndx.set(max);
}
});
indices[j] = maxDistIndx.get();
}
}
private static void mqSelection(final int[] indices, final DataSet d, final int k, final DistanceMetric dm, final List<Double> accelCache, boolean parallel)
{
final double[] meanDist = new double[d.size()];
//Compute the distance from each data point to the closest mean
final Vec newMean = MatrixStatistics.meanVector(d);
final List<Double> meanQI = dm.getQueryInfo(newMean);
final List<Vec> X = d.getDataVectors();
ParallelUtils.run(parallel, d.size(), (start, end)->
{
for (int i = start; i < end; i++)
meanDist[i] = dm.dist(i, newMean, meanQI, X, accelCache);
});
IndexTable indxTbl = new IndexTable(meanDist);
for(int l = 0; l < k; l++)
indices[l] = indxTbl.index(l*d.size()/k);
}
}
| 27,028 | 35.674355 | 177 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/TRIKMEDS.java | /*
* Copyright (C) 2018 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.clustering;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.atomic.AtomicIntegerArray;
import java.util.concurrent.atomic.LongAdder;
import java.util.stream.DoubleStream;
import jsat.DataSet;
import static jsat.clustering.SeedSelectionMethods.selectIntialPoints;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.TrainableDistanceMetric;
import jsat.utils.concurrent.AtomicDoubleArray;
import static jsat.utils.concurrent.ParallelUtils.*;
import static java.lang.Math.*;
import java.util.*;
import jsat.utils.IntList;
import jsat.utils.ListUtils;
import jsat.utils.concurrent.AtomicDouble;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
/**
* This class implements the TRIKMEDS algorithm for PAM clustering. It returns
* the exact same result that would have been computed by PAM, but uses the
* triangle inequality to avoid unnecessary distance calculations. Expected
* runtime is O( n sqrt(n)), but still has worst case complexity
* O(n<sup>2</sup>). It also requires that the distance metric used be a valid
* distance metric.
*
* @author Edward Raff
*/
public class TRIKMEDS extends PAM
{
public TRIKMEDS(DistanceMetric dm, Random rand, SeedSelectionMethods.SeedSelection seedSelection)
{
super(dm, rand, seedSelection);
}
public TRIKMEDS(DistanceMetric dm, Random rand)
{
super(dm, rand);
}
public TRIKMEDS(DistanceMetric dm)
{
super(dm);
}
public TRIKMEDS()
{
super();
}
@Override
public void setDistanceMetric(DistanceMetric dm)
{
if(!dm.isValidMetric())
throw new IllegalArgumentException("TRIKMEDS requires a valid distance metric, but " + dm.toString() + " does not obey all distance metric properties");
super.setDistanceMetric(dm);
}
@Override
protected double cluster(DataSet data, boolean doInit, int[] medioids, int[] assignments, List<Double> cacheAccel, boolean parallel)
{
LongAdder changes = new LongAdder();
Arrays.fill(assignments, -1);//-1, invalid category!
List<Vec> X = data.getDataVectors();
final List<Double> accel;
if(doInit)
{
TrainableDistanceMetric.trainIfNeeded(dm, data);
accel = dm.getAccelerationCache(X);
selectIntialPoints(data, medioids, dm, accel, rand, seedSelection);
}
else
accel = cacheAccel;
//N : number of training samples
final int N = data.size();
final int K = medioids.length;
//medioids = m
//m(k) : index of current medoid of cluster k,m(k) ∈ {1, . . . ,N}
final AtomicIntegerArray m = new AtomicIntegerArray(K);
//c(k) : current medoid of cluster k, that is c(k) = x(m(k))
final int[] c = medioids;
//n_1(i) : cluster index of centroid nearest to x(i)
//a(i) : cluster to which x(i) is currently assigned
final int[] a = assignments;
//d(i) : distance from x(i) to c(a(i))
final double[] d = new double[N];
final double[] d_tilde = new double[N];
//v(k) : number of samples assigned to cluster k
final AtomicDoubleArray v = new AtomicDoubleArray(K);
//V (k) : number of samples assigned to a cluster of index less than k +1
//We don't use V in this implementation. Paper uses it as a weird way of simplifying algorithm description. But not needed
        //lc(i, k) : lower bound on distance from x(i) to m(k)
double[][] lc = new double[N][K];
        //ls(i) : lower bound on the sum of in-cluster distances to x(i)
AtomicDoubleArray ls = new AtomicDoubleArray(N);
//p(k) : distance moved (teleported) by m(k) in last update
double[] p = new double[K];
//s(k) : sum of distances of samples in cluster k to medoid k
AtomicDoubleArray s = new AtomicDoubleArray(K);
List<Set<Integer>> ownedBy = new ArrayList<>(K);
for(int i = 0; i < K; i++)
ownedBy.add(new ConcurrentSkipListSet<>());
//Working sets used in updates
final AtomicDoubleArray delta_n_in = new AtomicDoubleArray(K);
final AtomicDoubleArray delta_n_out = new AtomicDoubleArray(K);
final AtomicDoubleArray delta_s_in = new AtomicDoubleArray(K);
final AtomicDoubleArray delta_s_out = new AtomicDoubleArray(K);
// initialise //
for(int k = 0; k < K; k++)
m.set(k, c[k]);
run(parallel, N, (start, end) ->
{
for(int i = start; i < end; i++)
{
double a_min_val = Double.POSITIVE_INFINITY;
int a_min_k = 0;
for(int k = 0; k < K; k++)
{
//Tightly initialise lower bounds on data-to-medoid distances
lc[i][k] = dm.dist(i, m.get(k), X, accel);
if(lc[i][k] <= a_min_val)
{
a_min_val = lc[i][k];
a_min_k = k;
}
}
//Set assignments and distances to nearest (assigned) medoid
a[i] = a_min_k;
d[i] = a_min_val;
//Update cluster count
v.getAndAdd(a[i], 1);
ownedBy.get(a_min_k).add(i);
//Update sum of distances to medoid
s.addAndGet(a[i], d[i]);
//Initialise lower bound on sum of in-cluster distances to x(i) to zero
ls.set(i, 0.0);
}
});
for(int k = 0; k < K; k++)
ls.set(m.get(k), s.get(k));
//end initialization
int iter = 0;
do
{
changes.reset();
///// update-medoids() //////
boolean[] medioid_changed = new boolean[K];
Arrays.fill(medioid_changed, false);
run(parallel, N, (i)->
{
for(int k = 0; k < K; k++)
{
                // If the bound test cannot exclude i as m(k)
if(ls.get(i) < s.get(k))
{
// Make ls(i) tight by computing and cumulating all in-cluster distances to x(i),
double ls_i_new = 0;
for(int j : ownedBy.get(k))
{
d_tilde[j] = dm.dist(i, j, X, accel);
ls_i_new += d_tilde[j];
}
ls.set(i, ls_i_new);
//// Re-perform the test for i as candidate for m(k), now with exact sums.
//If i is the new best candidate, update some cluster information
if(ls_i_new < s.get(k))
{
/* Normally we would just check once. But if we are
* doing this in parallel, we need to make the
                         * switch out safe. So synchronize and repeat the
* check to avoid any race condition. We do the
* check twice b/c the check may happen often, but
                         * only return true a few times. So let's avoid
* contention and just do a re-check after we found
* out we needed to do an update. */
synchronized (s)
{
if (ls_i_new < s.get(k))
{
s.set(k, ls_i_new);
m.set(k, i);
medioid_changed[k] = true;
for(int j : ownedBy.get(k))
d[j] = d_tilde[j];
}
}
}
//Use computed distances to i to improve lower bounds on sums for all samples in cluster k (see Figure X)
for(int j : ownedBy.get(k))
ls.accumulateAndGet(j, d[j]*v.get(k), (ls_j, d_jXv_k) -> max(ls_j, abs(d_jXv_k-ls_j)));
}
}
});
// If the medoid of cluster k has changed, update cluster information
run(parallel, K, (k)->
{
if(medioid_changed[k])
{
p[k] = dm.dist(c[k], m.get(k), X, accel);
c[k] = m.get(k);
}
            //let's sneak in zeroing out the delta arrays for the next step while we are doing a parallel loop
delta_n_in.set(k, 0.0);
delta_n_out.set(k, 0.0);
delta_s_in.set(k, 0.0);
delta_s_out.set(k, 0.0);
});
///// assign-to-clusters() //////
run(parallel, N, (i)->
{
// Update lower bounds on distances to medoids based on distances moved by medoids
for(int k = 0; k < K; k++)
lc[i][k] -= p[k];
// Use the exact distance of current assignment to keep bound tight (might save future calcs)
lc[i][a[i]] = d[i];
            // Record the current assignment and distance as a_old and d_old
int a_old = a[i];
double d_old = d[i];
// Determine nearest medoid, using bounds to eliminate distance calculations
for(int k = 0; k < K; k++)
if(lc[i][k] < d[i])
{
lc[i][k] = dm.dist(i, c[k], X, accel);
if(lc[i][k] < d[i])
{
a[i] = k;
d[i] = lc[i][k];
}
}
// If the assignment has changed, update statistics
if(a_old != a[i])
{
v.getAndDecrement(a_old);
v.getAndIncrement(a[i]);
changes.increment();
ownedBy.get(a_old).remove(i);
ownedBy.get(a[i]).add(i);
ls.set(i, 0.0);
delta_n_in.getAndIncrement(a[i]);
delta_n_out.getAndIncrement(a_old);
delta_s_in.getAndAdd(a[i], d[i]);
                delta_s_out.getAndAdd(a_old, d_old);
}
});
///// update-sum-bounds() ///////
double[] J_abs_s = new double[K];
double[] J_net_s = new double[K];
double[] J_abs_n = new double[K];
double[] J_net_n = new double[K];
for(int k = 0; k < K; k++)
{
J_abs_s[k] = delta_s_in.get(k) + delta_s_out.get(k);
J_net_s[k] = delta_s_in.get(k) - delta_s_out.get(k);
J_abs_n[k] = delta_n_in.get(k) + delta_n_out.get(k);
J_net_n[k] = delta_n_in.get(k) - delta_n_out.get(k);
}
run(parallel, N, (start, end)->
{
for(int i = start; i < end; i++)
{
double ls_i_delta = 0;
for(int k = 0; k < K; k++)
ls_i_delta -= min(J_abs_s[k]-J_net_n[k]*d[i], J_abs_n[k]*d[i] - J_net_s[k]);
ls.getAndAdd(i, ls_i_delta);
}
});
}
while( changes.sum() > 0 && iter++ < iterLimit);
return streamP(DoubleStream.of(d), parallel).map(x->x*x).sum();
}
/**
* Computes the medoid of the data
* @param parallel whether or not the computation should be done using multiple cores
* @param X the list of all data
* @param dm the distance metric to get the medoid with respect to
* @return the index of the point in <tt>X</tt> that is the medoid
*/
public static int medoid(boolean parallel, List<? extends Vec> X, DistanceMetric dm)
{
IntList order = new IntList(X.size());
ListUtils.addRange(order, 0, X.size(), 1);
List<Double> accel = dm.getAccelerationCache(X, parallel);
return medoid(parallel, order, X, dm, accel);
}
/**
* Computes the medoid of a sub-set of data
* @param parallel whether or not the computation should be done using multiple cores
* @param indecies the indexes of the points to get the medoid of
* @param X the list of all data
* @param dm the distance metric to get the medoid with respect to
* @param accel the acceleration cache for the distance metric
* @return the index value contained within indecies that is the medoid
*/
public static int medoid(boolean parallel, Collection<Integer> indecies, List<? extends Vec> X, DistanceMetric dm, List<Double> accel)
{
final int N = X.size();
AtomicDoubleArray l = new AtomicDoubleArray(N);
AtomicDouble e_cl = new AtomicDouble(Double.POSITIVE_INFINITY);
IntList rand_order = new IntList(indecies);
Collections.shuffle(rand_order, RandomUtil.getRandom());
ThreadLocal<double[]> d_local = ThreadLocal.withInitial(() -> new double[N]);
ParallelUtils.streamP(rand_order.streamInts(), parallel).forEach(i ->
{
double[] d = d_local.get();
double d_avg = 0;
if (l.get(i) < e_cl.get())
{
for (int j : indecies)
d_avg += (d[j] = dm.dist(i, j, X, accel));
d_avg /= indecies.size() - 1;
final double l_i = d_avg;
                // set l(i) to be tight, that is l(i) = E(i)
l.set(i, l_i);
if (l_i < e_cl.get())//We might be the best?
                    e_cl.getAndUpdate(val -> min(val, l_i));//atomic set to the minimum
//l(j)←max(l(j), |l(i)−d(j)|) // using ||x(i)−x(j)|| to possibly improve bound.
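                //(valid because, by the triangle inequality, d(j,k) >= |d(i,k) - d(i,j)| for every
                // k in the set, so the average distance from j is at least |E(i) - d(i,j)| once
                // l(i) = E(i) is exact)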
for (int j : indecies)
l.getAndUpdate(j, (l_j) -> max(l_j, abs(l_i - d[j]) ) );
}
});
        //OK, who had the lowest energy? That's our medoid.
for(int i : indecies)
            if(l.get(i) == e_cl.get())//THIS IS SAFE, we explicitly set l and e_cl to the same exact values in the algorithm. So we can do a hard equality check on the doubles
return i;
        return -1;//Some error occurred
}
}
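/*
 * Illustrative usage sketch, assuming a DataSet named "data" has already been
 * loaded. TRIKMEDS extends PAM, so the inherited clustering API is used and
 * the assignments match what PAM would have produced:
 *
 * TRIKMEDS tk = new TRIKMEDS(new EuclideanDistance());
 * int[] assignments = tk.cluster(data, 10, true, null); //10 clusters, in parallel
 */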
| 15,666 | 40.337731 | 174 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/VBGMM.java | /*
* This code contributed under the Public Domain.
*/
package jsat.clustering;
import static java.lang.Math.log;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.stream.Collectors;
import jsat.DataSet;
import jsat.SimpleDataSet;
import jsat.classifiers.DataPoint;
import jsat.clustering.kmeans.HamerlyKMeans;
import jsat.distributions.multivariate.MultivariateDistribution;
import jsat.distributions.multivariate.NormalM;
import jsat.linear.DenseMatrix;
import jsat.linear.DenseVector;
import jsat.linear.Matrix;
import jsat.linear.MatrixStatistics;
import jsat.linear.Vec;
import jsat.math.MathTricks;
import jsat.math.SpecialMath;
import jsat.utils.concurrent.ParallelUtils;
/**
 * The Variational Bayesian Gaussian Mixture Model (VBGMM) extends the standard
* {@link EMGaussianMixture GMM} to adaptively select the number of clusters in
* the data.<br>
* <br>
* See:
* <ul>
* <li> H. Attias, “A Variational Baysian Framework for Graphical Models,” in
* Advances in Neural Information Processing Systems 12, S. A. Solla, T. K.
* Leen, and K. Müller, Eds. MIT Press, 2000, pp. 209–215.</li>
* <li>A. Corduneanu and C. M. Bishop, “Variational Bayesian Model Selection
* for Mixture Distributions,” in Proceedings Eighth International Conference on
* Artificial Intelligence and Statistics, 2001, pp. 27–34.</li>
* </ul>
*
* @author Edward Raff
*/
public class VBGMM implements Clusterer, MultivariateDistribution
{
/**
* Prior for the Dirichlet distribution
*/
protected double alpha_0 = 1e-5;
/**
* Prior for the mean of each normal
*
*/
protected double beta_0 = 1.0;
private double prune_tol = 1e-5;
protected NormalM[] normals;
/**
* The log of the distribution weights pi_{1 ... k}
*/
protected double[] log_pi;
protected int max_k = 200;
private int maxIterations = 2000;
protected COV_FIT_TYPE cov_type = COV_FIT_TYPE.FULL;
static public enum COV_FIT_TYPE
{
/**
* Estimates only the diagonal of the covariance matrix. This saves both
* computational time and memory, and is easier to estimate than the
* full covariance matrix if there are many features. However, it can
* not represent as many distribution shapes as a full covariance
* matrix.
*/
DIAG
{
@Override
public void fit(List<Vec> X, Matrix S_k, double[] contrib, Vec xk, double Nk)
{
int N = contrib.length;
int d = xk.length();
S_k.zeroOut();
Vec diag = S_k.getRowView(0);
//(10.53) in Bishop, but only the diagonal - which is just the variance of each variable
for(int n = 0; n < N; n++)
{
//double r_nk = r[k][n];
double r_nk = contrib[n];
Vec x_n = X.get(n);
for(int j = 0; j < d; j++)
diag.increment(j, r_nk*Math.pow(xk.get(j)-x_n.get(j), 2));
}
diag.mutableDivide(Nk + 1e-6);
}
@Override
public void updateWishart(Matrix W_inv_0, Matrix W_inv_k, Matrix S_k, Vec xk, double Nk, Vec m_0, double beta_0, double beta_k, double nu_k)
{
int d = W_inv_0.cols();
//(10.62) in Bishop
W_inv_0.copyTo(W_inv_k);
W_inv_k.mutableAdd(Nk, S_k);
Vec W_inv_k_diag = W_inv_k.getRowView(0);
//adding small value to diagonal to make cov stable
W_inv_k_diag.mutableAdd(1e-6);
//note (beta_0 + Nk) denominator is same as (10.60)
double β0_Nk_over_β0_plus_Nk = beta_0 * Nk / beta_k ;
Vec tmp = xk.clone();
tmp.mutableSubtract(m_0);
tmp.applyFunction(v->v*v);//squared, b/c outer product would be x*x along the diag
// Matrix.OuterProductUpdate(W_inv_k, tmp, tmp, β0_Nk_over_β0_plus_Nk);
W_inv_k_diag.mutableAdd(β0_Nk_over_β0_plus_Nk, tmp);
//Normalize the covariance matrix now so that we don't have to
                //multiply by nu_k later in (10.64), making it easier to re-use
//the NormalM class
W_inv_k_diag.mutableDivide(nu_k + 1e-6);
}
@Override
public Matrix allocate(int d)
{
//stored in a single row of a matrix
return new DenseMatrix(1, d);
}
@Override
public NormalM asNormal(Vec mean, Matrix cov)
{
//"cov" is actually the diagonal
return new NormalM(mean, cov.getRowView(0));
}
},
/**
* Estimates a full covariance matrix for each cluster. This is the
* standard method presented in textbooks and papers. If you have more
* features than data points, you may not be able to reliably estimate
* this information.
*/
FULL
{
@Override
public void fit(List<Vec> X, Matrix S_k, double[] contrib, Vec xk, double Nk)
{
int N = contrib.length;
int d = xk.length();
S_k.zeroOut();
//(10.53) in Bishop
DenseVector tmp = new DenseVector(d);
for(int n = 0; n < N; n++)
{
//double r_nk = r[k][n];
double r_nk = contrib[n];
X.get(n).copyTo(tmp);
tmp.mutableSubtract(xk);
Matrix.OuterProductUpdate(S_k, tmp, tmp, r_nk);
}
S_k.mutableMultiply(1.0/(Nk + 1e-6));
}
@Override
public void updateWishart(Matrix W_inv_0, Matrix W_inv_k, Matrix S_k, Vec xk, double Nk, Vec m_0, double beta_0, double beta_k, double nu_k)
{
int d = W_inv_0.rows();
//(10.62) in Bishop
W_inv_0.copyTo(W_inv_k);
W_inv_k.mutableAdd(Nk, S_k);
//adding small value to diagonal to make cov stable
for(int i = 0; i < d; i++)
W_inv_k.increment(i, i, 1e-6);
//note (beta_0 + Nk) denominator is same as (10.60)
double β0_Nk_over_β0_plus_Nk = beta_0 * Nk / beta_k ;
Vec tmp = xk.clone();
tmp.mutableSubtract(m_0);
Matrix.OuterProductUpdate(W_inv_k, tmp, tmp, β0_Nk_over_β0_plus_Nk);
//Normalize the covariance matrix now so that we don't have to
                //multiply by nu_k later in (10.64), making it easier to re-use
//the NormalM class
W_inv_k.mutableMultiply(1.0/nu_k);
}
@Override
public Matrix allocate(int d)
{
return new DenseMatrix(d, d);
}
@Override
public NormalM asNormal(Vec mean, Matrix cov)
{
return new NormalM(mean, cov);
}
};
/**
*
* @param X the entire dataset of vectors
* @param S_k the location to store the covariance estimate
* @param contrib the weight each data point will contribute to the
* covariance estimate
* @param xk the mean to use as the current center of the data
* @param Nk the total weight of the points under consideration, should
* be equal to the sum of all values in <i>contrib</i>
*/
abstract public void fit(List<Vec> X, Matrix S_k, double[] contrib, Vec xk, double Nk);
/**
* This method performs the Covariance matrix update that corresponds to
         * the result of the Wishart distributional prior in the VBGMM model.
* This is equation (10.62) in Bishop's book.
*
         * @param W_inv_0 the prior over the covariances of the whole data
* @param W_inv_k the location to store the result of this function call
* @param S_k the estimated covariance of the current cluster
* @param xk the estimated mean of the current cluster
* @param Nk the total weight allocated to the current cluster
* @param m_0 the prior over the means of the whole dataset
* @param beta_0 the prior weight for the mean prior
* @param beta_k the resulting weight estimate for the current cluster
         * @param nu_k the resulting degrees of freedom estimated for the
* current cluster
*/
abstract public void updateWishart(Matrix W_inv_0, Matrix W_inv_k, Matrix S_k, Vec xk, double Nk, Vec m_0, double beta_0, double beta_k, double nu_k);
/**
         * Allocates a Matrix object that will be used to store the covariance
* matrix. The matrix may not be a full d x d matrix if the chosen
* covariance type uses a more compact approximation or representation.
*
* @param d the number of features
* @return a matrix that future {@link COV_FIT_TYPE} functions will use
* to update and alter.
*/
abstract public Matrix allocate(int d);
/**
* Returns a normal distribution object that can be used to sample from
* for the current cluster.
*
* @param mean the mean of the cluster
* @param cov the covariance matrix as returned by {@link #allocate(int)
* } and updated with {@link #updateWishart(jsat.linear.Matrix, jsat.linear.Matrix, jsat.linear.Matrix, jsat.linear.Vec, double, jsat.linear.Vec, double, double, double)
* }.
         * @return a normal distribution object to use corresponding to samples
* from the given parameterization.
*/
abstract public NormalM asNormal(Vec mean, Matrix cov);
}
public VBGMM()
{
this(COV_FIT_TYPE.FULL);
}
public VBGMM(COV_FIT_TYPE cov_type)
{
this.cov_type = cov_type;
}
public VBGMM(VBGMM toCopy)
{
this.max_k = toCopy.max_k;
this.maxIterations = toCopy.maxIterations;
this.prune_tol = toCopy.prune_tol;
this.beta_0 = toCopy.beta_0;
this.alpha_0 = toCopy.alpha_0;
if(toCopy.normals != null)
{
this.normals = Arrays.copyOf(toCopy.normals, toCopy.normals.length);
for(int i = 0; i < this.normals.length; i++)
this.normals[i] = this.normals[i].clone();
this.log_pi = Arrays.copyOf(toCopy.log_pi, toCopy.log_pi.length);
}
}
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
int k_max = Math.min(max_k, dataSet.size()/2);
int N = dataSet.size();
int d = dataSet.getNumNumericalVars();
List<Vec> X = dataSet.getDataVectors();
normals = new NormalM[k_max];
boolean[] active = new boolean[k_max];
Arrays.fill(active, true);
/**
* Information on the response / "contribution" of each data point n to
* cluster k. Bishop and others denote this as r_nk. We will transpose
* this to be r_kn, because we almost always iterate over all n while
* working with a fixed k. Doing this will result in better caching and
* pre-fetch behavior.
*/
double[][] r = new double[k_max][N];
//(10.51)
double[] N_k = new double[k_max];
Vec[] X_bar_k = new Vec[k_max];
Matrix[] S_k = new Matrix[k_max];
double[] beta = new double[k_max];
Arrays.fill(beta, d);
double log_prune_tol = Math.log(prune_tol);
/**
* Dirichlet distribution parameters alpha
*/
double[] alpha = new double[k_max];
/**
* Prior over the means of the dataset, should be set to the mean of the dataset (could be given by the user, but not dealing with that)
*/
Vec m_0 = new DenseVector(d);
MatrixStatistics.meanVector(m_0, dataSet);
        //using R as a scratch space for a quick init
Arrays.fill(r[0], 1.0);
/**
* Prior over the covariances of the dataset. Set from the dataset cov,
         * could be given, but not dealing with that. It's the inverse because Bishop
* deals with the precision matrix, which is the inverse of the
* covariance.
*/
Matrix W_inv_0 = cov_type.allocate(d);
cov_type.fit(X, W_inv_0, r[0], m_0, N);
Arrays.fill(r[0], 0.0);//Done using as temp space
/**
* The estimated mean for each cluster
*/
Vec[] m_k = new Vec[k_max];
/**
* The estimated covariance matrix for each cluster
*/
Matrix[] W_inv_k = new Matrix[k_max];
for(int k = 0; k < k_max; k++)
{
m_k[k] = new DenseVector(d);
W_inv_k[k] = cov_type.allocate(d);
S_k[k] = cov_type.allocate(d);
}
/**
* Prior over the degrees of freedom in the model.
*/
double nu_0 = d;
/**
* The degrees of freedom
*/
double[] nu_k = new double[k_max];
Arrays.fill(nu_k, 1.0);
log_pi = new double[k_max];
/**
* The log precision term for each component (10.65) in Bishop, or (21.131) in Murphy
*/
double[] log_precision = new double[k_max];
//Initialization by k-means
HamerlyKMeans kMeans = new HamerlyKMeans();
designations = kMeans.cluster(dataSet, k_max, parallel, designations);
//Everything is set to 0 right now, so assign to closest
for(int n = 0; n < N; n++)
{
r[designations[n]][n] = 1.0;
log_pi[designations[n]] += 1;
}
//Set central locations based on k-means
for(int k = 0; k < k_max; k++)
{
kMeans.getMeans().get(k).copyTo(m_k[k]);
if(log_pi[k] == 0)
active[k]= false;
log_pi[k] = Math.log(log_pi[k])- Math.log(N);
}
        //We will leave log_precision alone as all zeros, since the init would give the same prior to everyone, so there is no need to compute it
double prevLog = Double.POSITIVE_INFINITY;
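        /*
         * Main variational EM loop: the M-step re-estimates each component's
         * weight, mean, and covariance from the current responsibilities r,
         * and the E-step re-computes the responsibilities from the updated
         * components. Components whose mixing weight falls below the prune
         * tolerance are marked inactive, which is how the final number of
         * clusters is selected. The loop exits early once the total log
         * probability changes by less than a relative 1e-5.
         */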
for(int iteration = 0; iteration < maxIterations; iteration++)
{
//M-Step
ParallelUtils.run(parallel, k_max, (k)->
{
if(!active[k])
return;
double Nk = 0.0;
DenseVector xk = new DenseVector(d);
for(int n = 0; n < N; n++)
{
double r_nk = r[k][n];
Vec x_n = X.get(n);
Nk += r_nk;//(10.51) in Bishop
xk.mutableAdd(r_nk, x_n);//(10.52) is Bishop
}
N_k[k] = Nk;
//(10.52) is Bishop, finish average
xk.mutableDivide(Nk + 1e-6);
X_bar_k[k] = xk;
//(10.53) in Bishop will be handled in a scenario dependent manner by cov_type
cov_type.fit(X, S_k[k], r[k], xk, Nk);
//(10.58) in Bishop
alpha[k] = alpha_0 + Nk;
//(10.60) in Bishop
beta[k] = beta_0 + Nk;
//(10.61) in Bishop
m_k[k].zeroOut();
m_k[k].mutableAdd(beta_0, m_0);
m_k[k].mutableAdd(Nk, xk);
m_k[k].mutableDivide(beta[k] + 1e-6);
//(10.63)
nu_k[k] = nu_0 + Nk;
//(10.62) in Bishop will be handled in a scenario dependent manner
cov_type.updateWishart(W_inv_0, W_inv_k[k], S_k[k], xk, Nk, m_0, beta_0, beta[k], nu_k[k]);
});
//E-step prep
double alpha_sum = DenseVector.toDenseVec(alpha).sum();
ParallelUtils.run(parallel, k_max, (k)->
{
if(!active[k])
return;
//Let cov_type create normal, b/c W_inv_k might not actually be a full covariance matrix
normals[k] = cov_type.asNormal(m_k[k], W_inv_k[k]);
//(10.66) in Bishop
log_pi[k] = SpecialMath.digamma(alpha[k]) - SpecialMath.digamma(alpha_sum);
if(log_pi[k] < log_prune_tol)//This cluster has gotten too small, prune it out
active[k] = false;
// else
// System.out.println("\t" +Math.exp(log_pi[k]));
//(10.65) in Bishop, sans log(det) term which will be added by NormalM class later
log_precision[k] = d * Math.log(2);// + normals[k].getLogCovarianceDeterminant();
for(int i = 0; i < d; i++)
log_precision[k] += SpecialMath.digamma((nu_k[k]-i)/2.0);
log_precision[k] /= 2;// b/c log(Δ^(1/2)) = 1/2 * log(Δ), and
//(10.65) give us log(Δ) but we only use log(Δ^(1/2)) later on
});
//E-Step
//Fully equation of r is:
//\ln \rho_{n k}=E\left[\ln \pi_{k}\right]+\frac{1}{2} E\left[\ln \left|\lambda_{k}\right|\right]-\frac{D}{2} \ln (2 \pi)-\frac{1}{2} E_{\mu_{k} \Delta_{k}}\left[\left(\mathbf{x}_{n}-\mu_{k}\right)^{T} \Lambda_{k}\left(\mathbf{x}_{k}-\mu_{k}\right)\right]
//where r_{n k}=\frac{\rho_{n k}}{\sum_{i=1}^{K} \rho_{n j}}
double log_prob_sum = ParallelUtils.run(parallel, k_max, (k)->
{
if(!active[k])
{
//You have no log prob contribution
return 0.0;
}
double log_prob_contrib = 0;
//(10.64) in Bishop, applied to every data point
for(int n = 0; n < N; n++)
{
//no nu_k multiply in stated (10.64) b/c we normalized the
//covariance matrix earlier
//The call to normals also include the log_det factor that
//was supposed to be in (10.65)
double proj = normals[k].logPdf(X.get(n));
proj -= d/(2*beta[k]);
log_prob_contrib += (r[k][n] = proj + log_pi[k] + log_precision[k]);
}
return log_prob_contrib;
}, (a,b)->a+b);
// System.out.println(Math.abs((prevLog-log_prob_sum)/prevLog) + " " + log_prob_sum);
if(Math.abs((prevLog-log_prob_sum)/prevLog) < 1e-5)
break;
prevLog = log_prob_sum;
//Apply exp to r to go from log form to responsibility/contribution form
//include extra normalization to deal with roundoff of non-active components
ParallelUtils.run(parallel, N, (n)->
{
double sum = 0;
for(int k = 0; k < k_max; k++)
if(active[k])
sum += (r[k][n] = Math.exp(r[k][n]));
for(int k = 0; k < k_max; k++)
if(active[k])
r[k][n] /= sum;
});
}
//How many clusters did we get?
int still_active = active.length;
for(boolean still_good : active)
if(!still_good)
still_active--;
int final_k = still_active;
        //We've got clusters, let's do some pruning now
{
int cur_pos = 0;
for(int k = 0; k < k_max; k++)
if(active[k])
{
normals[cur_pos] = normals[k];
log_pi[cur_pos++] = log_pi[k];
}
normals = Arrays.copyOf(normals, final_k);
log_pi = Arrays.copyOf(log_pi, final_k);
}
for(int n = 0; n < N; n++)
{
int cur_pos = 0;
//move active indexes up
int k_max_indx = 0;
double k_max_value = 0;
for(int k = 0; k < k_max; k++)
if(active[k])
{
//we will alter r for now, because maybe we want to use that code later?
                //not much real work on top of finding which index won anyway
if((r[cur_pos][n] = r[k][n]) > k_max_value)
{
k_max_indx = cur_pos;
k_max_value = r[cur_pos][n];
}
cur_pos++;
}
//Mark final cluster id
designations[n] = k_max_indx;
}
return designations;
}
public void setAlphaPrior(double alpha_0) {
this.alpha_0 = alpha_0;
}
public double getAlphaPrior() {
return alpha_0;
}
public void setBetaPrior(double beta_0) {
this.beta_0 = beta_0;
}
public double getBetaPrior() {
return beta_0;
}
public void setMaxIterations(int maxIterations)
{
this.maxIterations = maxIterations;
}
public int getMaxIterations()
{
return maxIterations;
}
@Override
public VBGMM clone()
{
return new VBGMM(this);
}
@Override
public double logPdf(Vec x)
{
double pdf = pdf(x);
if(pdf == 0)
return -Double.MAX_VALUE;
return log(pdf);
}
@Override
public double pdf(Vec x)
{
double pdf = 0;
for(int i = 0; i < normals.length; i++)
pdf += Math.exp(log_pi[i] + normals[i].logPdf(x));
return pdf;
}
public double[] mixtureAssignments(Vec x)
{
double[] assignments = new double[normals.length];
for(int i = 0; i < normals.length; i++)
assignments[i] = log_pi[i] + normals[i].logPdf(x);
MathTricks.softmax(assignments, false);
return assignments;
}
@Override
public <V extends Vec> boolean setUsingData(List<V> dataSet, boolean parallel)
{
SimpleDataSet sds = new SimpleDataSet(dataSet.stream().map(v->new DataPoint(v)).collect(Collectors.toList()));
this.cluster(sds, parallel);
return true;
}
@Override
public List<Vec> sample(int count, Random rand)
{
List<Vec> samples = new ArrayList<>(count);
        //First we need to figure out which of the mixtures to sample from
//So generate [0,1] uniform values to determine
double[] priorTargets = new double[count];
for(int i = 0; i < count; i++)
priorTargets[i] = rand.nextDouble();
Arrays.sort(priorTargets);
int subSampleSize = 0;
int currentGaussian = 0;
int pos = 0;
double a_kSum = 0.0;
while(currentGaussian < normals.length)
{
a_kSum += Math.exp(log_pi[currentGaussian]);
while(pos < count && priorTargets[pos++] < a_kSum)
subSampleSize++;
samples.addAll(normals[currentGaussian++].sample(subSampleSize, rand));
}
return samples;
}
}
| 23,816 | 35.03177 | 267 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/biclustering/Bicluster.java | /*
* This code was contributed under the public domain.
*/
package jsat.clustering.biclustering;
import java.util.List;
import jsat.DataSet;
/**
*
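 * Common interface for biclustering algorithms, which simultaneously group the
 * rows (data points) and the columns (features) of a data set. A minimal usage
 * sketch, assuming {@code algo} is some {@link Bicluster} implementation and
 * {@code data} is a numeric data set:
 * <pre>{@code
 * List<List<Integer>> rows = new ArrayList<>();
 * List<List<Integer>> cols = new ArrayList<>();
 * algo.bicluster(data, 3, true, rows, cols); //look for 3 biclusters
 * //rows.get(i) and cols.get(i) describe the i'th bicluster
 * }</pre>
 *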
* @author Edward Raff
*/
public interface Bicluster
{
default public int[] cluster(DataSet dataSet, int lowK, int highK, boolean parallel, int[] designations)
{
if(designations == null)
designations = new int[dataSet.size()];
return designations;
}
/**
* Computes a biclustering of the dataset, where the goal is to identify a
* fixed number of biclusters.
*
* @param dataSet the dataset to perform biclustering on
* @param clusters the number of clusters to search for
* @param parallel whether or not to use parallel computation
     * @param row_assignments This will store the assignment of rows to each
     * bicluster. After this function returns, the primary list will have a
* sub-list for each bicluster. The i'th sub list contains the rows of the
* matrix that belong to the i'th bicluster.
* @param col_assignments This will store the assignment of columns to each
* bicluster. After this function returns, the primary list will have a
* sub-list for each bicluster. The i'th sub list contains the columns /
* features of the matrix that belong to the i'th bicluster.
*/
public void bicluster(DataSet dataSet, int clusters, boolean parallel,
List<List<Integer>> row_assignments,
List<List<Integer>> col_assignments);
/**
     * Computes a biclustering of the dataset, where the goal is to identify a
     * fixed number of biclusters, using a single thread of computation.
*
* @param dataSet the dataset to perform biclustering on
* @param clusters the number of clusters to search for
     * @param row_assignments This will store the assignment of rows to each
     * bicluster. After this function returns, the primary list will have a
* sub-list for each bicluster. The i'th sub list contains the rows of the
* matrix that belong to the i'th bicluster.
* @param col_assingments This will store the assignment of columns to each
* bicluster. After this function returns, the primary list will have a
* sub-list for each bicluster. The i'th sub list contains the columns /
* features of the matrix that belong to the i'th bicluster.
*/
default public void bicluster(DataSet dataSet, int clusters,
List<List<Integer>> row_assignments,
List<List<Integer>> col_assingments)
{
bicluster(dataSet, clusters, false, row_assignments, col_assingments);
}
/**
* Indicates whether the model knows how to cluster using weighted data
* points. If it does, the model will train assuming the weights. The values
* returned by this method may change depending on the parameters set for
* the model.
*
* @return <tt>true</tt> if the model supports weighted data, <tt>false</tt>
* otherwise
*/
default public boolean supportsWeightedData()
{
return false;
}
public Bicluster clone();
}
| 3,153 | 37.938272 | 108 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/biclustering/ConsensusScore.java | /*
* This code was contributed under the public domain.
*/
package jsat.clustering.biclustering;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import jsat.linear.DenseMatrix;
import jsat.linear.Matrix;
import jsat.utils.Pair;
import jsat.utils.concurrent.ParallelUtils;
/**
 * Implementation of the Consensus Score method for evaluating the quality of a
* biclustering compared to a known ground truth.<br>
* <br>
* See: [1] S. Hochreiter et al., “<i>FABIA: factor analysis for bicluster acquisition</i>,” Bioinformatics, vol. 26, no. 12, pp. 1520–1527, Jun. 2010.
* @author Edward Raff
*/
public class ConsensusScore
{
/**
* Computes the Consensus score to measure the quality of a biclustering
* algorithm.
*
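     * The score works on biclusters viewed as sets of (row, column) cells: each
     * pair of biclusters is compared with the Jaccard similarity of their cell
     * sets, biclusters are matched one-to-one across the two biclusterings, and
     * the matched similarities are averaged over the size of the larger
     * biclustering. For example, a found bicluster that covers exactly half of
     * the cells of its matched ground-truth bicluster (and nothing else)
     * contributes a similarity of 1/2 to that average.
     * 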
* @param parallel whether or not multiple threads should be used in the
* computation of this score.
* @param rows_truth A list for each bicluster, where the i'th sub-list
* contains the rows of the i'th biclustering. This biclustering should
* correspond to the ground-truth that is known in advance.
* @param cols_truth A list for each bicluster, where the i'th sub-list
* contains the columns of the i'th biclustering. This biclustering should
* correspond to the ground-truth that is known in advance.
* @param rows_found A list for each bicluster, where the i'th sub-list
* contains the rows of the i'th biclustering. This biclustering should
* correspond to the found clustering that we wish to evaluate.
* @param cols_found A list for each bicluster, where the i'th sub-list
* contains the columns of the i'th biclustering. This biclustering should
* correspond to the found clustering that we wish to evaluate.
     * @return a score in the range of [0, 1], where 1 indicates a perfect
     * biclustering, and 0 indicates a biclustering with no overlap with the
     * ground truth.
*/
public static double score(boolean parallel,
List<List<Integer>> rows_truth, List<List<Integer>> cols_truth,
List<List<Integer>> rows_found, List<List<Integer>> cols_found)
{
int k_true = rows_truth.size();
int k_found = rows_found.size();
//(1) compute similarities between all pairs of biclusters, where one is
//from the first set and the other from the second set;
double[][] cost_matrix = new double[k_true][k_found];
ParallelUtils.run(parallel, k_true, (i)->
{
Set<Pair<Integer, Integer>> true_ci = coCluster_to_set(rows_truth, i, cols_truth);
for(int j = 0; j < k_found; j++)
{
Set<Pair<Integer, Integer>> true_cj = coCluster_to_set(rows_found, j, cols_found);
int A_size = true_ci.size();
int B_size = true_cj.size();
                //keep only the shared (row, column) cells, so true_cj now holds the intersection
                true_cj.removeIf(pair-> !true_ci.contains(pair));
                int intersection = true_cj.size();
                //Jaccard distance: 1 - |A ∩ B| / |A ∪ B|, where |A ∪ B| = |A| + |B| - |A ∩ B|
                cost_matrix[i][j] = 1.0-intersection/(double)(A_size+B_size-intersection);
}
});
//(2) assign the biclusters of one set to biclusters of the other set by
//maximizing the assignment by the Munkres algorithm (Munkres, 1957);
Map<Integer, Integer> assignments = assignment(new DenseMatrix(cost_matrix));
double score_sum = 0;
//(3) divide the sum of similarities of the assigned biclusters ...
for(Map.Entry<Integer, Integer> pair : assignments.entrySet())
score_sum += (1.0-cost_matrix[pair.getKey()][pair.getValue()]);
//by the number of biclusters of the larger set
return score_sum/Math.max(k_true, k_found);
}
private static Set<Pair<Integer, Integer>> coCluster_to_set(List<List<Integer>> rows_truth, int q, List<List<Integer>> cols_truth) {
Set<Pair<Integer, Integer>> true_c_i = new HashSet<>();
List<Integer> rows = rows_truth.get(q);
List<Integer> cols = cols_truth.get(q);
for(int i = 0; i < rows.size(); i++)
{
for(int j = 0; j < cols.size(); j++)
true_c_i.add(new Pair<>(rows.get(i), cols.get(j)));
}
return true_c_i;
}
private static Map<Integer, Integer> assignment(Matrix A)
{
Map<Integer, Integer> assignments = new HashMap<>();
boolean[] taken = new boolean[A.cols()];
//TODO, greedy assignment that is not optimal. Replace with hungarian or something
int min_indx;
double best_score;
for(int i = 0; i < A.rows(); i++)
{
min_indx = -1;
best_score = Double.POSITIVE_INFINITY;
for(int j = 0; j < A.cols(); j++)
{
double score = A.get(i, j);
if(score < best_score && !taken[j])
{
best_score = score;
min_indx = j;
}
}
assignments.put(i, min_indx);
taken[min_indx] = true;
if(assignments.size() == Math.min(A.rows(), A.cols()))
break;//Nothing left to match up
}
return assignments;
}
}
| 5,406 | 39.962121 | 152 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/biclustering/SpectralCoClustering.java | /*
* This code was contributed under the public domain.
*/
package jsat.clustering.biclustering;
import java.util.List;
import jsat.DataSet;
import jsat.SimpleDataSet;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.DataPoint;
import jsat.clustering.Clusterer;
import jsat.clustering.KClusterer;
import jsat.clustering.kmeans.GMeans;
import jsat.clustering.kmeans.HamerlyKMeans;
import jsat.linear.DenseVector;
import jsat.linear.IndexValue;
import jsat.linear.Matrix;
import jsat.linear.SubMatrix;
import jsat.linear.TruncatedSVD;
import jsat.linear.Vec;
import jsat.utils.IntList;
/**
*
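 * Spectral co-clustering finds biclusters by normalizing the data matrix,
 * taking a truncated SVD of the normalized matrix, and then clustering the
 * stacked row and column embeddings with an ordinary clustering algorithm.
 * A minimal usage sketch, assuming {@code data} is a numeric data set:
 * <pre>{@code
 * SpectralCoClustering scc = new SpectralCoClustering(SpectralCoClustering.InputNormalization.SCALE);
 * List<List<Integer>> rows = new ArrayList<>();
 * List<List<Integer>> cols = new ArrayList<>();
 * scc.bicluster(data, 2, false, rows, cols); //search for 2 biclusters
 * }</pre>
 *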
* @author Edward Raff
*/
public class SpectralCoClustering implements Bicluster
{
static public enum InputNormalization
{
SCALE
{
@Override
public Matrix normalize(Matrix A, DenseVector R, DenseVector C)
{
return row_col_normalize(A, R, C);
}
},
BISTOCHASTIZATION
{
@Override
public Matrix normalize(Matrix A, DenseVector R, DenseVector C)
{
//Init locations to store final normalization vectors
//make equal to no normalization at first, and accumulate after every step
DenseVector R_tmp = R.clone();
R_tmp.zeroOut();
R_tmp.mutableAdd(1.0);
DenseVector C_tmp = C.clone();
C_tmp.zeroOut();
C_tmp.mutableAdd(1.0);
Matrix A_prev = A;
double diff = Double.POSITIVE_INFINITY;
int iter = 0;
while(iter++ < 1000 && diff > 1e-4)
{
A_prev = A;
A = row_col_normalize(A, R, C);
                    //Compute the "norm" of the two matrices
                    //below is not quite the norm of the 2 matrices, but close
//enough, we just need to know if we have converged
diff = 0;
for(int row = 0; row < A.rows(); row++)
diff += A.getRowView(row).pNormDist(2, A_prev.getRowView(row));
diff /= A.rows();
R_tmp.mutablePairwiseMultiply(R);
C_tmp.mutablePairwiseMultiply(C);
}
R_tmp.copyTo(R);
C_tmp.copyTo(C);
return A;
}
};
/**
* Computes a normalization of the input matrix that allows a later SVD
* step to better reveal block structure in the underlying data.
*
* @param A the input matrix to normalize, which will not be altered
* @param R the location to store the row normalization matrix to apply
* to the original matrix to get the result. This may be filled with
* constants if no subsequent use of this should occur in the SVD
* decomposition.
* @param C the location to store the column normalization matrix to
* apply to the original matrix to get the result. This may be filled
* with constants if no subsequent use of this should occur in the SVD
* decomposition.
* @return A new matrix that has been normalized
*/
abstract public Matrix normalize(Matrix A, DenseVector R, DenseVector C);
}
private Clusterer baseClusterAlgo;
private InputNormalization inputNormalization;
public SpectralCoClustering()
{
this(InputNormalization.SCALE);
}
public SpectralCoClustering(InputNormalization normalization)
{
this(normalization, new GMeans(new HamerlyKMeans()));
}
public SpectralCoClustering(InputNormalization normalization, Clusterer baseCluster)
{
setBaseClusterAlgo(baseCluster);
setInputNormalization(normalization);
}
public void setInputNormalization(InputNormalization inputNormalization)
{
this.inputNormalization = inputNormalization;
}
public InputNormalization getInputNormalization()
{
return inputNormalization;
}
public void setBaseClusterAlgo(Clusterer baseClusterAlgo)
{
this.baseClusterAlgo = baseClusterAlgo;
}
public Clusterer getBaseClusterAlgo()
{
return baseClusterAlgo;
}
@Override
public void bicluster(DataSet dataSet, int clusters, boolean parallel, List<List<Integer>> row_assignments, List<List<Integer>> col_assignments)
{
//1. Given A, form An = D_1^{−1/2} A D_2^{−1/2}
Matrix A = dataSet.getDataMatrix();
DenseVector R = new DenseVector(A.rows());
DenseVector C = new DenseVector(A.cols());
Matrix A_n = inputNormalization.normalize(A, R, C);
//2. Compute l = ceil(log2 k) singular vectors of A_n, u2, . . . u_l+1 and v2, . . . v_l+1, and form the matrix Z as in (12)
int l = (int) Math.ceil(Math.log(clusters)/Math.log(2.0));
//A_n has r rows and c columns. We are going to make a new data matrix Z
//Z will have (r+c) rows, and l columns.
        SimpleDataSet Z = create_Z_dataset(A_n, l, R, C, inputNormalization);
KClusterer to_use;
if(baseClusterAlgo instanceof KClusterer)
to_use = (KClusterer) baseClusterAlgo;
else
to_use = new HamerlyKMeans();
int[] joint_designations = to_use.cluster(Z, clusters, parallel, null);
createAssignments(row_assignments, col_assignments, clusters, A, joint_designations);
}
public void bicluster(DataSet dataSet, boolean parallel, List<List<Integer>> row_assignments, List<List<Integer>> col_assignments)
{
//1. Given A, form An = D_1^{−1/2} A D_2^{−1/2}
Matrix A = dataSet.getDataMatrix();
DenseVector R = new DenseVector(A.rows());
DenseVector C = new DenseVector(A.cols());
Matrix A_n = inputNormalization.normalize(A, R, C);
//2. Compute l = ceil(log2 k) singular vectors of A_n, u2, . . . u_l+1 and v2, . . . v_l+1, and form the matrix Z as in (12)
int k_max = Math.min(A.rows(), A.cols());
int l = (int) Math.ceil(Math.log(k_max)/Math.log(2.0));
SimpleDataSet Z = create_Z_dataset(A_n, l, R, C, inputNormalization);
int[] joint_designations = baseClusterAlgo.cluster(Z, parallel, null);
int clusters = 0;
for(int i : joint_designations)
clusters = Math.max(clusters, i+1);
//prep label outputs
createAssignments(row_assignments, col_assignments, clusters, A, joint_designations);
}
private SimpleDataSet create_Z_dataset(Matrix A_n, int l, DenseVector R, DenseVector C, InputNormalization inputNormalization)
{
//A_n has r rows and c columns. We are going to make a new data matrix Z
//Z will have (r+c) rows, and l columns.
TruncatedSVD svd = new TruncatedSVD(A_n, l+1);//+1 b/c we are going to skip the first SV
Matrix U = svd.getU();
Matrix V = svd.getV().transpose();
//In some cases, Drop the first column, which corresponds to the first SV we don't want
int to_skip = 1;
U = new SubMatrix(U, 0, to_skip, U.rows(), l+to_skip);
V = new SubMatrix(V, 0, to_skip, V.rows(), l+to_skip);
/* Orig paper says to do this multiplication for re-scaling. Why not for
* bistochastic? Its very similar! b/c in "Spectral Biclustering of
* Microarray Data: Coclustering Genes and Conditions" where bistochastic
* is introduced, on page 710: "Once D1 and D2 are found, we apply SVD to
* B with no further normalization "
*
*/
if(inputNormalization == InputNormalization.SCALE)
{
Matrix.diagMult(R, U);
Matrix.diagMult(C, V);
}
SimpleDataSet Z = new SimpleDataSet(l, new CategoricalData[0]);
for(int i = 0; i < U.rows(); i++)
Z.add(new DataPoint(U.getRow(i)));
for(int i = 0; i < V.rows(); i++)
Z.add(new DataPoint(V.getRow(i)));
return Z;
}
private void createAssignments(List<List<Integer>> row_assignments, List<List<Integer>> col_assignments, int clusters, Matrix A, int[] joint_designations) {
//prep label outputs
row_assignments.clear();
col_assignments.clear();
for(int c = 0; c < clusters; c++)
{
row_assignments.add(new IntList());
col_assignments.add(new IntList());
}
for(int i = 0; i < A.rows(); i++)//the bicluster labels for the rows
if(joint_designations[i] >= 0)
row_assignments.get(joint_designations[i]).add(i);
        for(int j = 0; j < A.cols(); j++)//the bicluster labels for the columns
if(joint_designations[j+A.rows()] >= 0)
col_assignments.get(joint_designations[j+A.rows()]).add(j);
//Now we need to prune potential false bi-clusterings that have only features or only rows
for(int j = row_assignments.size()-1; j >= 0; j--)
{
if(row_assignments.get(j).isEmpty() || col_assignments.get(j).isEmpty())
{
row_assignments.remove(j);
col_assignments.remove(j);
}
}
}
@Override
public SpectralCoClustering clone()
{
return this;
}
/**
     * Performs normalization as described in Section 4 of "Co-clustering
* Documents and Words Using Bipartite Spectral Graph Partitioning"
*
* @param A the matrix to normalize
* @param R the location to store the row sums, should have length equal to
* the number of rows in A
* @param C the location to store the column sums, should have length equal
* to the number of columns in A.
* @return a normalized copy of the original matrix.
*/
protected static Matrix row_col_normalize(Matrix A, Vec R, Vec C)
{
R.zeroOut();
C.zeroOut();
//A_n = R^{−1/2} A C^{−1/2}
//Where R and C are diagonal matrix with Row and Column sums
for (int i = 0; i < A.rows(); i++)
for(IndexValue iv : A.getRowView(i))
{
int j = iv.getIndex();
double v = iv.getValue();
R.increment(i, v);
C.increment(j, v);
}
R.applyFunction(v -> v == 0 ? 0 : 1.0/Math.sqrt(v));
C.applyFunction(v -> v == 0 ? 0 : 1.0/Math.sqrt(v));
Matrix A_n = A.clone();
Matrix.diagMult(R, A_n);
Matrix.diagMult(A_n, C);
return A_n;
}
}
| 11,268 | 35.826797 | 160 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/dissimilarity/AbstractClusterDissimilarity.java | package jsat.clustering.dissimilarity;
import jsat.DataSet;
/**
* This base class does not currently provide any inheritable functionality, but
* stores static methods.
*
* @author Edward Raff
*/
public abstract class AbstractClusterDissimilarity implements ClusterDissimilarity
{
/**
* A convenience method. If the <i>distanceMatrix</i> was created with
* {@link #createDistanceMatrix(jsat.DataSet, jsat.clustering.dissimilarity.ClusterDissimilarity)
* }, then this method will return the appropriate value for the desired
* index.
*
* @param distanceMatrix the distance matrix to query from
* @param i the first index
* @param j the second index
* @return the correct value from the distance matrix from the index given
* as if the distance matrix was of full form
*/
public static double getDistance(double[][] distanceMatrix, int i, int j)
{
if (i > j)
{
int tmp = j;
j = i;
i = tmp;
}
return distanceMatrix[i][j - i - 1];
}
/**
* A convenience method. If the <i>distanceMatrix</i> was created with
* {@link #createDistanceMatrix(jsat.DataSet, jsat.clustering.dissimilarity.ClusterDissimilarity)
* }, then this method will set the appropriate value for the desired
* index.
*
* @param distanceMatrix the distance matrix to query from
* @param i the first index
* @param j the second index
* @param dist the new distance value to store in the matrix
*/
public static void setDistance(double[][] distanceMatrix, int i, int j, double dist)
{
if (i > j)
{
int tmp = j;
j = i;
i = tmp;
}
distanceMatrix[i][j - i - 1] = dist;
}
/**
* Creates an upper triangular matrix containing the distance between all
* points in the data set. The main diagonal will contain all zeros, since
* the distance between a point and itself is always zero. This main
* diagonal is not stored, and is implicit <br> To save space, the matrix is
* staggered, and is of a size such that all elements to the left of the
* main diagonal are not present. <br> To compute the index into the
* returned array for the index [i][j], the values should be switched such
     * that i < j, and accessed as [i][j-i-1]
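     * <br> For example, with 4 data points the rows of the returned array have
     * lengths 3, 2, 1, and 0; the distance between points 1 and 3 is stored at
     * [1][1], and a request for the pair (3, 1) is first swapped to (1, 3).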
*
* @param dataSet the data set to create distance matrix for
* @param cd the cluster dissimilarity measure to use
* @return a upper triangular distance matrix
*/
public static double[][] createDistanceMatrix(DataSet dataSet, ClusterDissimilarity cd)
{
double[][] distances = new double[dataSet.size()][];
for (int i = 0; i < distances.length; i++)
{
distances[i] = new double[dataSet.size() - i - 1];
for (int j = i + 1; j < distances.length; j++)
distances[i][j - i - 1] = cd.distance(dataSet.getDataPoint(i), dataSet.getDataPoint(j));
}
return distances;
}
@Override
abstract public ClusterDissimilarity clone();
}
| 3,162 | 33.010753 | 104 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/dissimilarity/AverageLinkDissimilarity.java | package jsat.clustering.dissimilarity;
import java.util.*;
import jsat.classifiers.DataPoint;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
/**
* Also known as Group-Average Agglomerative Clustering (GAAC) and UPGMA, this
* measure computer the dissimilarity by summing the distances between all
* possible data point pairs in the union of the clusters.
*
* @author Edward Raff
*/
public class AverageLinkDissimilarity extends LanceWilliamsDissimilarity implements UpdatableClusterDissimilarity
{
/**
* Creates a new AverageLinkDissimilarity using the {@link EuclideanDistance}
*/
public AverageLinkDissimilarity()
{
this(new EuclideanDistance());
}
/**
* Creates a new AverageLinkDissimilarity
* @param dm the distance measure to use on individual points
*/
public AverageLinkDissimilarity(DistanceMetric dm)
{
super(dm);
}
@Override
public AverageLinkDissimilarity clone()
{
return new AverageLinkDissimilarity(dm.clone());
}
@Override
public double dissimilarity(List<DataPoint> a, List<DataPoint> b)
{
double disSum = 0;
int allSize = a.size()+b.size();
List<DataPoint> allPoints = new ArrayList<DataPoint>(allSize);
allPoints.addAll(a);
allPoints.addAll(b);
for(int i = 0; i < allPoints.size(); i++)
for(int j = i+1; j < allPoints.size(); j++)
disSum += distance(allPoints.get(i), allPoints.get(j));
return disSum/(allSize*(allSize-1));
}
@Override
public double dissimilarity(Set<Integer> a, Set<Integer> b, double[][] distanceMatrix)
{
double disSum = 0;
int allSize = a.size()+b.size();
int[] allPoints = new int[allSize];
int z = 0;
for(int val : a)
allPoints[z++] = val;
for(int val : b)
allPoints[z++] = val;
for(int i = 0; i < allPoints.length; i++)
for(int j = i+1; j < allPoints.length; j++)
disSum += getDistance(distanceMatrix, allPoints[i], allPoints[j]);
return disSum/(allSize*(allSize-1));
}
@Override
public double dissimilarity(int i, int ni, int j, int nj, double[][] distanceMatrix)
{
return getDistance(distanceMatrix, i, j);
}
@Override
public double dissimilarity(int i, int ni, int j, int nj, int k, int nk, double[][] distanceMatrix)
{
double ai = ni/(double)(ni+nj);
double aj = nj/(double)(ni+nj);
return ai * getDistance(distanceMatrix, i, k) + aj * getDistance(distanceMatrix, j, k);
}
@Override
protected double aConst(boolean iFlag, int ni, int nj, int nk)
{
double denom = ni+nj;
if(iFlag)
return ni/denom;
else
return nj/denom;
}
@Override
protected double bConst(int ni, int nj, int nk)
{
return 0;
}
@Override
protected double cConst(int ni, int nj, int nk)
{
return 0;
}
}
| 3,176 | 26.387931 | 113 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/dissimilarity/CentroidDissimilarity.java |
package jsat.clustering.dissimilarity;
import java.util.List;
import java.util.Set;
import jsat.classifiers.DataPoint;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
/**
 * Average dissimilarity of all data point pairs between clusters; intra-cluster
 * pairs are ignored. Also called UPGMC.
*
* @author Edward Raff
*/
public class CentroidDissimilarity extends LanceWilliamsDissimilarity implements UpdatableClusterDissimilarity
{
/**
* Creates a new CentroidDissimilarity that used the {@link EuclideanDistance}
*/
public CentroidDissimilarity()
{
this(new EuclideanDistance());
}
/**
* Creates a new CentroidDissimilarity
* @param dm the distance measure to use between individual points
*/
public CentroidDissimilarity(DistanceMetric dm)
{
super(dm);
}
@Override
public CentroidDissimilarity clone()
{
return new CentroidDissimilarity(dm.clone());
}
@Override
public double dissimilarity(List<DataPoint> a, List<DataPoint> b)
{
double sumDIss = 0;
for (DataPoint ai : a)
for (DataPoint bi : b)
sumDIss += distance(ai, bi);
return sumDIss/(a.size()*b.size());
}
@Override
public double dissimilarity(Set<Integer> a, Set<Integer> b, double[][] distanceMatrix)
{
double sumDiss = 0;
for (int ai : a)
for (int bi : b)
sumDiss += getDistance(distanceMatrix, ai, bi);
return sumDiss/(a.size()*b.size());
}
@Override
public double dissimilarity(int i, int ni, int j, int nj, double[][] distanceMatrix)
{
return getDistance(distanceMatrix, i, j);
}
@Override
public double dissimilarity(int i, int ni, int j, int nj, int k, int nk, double[][] distanceMatrix)
{
double iPj = ni+nj;
double ai = ni/iPj;
double aj = nj/iPj;
        double b = - ni * nj / (iPj*iPj);
return ai* getDistance(distanceMatrix, i, k) + aj * getDistance(distanceMatrix, j, k) + b * getDistance(distanceMatrix, i, j);
}
@Override
protected double aConst(boolean iFlag, int ni, int nj, int nk)
{
double denom = ni+nj;
if(iFlag)
return ni/denom;
else
return nj/denom;
}
@Override
protected double bConst(int ni, int nj, int nk)
{
double nipj = ni + nj;
return - ni*(double)nj/(nipj*nipj);
}
@Override
protected double cConst(int ni, int nj, int nk)
{
return 0;
}
}
| 2,654 | 23.813084 | 134 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/dissimilarity/ClusterDissimilarity.java | package jsat.clustering.dissimilarity;
import java.util.List;
import java.util.Set;
import jsat.classifiers.DataPoint;
/**
* This interface provides the basic contract for measuring the dissimilarity
* between two clusters, and intended for use in Hierarchical Agglomerative
* Clustering.
*
* @author Edward Raff
*/
public interface ClusterDissimilarity
{
/**
* Provides the notion of distance, or dissimilarity, between two data points
* @param a the first data point
* @param b the second data point
* @return a value >= 0 that is a measure of the difference between the
* two points. The closer to zero, the more similar the points are.
*/
public double distance(DataPoint a, DataPoint b);
/**
* Provides the notion of dissimilarity between two sets of points, that may
* not have the same number of points.
*
* @param a the first cluster of points
* @param b the second cluster of points
* @return a value >= 0 that describes the dissimilarity of the two
* clusters. The larger the value, the more different the two clusterings are.
*/
public double dissimilarity(List<DataPoint> a, List<DataPoint> b);
/**
* Provides the notion of dissimilarity between two sets of points, that may
* not have the same number of points. This is done using a matrix
* containing all pairwise distance computations between all points.
*
* @param a the first set of indices of the original data set that are in a
* cluster, which map to <i>distanceMatrix</i>
* @param b the second set of indices of the original data set that are in a
* cluster, which map to <i>distanceMatrix</i>
     * @param distanceMatrix the upper triangular distance matrix as created by 
* {@link AbstractClusterDissimilarity#createDistanceMatrix(jsat.DataSet, jsat.clustering.dissimilarity.ClusterDissimilarity) }
* @return a value >= 0 that describes the dissimilarity of the two
* clusters. The larger the value, the more different the two clusterings are.
*/
public double dissimilarity(Set<Integer> a, Set<Integer> b, double[][] distanceMatrix);
public ClusterDissimilarity clone();
}
| 2,249 | 41.45283 | 131 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/dissimilarity/CompleteLinkDissimilarity.java |
package jsat.clustering.dissimilarity;
import java.util.List;
import java.util.Set;
import jsat.classifiers.DataPoint;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
/**
* Measures the dissimilarity of two clusters by returning the value of the
* maximal dissimilarity of any two pairs of data points where one is from
* each cluster.
*
* @author Edward Raff
*/
public class CompleteLinkDissimilarity extends LanceWilliamsDissimilarity implements UpdatableClusterDissimilarity
{
/**
* Creates a new CompleteLinkDissimilarity using the {@link EuclideanDistance}
*/
public CompleteLinkDissimilarity()
{
this(new EuclideanDistance());
}
/**
* Creates a new CompleteLinkDissimilarity
* @param dm the distance metric to use between individual points
*/
public CompleteLinkDissimilarity(DistanceMetric dm)
{
super(dm);
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public CompleteLinkDissimilarity(CompleteLinkDissimilarity toCopy)
{
this(toCopy.dm.clone());
}
@Override
public CompleteLinkDissimilarity clone()
{
return new CompleteLinkDissimilarity(this);
}
@Override
public double dissimilarity(List<DataPoint> a, List<DataPoint> b)
{
double maxDiss = Double.MIN_VALUE;
double tmpDist;
for (DataPoint ai : a)
for (DataPoint bi : b)
if ((tmpDist = distance(ai, bi)) > maxDiss)
maxDiss = tmpDist;
return maxDiss;
}
@Override
public double dissimilarity(Set<Integer> a, Set<Integer> b, double[][] distanceMatrix)
{
double maxDiss = Double.MIN_VALUE;
for (int ai : a)
for (int bi : b)
if (getDistance(distanceMatrix, ai, bi) > maxDiss)
maxDiss = getDistance(distanceMatrix, ai, bi);
return maxDiss;
}
@Override
public double dissimilarity(int i, int ni, int j, int nj, double[][] distanceMatrix)
{
return getDistance(distanceMatrix, i, j);
}
@Override
public double dissimilarity(int i, int ni, int j, int nj, int k, int nk, double[][] distanceMatrix)
{
return Math.max(getDistance(distanceMatrix, i, k), getDistance(distanceMatrix, j, k));
}
@Override
public double dissimilarity(int ni, int nj, int nk, double d_ij, double d_ik, double d_jk)
{
return Math.max(d_ik, d_jk);
}
@Override
protected double aConst(boolean iFlag, int ni, int nj, int nk)
{
return 0.5;
}
@Override
protected double bConst(int ni, int nj, int nk)
{
return 0;
}
@Override
protected double cConst(int ni, int nj, int nk)
{
return 0.5;
}
}
| 2,888 | 24.121739 | 114 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/dissimilarity/DistanceMetricDissimilarity.java |
package jsat.clustering.dissimilarity;
import jsat.classifiers.DataPoint;
import jsat.linear.distancemetrics.DistanceMetric;
/**
* A base class for Dissimilarity measures that are build ontop the use of some {@link DistanceMetric distance metric}.
*
* @author Edward Raff
*/
public abstract class DistanceMetricDissimilarity extends AbstractClusterDissimilarity
{
/**
* The distance metric that will back this dissimilarity measure.
*/
protected final DistanceMetric dm;
public DistanceMetricDissimilarity(DistanceMetric dm)
{
this.dm = dm;
}
@Override
public double distance(DataPoint a, DataPoint b)
{
return dm.dist(a.getNumericalValues(), b.getNumericalValues());
}
@Override
abstract public DistanceMetricDissimilarity clone();
}
| 827 | 24.090909 | 120 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/dissimilarity/LanceWilliamsDissimilarity.java |
package jsat.clustering.dissimilarity;
import static java.lang.Math.abs;
import java.util.List;
import java.util.Set;
import jsat.classifiers.DataPoint;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.utils.IntSet;
/**
* This class provides a base implementation of a Lance Williams (LW)
* Dissimilarity measure, which is updatable. All LW measures can be written in the form
* <br>
* α<sub>i</sub> d<sub>ik</sub> + α<sub>j</sub> d<sub>jk</sub> +
* β d<sub>ij</sub> + γ |d<sub>ik</sub> - d<sub>jk</sub>|
* <br>
* The d's represent the distances between points, and the variables: <br>
* <ul>
* <li>α</li>
* <li>β</li>
* <li>γ</li>
* </ul>
* are computed from other functions, and depend on prior values.
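 * <br><br>
 * For example, single linkage corresponds to α<sub>i</sub> = α<sub>j</sub> = 1/2,
 * β = 0, and γ = -1/2, which reduces the update to
 * min(d<sub>ik</sub>, d<sub>jk</sub>), while complete linkage uses γ = 1/2 and
 * yields max(d<sub>ik</sub>, d<sub>jk</sub>).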
* <br><br>
* NOTE: LW is meant for algorithms that perform updates to a distance matrix.
* While the {@link #dissimilarity(java.util.List, java.util.List) } and
* {@link #dissimilarity(java.util.Set, java.util.Set, double[][]) } methods
* will work and produce the correct results, their performance will likely be
 * worse than that of a direct implementation of the measure.
* @author Edward Raff
*/
public abstract class LanceWilliamsDissimilarity extends DistanceMetricDissimilarity implements UpdatableClusterDissimilarity
{
/**
* Creates a new LW dissimilarity measure using the given metric as the base distance between individual points.
* @param dm the base metric to measure dissimilarity from.
*/
public LanceWilliamsDissimilarity(DistanceMetric dm)
{
super(dm);
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public LanceWilliamsDissimilarity(LanceWilliamsDissimilarity toCopy)
{
this(toCopy.dm.clone());
}
/**
* This method computes the value of the α variable. A flag is used to
* control whether the value for the cluster <tt>i</tt> and <tt>k</tt> is
* being computed, or the value for the cluster <tt>j</tt> and <tt>k</tt>.
*
* @param iFlag <tt>true</tt> indicates that α<sub>i</sub> is the
* value to compute, <tt>false</tt> indicated that α<sub>j</sub>
* should be computed.
* @param ni the number of points that make up cluster <tt>i</tt>
* @param nj the number of points that make up cluster <tt>j</tt>
* @param nk the number of points that make up cluster <tt>k</tt>
* @return the value of the variable α
*/
protected abstract double aConst(boolean iFlag, int ni, int nj, int nk);
/**
* This method computes the value of the β variable.
* @param ni the number of points that make up cluster <tt>i</tt>
* @param nj the number of points that make up cluster <tt>j</tt>
* @param nk the number of points that make up cluster <tt>k</tt>
* @return the value of the variable β
*/
protected abstract double bConst(int ni, int nj, int nk);
/**
* This method computes the value of the γ variable.
* @param ni the number of points that make up cluster <tt>i</tt>
* @param nj the number of points that make up cluster <tt>j</tt>
* @param nk the number of points that make up cluster <tt>k</tt>
* @return the value of the variable γ
*/
protected abstract double cConst(int ni, int nj, int nk);
@Override
public double dissimilarity(List<DataPoint> a, List<DataPoint> b)
{
if(a.size() == 1 && b.size() == 1)
return dm.dist(a.get(0).getNumericalValues(), b.get(0).getNumericalValues());
List<DataPoint> CI;
List<DataPoint> CJ;
List<DataPoint> CK;
if(a.size() > 1)
{
CI = a.subList(0, 1);
CJ = a.subList(1, a.size());
CK = b;
}
else// a==1, b >1
{
CI = b.subList(0, 1);
CJ = b.subList(1, b.size());
CK = a;
}
double d_ik = dissimilarity(CI, CK);
double d_jk = dissimilarity(CJ, CK);
double d_ij = dissimilarity(CI, CJ);
return aConst(true, CI.size(), CJ.size(), CK.size()) * d_ik +
aConst(false, CI.size(), CJ.size(), CK.size()) * d_jk +
bConst(CI.size(), CJ.size(), CK.size()) * d_ij +
cConst(CI.size(), CJ.size(), CK.size()) * abs(d_ik-d_jk);
}
@Override
public double dissimilarity(Set<Integer> a, Set<Integer> b, double[][] distanceMatrix)
{
if(a.size() == 1 && b.size() == 1)
return getDistance(distanceMatrix, getVal(a), getVal(b));
Set<Integer> CI;
Set<Integer> CJ;
Set<Integer> CK;
if(a.size() > 1)
{
CI = new IntSet();
CI.add(getVal(a));
CJ = new IntSet(a);
CJ.removeAll(CI);
CK = b;
}
else//a == 1, b > 1
{
CI = new IntSet();
CI.add(getVal(b));
CJ = new IntSet(b);
CJ.removeAll(CI);
CK = a;
}
double d_ik = dissimilarity(CI, CK, distanceMatrix);
double d_jk = dissimilarity(CJ, CK, distanceMatrix);
double d_ij = dissimilarity(CI, CJ, distanceMatrix);
return aConst(true, CI.size(), CJ.size(), CK.size()) * d_ik +
aConst(false, CI.size(), CJ.size(), CK.size()) * d_jk +
bConst(CI.size(), CJ.size(), CK.size()) * d_ij +
cConst(CI.size(), CJ.size(), CK.size()) * abs(d_ik-d_jk);
}
/**
* Returns a value from the set, assuming that all values are positive. If empty, -1 is returned.
* @param a the set to get a value of
* @return a value from the set, or -1 if empty
*/
private static int getVal(Set<Integer> a)
{
for(int i : a)
return i;
return -1;
}
@Override
public double dissimilarity(int i, int ni, int j, int nj, double[][] distanceMatrix)
{
return getDistance(distanceMatrix, i, j);
}
@Override
public double dissimilarity(int i, int ni, int j, int nj, int k, int nk, double[][] distanceMatrix)
{
double d_ik = getDistance(distanceMatrix, i, k);
double d_jk = getDistance(distanceMatrix, j, k);
double d_ij = getDistance(distanceMatrix, i, j);
return dissimilarity(ni, nj, nk, d_ij, d_ik, d_jk);
}
/**
* Provides the notion of dissimilarity between two sets of points, that may
* not have the same number of points. This is done using a matrix
* containing all pairwise distance computations between all points. This
* distance matrix will then be updated at each iteration and merging,
* leaving empty space in the matrix. The updates will be done by the
* clustering algorithm. Implementing this interface indicates that this
* dissimilarity measure can be accurately computed in an updatable manner
* that is compatible with a Lance–Williams update. <br>
*
* This computes the dissimilarity of the union of clusters i and j,
* (C<sub>i</sub> ∪ C<sub>j</sub>), with the cluster k. This method is
* used by other algorithms to perform an update of the distance matrix in
* an efficient manner.
*
* @param ni the number of items in the cluster represented by <tt>i</tt>
* @param nj the number of items in the cluster represented by <tt>j</tt>
* @param nk the number of items in the cluster represented by <tt>k</tt>
* @param d_ij the distance between clusters i and j
* @param d_ik the distance between clusters i and k
* @param d_jk the distance between clusters j and k
* @return the distance between the cluster formed from i and j, to the cluster k
*/
public double dissimilarity(int ni, int nj, int nk, double d_ij, double d_ik, double d_jk)
{
return aConst(true, ni, nj, nk) * d_ik +
aConst(false, ni, nj, nk) * d_jk +
bConst(ni, nj, nk) * d_ij +
cConst(ni, nj, nk) * abs(d_ik-d_jk);
}
@Override
abstract public LanceWilliamsDissimilarity clone();
}
| 8,309 | 37.831776 | 125 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/dissimilarity/MedianDissimilarity.java | /*
* Copyright (C) 2016 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.clustering.dissimilarity;
import jsat.linear.distancemetrics.DistanceMetric;
/**
 * Median link dissimilarity, also called WPGMC. When two clusters are merged
 * under the Median dissimilarity, both clusters contribute equally to the
 * merged cluster's representation, regardless of how many points each contains.
*
* @author Edward Raff
*/
public class MedianDissimilarity extends LanceWilliamsDissimilarity
{
public MedianDissimilarity(DistanceMetric dm)
{
super(dm);
}
public MedianDissimilarity(MedianDissimilarity toCopy)
{
super(toCopy);
}
@Override
protected double aConst(boolean iFlag, int ni, int nj, int nk)
{
return 0.5;
}
@Override
protected double bConst(int ni, int nj, int nk)
{
return -0.25;
}
@Override
protected double cConst(int ni, int nj, int nk)
{
return 0;
}
@Override
public MedianDissimilarity clone()
{
return new MedianDissimilarity(this);
}
}
| 1,706 | 25.261538 | 75 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/dissimilarity/SingleLinkDissimilarity.java |
package jsat.clustering.dissimilarity;
import java.util.List;
import java.util.Set;
import jsat.classifiers.DataPoint;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
/**
* Measures the dissimilarity of two clusters by returning the minimum
* dissimilarity between the two closest data points from the clusters, ie:
* the minimum distance needed to link the two clusters.
*
* @author Edward Raff
*/
public class SingleLinkDissimilarity extends LanceWilliamsDissimilarity implements UpdatableClusterDissimilarity
{
/**
* Creates a new SingleLinkDissimilarity using the {@link EuclideanDistance}
*/
public SingleLinkDissimilarity()
{
this(new EuclideanDistance());
}
/**
* Creates a new SingleLinkDissimilarity
* @param dm the distance metric to use between individual points
*/
public SingleLinkDissimilarity(DistanceMetric dm)
{
super(dm);
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public SingleLinkDissimilarity(SingleLinkDissimilarity toCopy)
{
this(toCopy.dm.clone());
}
@Override
public SingleLinkDissimilarity clone()
{
return new SingleLinkDissimilarity(this);
}
@Override
public double dissimilarity(List<DataPoint> a, List<DataPoint> b)
{
double minDiss = Double.MAX_VALUE;
double tmpDist;
for (DataPoint ai : a)
for (DataPoint bi : b)
if ((tmpDist = distance(ai, bi)) < minDiss)
minDiss = tmpDist;
return minDiss;
}
@Override
public double dissimilarity(Set<Integer> a, Set<Integer> b, double[][] distanceMatrix)
{
double minDiss = Double.MAX_VALUE;
for (int ai : a)
for (int bi : b)
if (getDistance(distanceMatrix, ai, bi) < minDiss)
minDiss = getDistance(distanceMatrix, ai, bi);
return minDiss;
}
@Override
public double dissimilarity(int i, int ni, int j, int nj, double[][] distanceMatrix)
{
return getDistance(distanceMatrix, i, j);
}
@Override
public double dissimilarity(int i, int ni, int j, int nj, int k, int nk, double[][] distanceMatrix)
{
return Math.min(getDistance(distanceMatrix, i, k), getDistance(distanceMatrix, j, k));
}
@Override
public double dissimilarity(int ni, int nj, int nk, double d_ij, double d_ik, double d_jk)
{
return Math.min(d_ik, d_jk);
}
@Override
protected double aConst(boolean iFlag, int ni, int nj, int nk)
{
return 0.5;
}
@Override
protected double bConst(int ni, int nj, int nk)
{
return 0;
}
@Override
protected double cConst(int ni, int nj, int nk)
{
return -0.5;
}
}
| 2,903 | 24.252174 | 112 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/dissimilarity/UpdatableClusterDissimilarity.java |
package jsat.clustering.dissimilarity;
/**
* This interface extends the contract of a {@link ClusterDissimilarity} for
* more efficient computation. This contract indicates that the dissimilarity
* measure being used can be computed in an online fashion, and that the
* dissimilarity matrix can be updated to reflect the dissimilarity for a
* new merged cluster.
*
* @author Edward Raff
*/
public interface UpdatableClusterDissimilarity extends ClusterDissimilarity
{
/**
* Provides the notion of dissimilarity between two sets of points, that may
* not have the same number of points. This is done using a matrix
* containing all pairwise distance computations between all points. This
* distance matrix will then be updated at each iteration and merging,
* leaving empty space in the matrix. The updates will be done by the
* clustering algorithm. Implementing this interface indicates that this
* dissimilarity measure can be accurately computed in an updatable manner
* that is compatible with a Lance–Williams update.
*
* @param i the index of cluster <tt>i</tt>'s distance in the original data set
* @param ni the number of items in the cluster represented by <tt>i</tt>
* @param j the index of cluster <tt>j</tt>'s distance in the original data set
* @param nj the number of items in the cluster represented by <tt>j</tt>
* @param distanceMatrix a distance matrix originally created by
* {@link AbstractClusterDissimilarity#createDistanceMatrix(jsat.DataSet,
* jsat.clustering.dissimilarity.ClusterDissimilarity) }
* @return a value >= 0 that describes the dissimilarity of the two
* clusters. The larger the value, the more different the two clusterings are.
*/
public double dissimilarity(int i, int ni, int j, int nj, double[][] distanceMatrix);
/**
* Provides the notion of dissimilarity between two sets of points, that may
* not have the same number of points. This is done using a matrix
* containing all pairwise distance computations between all points. This
* distance matrix will then be updated at each iteration and merging,
* leaving empty space in the matrix. The updates will be done by the
* clustering algorithm. Implementing this interface indicates that this
* dissimilarity measure can be accurately computed in an updatable manner
* that is compatible with a Lance–Williams update. <br>
*
* This computes the dissimilarity of the union of clusters i and j,
* (C<sub>i</sub> ∪ C<sub>j</sub>), with the cluster k. This method is
* used by other algorithms to perform an update of the distance matrix in
* an efficient manner.
*
* @param i the index of cluster <tt>i</tt>'s distance in the original data set
* @param ni the number of items in the cluster represented by <tt>i</tt>
* @param j the index of cluster <tt>j</tt>'s distance in the original data set
* @param nj the number of items in the cluster represented by <tt>j</tt>
* @param k the index of cluster <tt>k</tt>'s distance in the original data set
* @param nk the number of items in the cluster represented by <tt>k</tt>
 * @param distanceMatrix a distance matrix originally created by
* {@link AbstractClusterDissimilarity#createDistanceMatrix(jsat.DataSet,
* jsat.clustering.dissimilarity.ClusterDissimilarity) }
* @return a value >= 0 that describes the dissimilarity of the union of
* two clusters with a third cluster. The larger the value, the more
* different the resulting clusterings are.
*/
public double dissimilarity(int i, int ni, int j, int nj, int k, int nk, double[][] distanceMatrix);
@Override
public UpdatableClusterDissimilarity clone();
}
| 3,847 | 53.971429 | 105 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/dissimilarity/WardsDissimilarity.java | package jsat.clustering.dissimilarity;
import jsat.linear.distancemetrics.SquaredEuclideanDistance;
/**
* An implementation of Ward's method for hierarchical clustering. This method
 * merges the pair of clusters that produces the smallest increase in total
 * within-cluster variance.
*
* @author Edward Raff
*/
public class WardsDissimilarity extends LanceWilliamsDissimilarity
{
public WardsDissimilarity()
{
super(new SquaredEuclideanDistance());
}
@Override
public WardsDissimilarity clone()
{
return new WardsDissimilarity();
}
@Override
protected double aConst(boolean iFlag, int ni, int nj, int nk)
{
double totalPoints = ni+nj+nk;
if(iFlag)
return (ni+nk)/totalPoints;
else
return (nj+nk)/totalPoints;
}
@Override
protected double bConst(int ni, int nj, int nk)
{
double totalPoints = ni+nj+nk;
return -nk/totalPoints;
}
@Override
protected double cConst(int ni, int nj, int nk)
{
return 0;
}
}
| 1,069 | 20.4 | 79 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/evaluation/AdjustedRandIndex.java | package jsat.clustering.evaluation;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import static java.lang.Math.*;
/**
* Adjusted Rand Index (ARI) is a measure to evaluate a cluster based
* on the true class labels for the data set. The ARI normally returns a value
* in [-1, 1], where 0 indicates the clustering appears random, and 1 indicate
* the clusters perfectly match the class labels, and negative values indicate a
* clustering that is worse than random. To match the
* {@link ClusterEvaluation} interface, the value returned by evaluate will
* be 1.0-Adjusted Rand Index so the best value becomes 0.0 and the worse value
* becomes 2.0.
* <br>
* <b>NOTE:</b> Because the ARI needs to know the true class labels, only
* {@link #evaluate(int[], jsat.DataSet) } will work, since it provides the data
* set as an argument. The dataset given must be an instance of
* {@link ClassificationDataSet}
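 * <br><br>
 * A minimal usage sketch (hypothetical; assumes {@code cds} is an existing
 * {@link ClassificationDataSet} and {@code designations} came from some
 * clustering algorithm run on it):
 * <pre>{@code
 * AdjustedRandIndex ari = new AdjustedRandIndex();
 * double score = ari.evaluate(designations, cds); // 0.0 is best, 2.0 is worst
 * double rawARI = ari.naturalScore(score);        // back on the usual [-1, 1] scale
 * }</pre>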
*
* @author Edward Raff
*/
public class AdjustedRandIndex implements ClusterEvaluation
{
@Override
public double evaluate(int[] designations, DataSet dataSet)
{
if( !(dataSet instanceof ClassificationDataSet))
throw new RuntimeException("NMI can only be calcuate for classification data sets");
ClassificationDataSet cds = (ClassificationDataSet)dataSet;
int clusters = 0;//how many clusters are there?
for(int clusterID : designations)
clusters = Math.max(clusterID+1, clusters);
double[] truthSums = new double[cds.getClassSize()];
double[] clusterSums = new double[clusters];
double[][] table = new double[clusterSums.length][truthSums.length];
double n = 0.0;
for(int i = 0; i < designations.length; i++)
{
int cluster = designations[i];
if(cluster < 0)
continue;//noisy point
int label = cds.getDataPointCategory(i);
double weight = cds.getWeight(i);
table[cluster][label] += weight;
truthSums[label] += weight;
clusterSums[cluster] += weight;
n += weight;
}
/*
* Adjusted Rand Index involves many (n choose 2) = 1/2 (n-1) n
*/
double sumAllTable = 0.0;
double addCTerm = 0.0, addLTerm = 0.0;//clustering and label
for(int i = 0; i < table.length; i++)
{
double a_i = clusterSums[i];
addCTerm += a_i*(a_i-1)/2;
for(int j = 0; j < table[i].length; j++)
{
if(i == 0)
{
double b_j = truthSums[j];
addLTerm += b_j*(b_j-1)/2;
}
double n_ij = table[i][j];
double n_ij_c2 = n_ij*(n_ij-1)/2;
sumAllTable += n_ij_c2;
}
}
        double longMultTerm = exp(log(addCTerm)+log(addLTerm)-(log(n)+log(n-1)-log(2)));//numerically more stable version
return 1.0-(sumAllTable-longMultTerm)/(addCTerm/2+addLTerm/2-longMultTerm);
}
@Override
public double naturalScore(double evaluate_score)
{
        //returns values in the range of [1, -1], with 1=best, and -1=worst
return -evaluate_score+1;
}
@Override
public double evaluate(List<List<DataPoint>> dataSets)
{
throw new UnsupportedOperationException("Adjusted Rand Index requires the true data set"
+ " labels, call evaluate(int[] designations, DataSet dataSet)"
+ " instead");
}
@Override
public ClusterEvaluation clone()
{
return new AdjustedRandIndex();
}
}
| 3,779 | 34.660377 | 120 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/evaluation/ClusterEvaluation.java | package jsat.clustering.evaluation;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.clustering.dissimilarity.ClusterDissimilarity;
/**
* Provides the contract for evaluating the quality of a hard assignment of
* clustering a dataset. The value returned indicates the quality of the
* clustering, with smaller values indicating a good clustering, and larger
* values indicating a poor clustering. <br>
* This differs from {@link ClusterDissimilarity} in that it evaluates all
* clusters, instead of just measuring the dissimilarity of two specific clusters.
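 * <br><br>
 * A minimal usage sketch (hypothetical; assumes {@code dataSet} and the
 * assignment array {@code designations} already exist, and uses
 * {@link DaviesBouldinIndex} purely as an example implementation):
 * <pre>{@code
 * ClusterEvaluation eval = new DaviesBouldinIndex();
 * double score = eval.evaluate(designations, dataSet); // smaller is better
 * double natural = eval.naturalScore(score);           // the measure's native scale
 * }</pre>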
*
* @author Edward Raff
*/
public interface ClusterEvaluation
{
/**
* Evaluates the clustering of the given clustering.
* @param designations the array that stores the cluster assignments for
* each data point in the data set
* @param dataSet the data set that contains all data points
* @return a value in [0, Inf) that indicates the quality of the clustering
* (smaller is better).
*/
public double evaluate(int[] designations, DataSet dataSet);
/**
* Evaluates the clustering of the given set of clusters.
*
* @param dataSets a list of lists, where the size of the first index
 * indicates the number of clusters, and the list at
* each index is the data points that make up each cluster.
* @return a value in [0, Inf) that indicates the quality of the clustering
* (smaller is better).
*/
public double evaluate(List<List<DataPoint>> dataSets);
/**
* The {@link #evaluate(java.util.List) evaluate} methods mandate a score to
* be returned in such a way that lower values are better. This method takes
* the value returned by an evaluate method, and returns the score as
* naturally defined by the cluster evaluation method. This is useful for
* when some algorithms return scores where higher is better, and we wish to
* display the scores in their intended form.
*
* @param evaluate_score the score where lower is better, as returned by the
* evaluate method.
* @return the score as naturally defined by the evaluation method.
*/
public double naturalScore(double evaluate_score);
public ClusterEvaluation clone();
}
| 2,384 | 40.842105 | 83 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/evaluation/ClusterEvaluationBase.java |
package jsat.clustering.evaluation;
import jsat.DataSet;
import jsat.clustering.ClustererBase;
/**
* Base implementation for one of the methods in {@link ClusterEvaluation} to
* make life easier.
*
* @author Edward Raff
*/
abstract public class ClusterEvaluationBase implements ClusterEvaluation
{
@Override
public double evaluate(int[] designations, DataSet dataSet)
{
return evaluate(ClustererBase.createClusterListFromAssignmentArray(designations, dataSet));
}
@Override
public abstract ClusterEvaluation clone();
}
| 569 | 21.8 | 99 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/evaluation/Completeness.java | /*
* Copyright (C) 2019 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.clustering.evaluation;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
/**
* A clustering result satisfies completeness if all the data points that are members of a given class are elements of the same cluster.
* @author Edward Raff
*/
public class Completeness implements ClusterEvaluation
{
public Completeness()
{
}
@Override
public double evaluate(int[] designations, DataSet dataSet)
{
if( !(dataSet instanceof ClassificationDataSet))
throw new RuntimeException("Completeness can only be calcuate for classification data sets");
ClassificationDataSet cds = (ClassificationDataSet)dataSet;
int clusters = 0;//how many clusters are there?
for(int clusterID : designations)
clusters = Math.max(clusterID+1, clusters);
int C = cds.getPredicting().getNumOfCategories();
int K = clusters;
double[][] A = new double[C][K];
        //row sums (per class) and column sums (per cluster) of A
double[] class_sum = new double[C];
double[] cluster_sum = new double[K];
double n = 0.0;
for(int i = 0; i < designations.length; i++)
{
int cluster = designations[i];
if(cluster < 0)
continue;//noisy point
int label = cds.getDataPointCategory(i);
double weight = cds.getWeight(i);
A[label][cluster] += weight;
class_sum[label] += weight;
cluster_sum[cluster] += weight;
n += weight;
}
double h_kc = 0;
double h_k = 0;
for(int c = 0; c < C; c++)
{
for(int k = 0; k < K; k++)
{
//compute h_k, h_k needs only one loop to compute its values, hence c check
if(cluster_sum[k] > 0 && c == 0)
h_k -= cluster_sum[k]/n * (Math.log(cluster_sum[k])-Math.log(n));
//h(k|c) calc
if(A[c][k] == 0 || class_sum[c] == 0)
continue;
h_kc -= A[c][k]/n * (Math.log(A[c][k]) - Math.log(class_sum[c]));
}
}
if(h_k == 0)
return 0;
return h_kc/h_k;
}
@Override
public double evaluate(List<List<DataPoint>> dataSets)
{
throw new UnsupportedOperationException("Completeness requires the true data set"
+ " labels, call evaluate(int[] designations, DataSet dataSet)"
+ " instead");
}
@Override
public double naturalScore(double evaluate_score)
{
return 1-evaluate_score;
}
@Override
public Completeness clone()
{
return new Completeness();
}
}
| 3,212 | 28.75 | 138 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/evaluation/DaviesBouldinIndex.java |
package jsat.clustering.evaluation;
import java.util.ArrayList;
import java.util.List;
import jsat.DataSet;
import jsat.SimpleDataSet;
import jsat.classifiers.DataPoint;
import jsat.clustering.ClustererBase;
import jsat.linear.MatrixStatistics;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
/**
 * An implementation of the Davies-Bouldin Index, a measure for evaluating the
 * quality of a clustering by comparing each cluster's average distance to its
 * centroid against its separation from the other cluster centroids. Lower
 * values indicate a better clustering.
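 * <br><br>
 * A minimal usage sketch (hypothetical; assumes {@code dataSet} and the
 * assignment array {@code designations} already exist):
 * <pre>{@code
 * DaviesBouldinIndex dbi = new DaviesBouldinIndex(new EuclideanDistance());
 * double db = dbi.evaluate(designations, dataSet); // lower indicates better separated clusters
 * }</pre>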
*
* @author Edward Raff
*/
public class DaviesBouldinIndex implements ClusterEvaluation
{
private DistanceMetric dm;
/**
* Creates a new DaviesBouldinIndex using the {@link EuclideanDistance}.
*/
public DaviesBouldinIndex()
{
this(new EuclideanDistance());
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public DaviesBouldinIndex(DaviesBouldinIndex toCopy)
{
this(toCopy.dm.clone());
}
/**
* Creates a new DaviesBouldinIndex
     * @param dm the distance metric to use when computing the index
*/
public DaviesBouldinIndex(DistanceMetric dm)
{
this.dm = dm;
}
@Override
public double evaluate(int[] designations, DataSet dataSet)
{
return evaluate(ClustererBase.createClusterListFromAssignmentArray(designations, dataSet));
}
@Override
public double evaluate(List<List<DataPoint>> dataSets)
{
/**
         * Formula for the DB measure:
         *
         *   DB = (1/n) * sum_{i=1..n} max_{j != i} (sigma_i + sigma_j) / d(c_i, c_j)
         *
         * where
         * c_i is the centroid of cluster i
         * sigma_i is the average distance of all points in cluster i to its centroid
* d(,) is a distance function
* n is the number of clusters
*/
List<Vec> centroids = new ArrayList<Vec>(dataSets.size());
double[] avrgCentriodDist = new double[dataSets.size()];
for(int i = 0; i < dataSets.size(); i++)
{
Vec mean = MatrixStatistics.meanVector(new SimpleDataSet(dataSets.get(i)));
centroids.add(mean);
for(DataPoint dp : dataSets.get(i))
avrgCentriodDist[i] += dm.dist(dp.getNumericalValues(), mean);
avrgCentriodDist[i]/=dataSets.get(i).size();
}
double dbIndex = 0;
for(int i = 0; i < dataSets.size(); i++)
{
double maxPenalty = Double.NEGATIVE_INFINITY;
for(int j = 0; j < dataSets.size(); j++)
{
if(j == i)
continue;
double penalty = (avrgCentriodDist[i] + avrgCentriodDist[j])/dm.dist(centroids.get(i), centroids.get(j));
maxPenalty = Math.max(maxPenalty, penalty);
}
dbIndex += maxPenalty;
}
return dbIndex / dataSets.size();
}
@Override
public double naturalScore(double evaluate_score)
{
//DB already satisfies this
return evaluate_score;
}
@Override
public DaviesBouldinIndex clone()
{
return new DaviesBouldinIndex(this);
}
}
| 3,455 | 28.042017 | 121 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/evaluation/DunnIndex.java | package jsat.clustering.evaluation;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.clustering.ClustererBase;
import jsat.clustering.dissimilarity.ClusterDissimilarity;
import jsat.clustering.evaluation.intra.IntraClusterEvaluation;
/**
 * Computes the Dunn Index (DI) in a customizable manner. Normally, a higher
 * DI value indicates a better clustering. In order to conform to the interface
* contract of a lower value indicating a better result, the value of 1/(1+DI)
* is returned.
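 * <br><br>
 * A minimal usage sketch (hypothetical; assumes {@code dataSet} and the
 * assignment array {@code designations} already exist; the two component
 * measures shown are just one reasonable choice):
 * <pre>{@code
 * DunnIndex di = new DunnIndex(new MaxDistance(), new SingleLinkDissimilarity());
 * double score = di.evaluate(designations, dataSet); // lower is better, per the interface
 * double rawDI = di.naturalScore(score);             // the usual Dunn Index, higher is better
 * }</pre>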
*
* @author Edward Raff
*/
public class DunnIndex implements ClusterEvaluation
{
private IntraClusterEvaluation ice;
private ClusterDissimilarity cd;
/**
* Creates a new DunnIndex
* @param ice the metric to measure the quality of a single cluster
* @param cd the metric to measure the distance between two clusters
*/
public DunnIndex(IntraClusterEvaluation ice, ClusterDissimilarity cd)
{
this.ice = ice;
this.cd = cd;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public DunnIndex(DunnIndex toCopy)
{
this(toCopy.ice.clone(), toCopy.cd.clone());
}
@Override
public double evaluate(int[] designations, DataSet dataSet)
{
return evaluate(ClustererBase.createClusterListFromAssignmentArray(designations, dataSet));
}
@Override
public double evaluate(List<List<DataPoint>> dataSets)
{
double minVal = Double.POSITIVE_INFINITY;
double maxIntra = Double.NEGATIVE_INFINITY;
for(int i = 0; i < dataSets.size(); i++)
{
for(int j = i+1; j <dataSets.size(); j++)
minVal = Math.min(minVal, cd.dissimilarity(dataSets.get(i), dataSets.get(j)));
maxIntra = Math.max(maxIntra, ice.evaluate(dataSets.get(i)));
}
/*
*
         * Instead of returning 1.0/(1.0+minVal/maxIntra) naively, note that
         *
         *   1 / (1 + x/y) = y / (x + y)
         *
         * So return maxIntra/(minVal+maxIntra) instead; it's numerically more
         * stable and avoids an unneeded division.
*
*/
return maxIntra/(minVal+maxIntra);
}
@Override
public double naturalScore(double evaluate_score)
{
return 1/evaluate_score-1;
}
@Override
public DunnIndex clone()
{
return new DunnIndex(this);
}
}
| 2,566 | 26.602151 | 99 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/evaluation/Homogeneity.java | /*
* Copyright (C) 2019 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.clustering.evaluation;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
/**
* A clustering result satisfies homogeneity if all of its clusters contain
* only data points which are members of a single class. Normally, Homogeneity
* would return a score with 0 being undesirable, and 1 being desirable.
*
* @author Edward Raff
*/
public class Homogeneity implements ClusterEvaluation
{
public Homogeneity()
{
}
@Override
public double evaluate(int[] designations, DataSet dataSet)
{
if( !(dataSet instanceof ClassificationDataSet))
throw new RuntimeException("Homogeneity can only be calcuate for classification data sets");
ClassificationDataSet cds = (ClassificationDataSet)dataSet;
int clusters = 0;//how many clusters are there?
for(int clusterID : designations)
clusters = Math.max(clusterID+1, clusters);
int C = cds.getPredicting().getNumOfCategories();
int K = clusters;
double[][] A = new double[C][K];
        //row sums (per class) and column sums (per cluster) of A
double[] class_sum = new double[C];
double[] cluster_sum = new double[K];
double n = 0.0;
for(int i = 0; i < designations.length; i++)
{
int cluster = designations[i];
if(cluster < 0)
continue;//noisy point
int label = cds.getDataPointCategory(i);
double weight = cds.getWeight(i);
A[label][cluster] += weight;
class_sum[label] += weight;
cluster_sum[cluster] += weight;
n += weight;
}
double h_ck = 0;
double h_c = 0;
for(int c = 0; c < C; c++)
{
//compute h_c, h_c needs only one loop to compute its values
if(class_sum[c] > 0)
h_c -= class_sum[c]/n * (Math.log(class_sum[c])-Math.log(n));
for(int k = 0; k < K; k++)
{
//h(c|k) calc
if(A[c][k] == 0 || cluster_sum[k] == 0)
continue;
h_ck -= A[c][k]/n * (Math.log(A[c][k]) - Math.log(cluster_sum[k]));
}
}
if(h_c == 0)
return 0;
return h_ck/h_c;
}
@Override
public double evaluate(List<List<DataPoint>> dataSets)
{
throw new UnsupportedOperationException("Homogeneity requires the true data set"
+ " labels, call evaluate(int[] designations, DataSet dataSet)"
+ " instead");
}
@Override
public double naturalScore(double evaluate_score)
{
return 1-evaluate_score;
}
@Override
public Homogeneity clone()
{
return new Homogeneity();
}
}
| 3,278 | 28.276786 | 104 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/evaluation/IntraClusterSumEvaluation.java | package jsat.clustering.evaluation;
import java.util.List;
import jsat.classifiers.DataPoint;
import jsat.clustering.evaluation.intra.IntraClusterEvaluation;
/**
* Evaluates a cluster based on the sum of scores for some
* {@link IntraClusterEvaluation} applied to each cluster.
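 * <br><br>
 * A minimal usage sketch (hypothetical; assumes {@code dataSet} and the
 * assignment array {@code designations} already exist):
 * <pre>{@code
 * ClusterEvaluation sse = new IntraClusterSumEvaluation(new SoSCentroidDistance());
 * double withinClusterSoS = sse.evaluate(designations, dataSet); // total within-cluster sum of squares
 * }</pre>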
*
* @author Edward Raff
*/
public class IntraClusterSumEvaluation extends ClusterEvaluationBase
{
private IntraClusterEvaluation ice;
/**
* Creates a new cluster evaluation that returns the sum of the intra
* cluster evaluations
* @param ice the intra cluster evaluation to use
*/
public IntraClusterSumEvaluation(IntraClusterEvaluation ice)
{
this.ice = ice;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public IntraClusterSumEvaluation(IntraClusterSumEvaluation toCopy)
{
this(toCopy.ice.clone());
}
@Override
public double evaluate(List<List<DataPoint>> dataSets)
{
double score = 0;
for(List<DataPoint> list : dataSets)
score += ice.evaluate(list);
return score;
}
@Override
public double naturalScore(double evaluate_score)
{
return evaluate_score;
}
@Override
public IntraClusterSumEvaluation clone()
{
return new IntraClusterSumEvaluation(this);
}
}
| 1,358 | 22.431034 | 74 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/evaluation/NormalizedMutualInformation.java | package jsat.clustering.evaluation;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.utils.DoubleList;
/**
* Normalized Mutual Information (NMI) is a measure to evaluate a cluster based
* on the true class labels for the data set. The NMI normally returns a value
* in [0, 1], where 0 indicates the clustering appears random, and 1 indicate
* the clusters perfectly match the class labels. To match the
* {@link ClusterEvaluation} interface, the value returned by evaluate will
* be 1.0-NMI .
* <br>
* <b>NOTE:</b> Because the NMI needs to know the true class labels, only
* {@link #evaluate(int[], jsat.DataSet) } will work, since it provides the data
* set as an argument. The dataset given must be an instance of
* {@link ClassificationDataSet}
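 * <br><br>
 * A minimal usage sketch (hypothetical; assumes {@code cds} is an existing
 * {@link ClassificationDataSet} and {@code designations} came from some
 * clustering algorithm run on it):
 * <pre>{@code
 * NormalizedMutualInformation nmi = new NormalizedMutualInformation();
 * double score = nmi.evaluate(designations, cds); // 1.0 - NMI, so lower is better
 * double rawNMI = nmi.naturalScore(score);        // back on the usual [0, 1] scale
 * }</pre>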
*
* @author Edward Raff
*/
public class NormalizedMutualInformation implements ClusterEvaluation
{
@Override
public double evaluate(int[] designations, DataSet dataSet)
{
if( !(dataSet instanceof ClassificationDataSet))
throw new RuntimeException("NMI can only be calcuate for classification data sets");
ClassificationDataSet cds = (ClassificationDataSet)dataSet;
double nmiNumer = 0.0;
double nmiC = 0.0;
double nmiK = 0.0;
DoubleList kPriors = new DoubleList();
for(int i= 0; i < cds.size(); i++)
{
int ki = designations[i];
if(ki < 0)//outlier, not clustered
continue;
while(kPriors.size() <= ki)
kPriors.add(0.0);
kPriors.set(ki, kPriors.get(ki)+cds.getWeight(i));
}
double N = 0.0;
for(int i = 0; i < kPriors.size(); i++)
N += kPriors.get(i);
for(int i = 0; i < kPriors.size(); i++)
{
kPriors.set(i, kPriors.get(i)/N);
double pKi = kPriors.get(i);
if(pKi > 0)
nmiK += - pKi*Math.log(pKi);
}
double[] cPriors = cds.getPriors();
double[][] ck = new double[cPriors.length][kPriors.size()];
for(int i = 0; i < cds.size(); i++)
{
int ci = cds.getDataPointCategory(i);
int kj = designations[i];
if(kj < 0)//outlier, ignore
continue;
ck[ci][kj] += cds.getWeight(i);
}
for(int i = 0; i < cPriors.length; i++)
{
double pCi = cPriors[i];
if(pCi <= 0.0)
continue;
double logPCi = Math.log(pCi);
for(int j = 0; j < kPriors.size(); j++)
{
double pKj = kPriors.get(j);
if(pKj <= 0.0)
continue;
double pCiKj = ck[i][j]/N;
if(pCiKj <= 0.0)
continue;
nmiNumer += pCiKj* (Math.log(pCiKj) - Math.log(pKj) - logPCi);
}
nmiC += -pCi*logPCi;
}
return 1.0-nmiNumer/((nmiC+nmiK)/2);
}
@Override
public double naturalScore(double evaluate_score)
{
return -evaluate_score+1;
}
@Override
public double evaluate(List<List<DataPoint>> dataSets)
{
throw new UnsupportedOperationException("NMI requires the true data set"
+ " labels, call evaluate(int[] designations, DataSet dataSet)"
+ " instead");
}
@Override
public NormalizedMutualInformation clone()
{
return new NormalizedMutualInformation();
}
}
| 3,686 | 30.245763 | 96 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/evaluation/VMeasure.java | /*
* Copyright (C) 2019 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.clustering.evaluation;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
/**
* V-Measure is a general purpose cluster evaluation technique, which is the
* harmonic mean of {@link Homogeneity} and {@link Completeness}. Normally, a
* value of 1.0 would be perfect clustering, and 0 would be the worst possible
* score.
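 * <br><br>
 * A minimal usage sketch (hypothetical; assumes {@code cds} is an existing
 * {@link ClassificationDataSet} and {@code designations} came from some
 * clustering algorithm run on it):
 * <pre>{@code
 * VMeasure vm = new VMeasure();              // beta = 1, the unweighted harmonic mean
 * double score = vm.evaluate(designations, cds);
 * double v = vm.naturalScore(score);         // 1.0 is a perfect clustering, 0.0 the worst
 * }</pre>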
*
* @author Edward Raff
*/
public class VMeasure implements ClusterEvaluation
{
private double beta;
/**
     * Creates a V-Measure which is the weighted harmonic mean between
* {@link Homogeneity} and {@link Completeness}. if β is greater than 1
     * completeness is weighted more strongly in the calculation; if β is less
* than 1, homogeneity is weighted more strongly
*
* @param beta the weight preference to apply to completeness (greater than
* one) or homogeneity ( less than 1 but > 0).
*/
public VMeasure(double beta)
{
if(beta < 0)
throw new IllegalArgumentException("Beta must be positive, not " + beta);
this.beta = beta;
}
/**
* Creates the standard V-Measure which is the harmonic mean between
* {@link Homogeneity} and {@link Completeness}
*/
public VMeasure()
{
this(1.0);
}
@Override
public double evaluate(int[] designations, DataSet dataSet)
{
if( !(dataSet instanceof ClassificationDataSet))
throw new RuntimeException("VMeasure can only be calcuate for classification data sets");
Homogeneity homo = new Homogeneity();
Completeness comp = new Completeness();
double h = homo.naturalScore(homo.evaluate(designations, dataSet));
double c = comp.naturalScore(comp.evaluate(designations, dataSet));
double v;
if((beta*h)+c == 0.0)
v = 0;
else
v = (1+beta)*h*c/((beta*h)+c);
return 1-v;
}
@Override
public double evaluate(List<List<DataPoint>> dataSets)
{
throw new UnsupportedOperationException("VMeasure requires the true data set"
+ " labels, call evaluate(int[] designations, DataSet dataSet)"
+ " instead");
}
@Override
public double naturalScore(double evaluate_score)
{
return 1-evaluate_score;
}
@Override
public VMeasure clone()
{
return new VMeasure(this.beta);
}
}
| 3,062 | 28.737864 | 101 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/evaluation/intra/IntraClusterEvaluation.java |
package jsat.clustering.evaluation.intra;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
/**
* This interface defines the contract for a method to evaluate the
* intra-cluster distance. This means an evaluation of a single cluster,
* where a higher value indicates a poorly formed cluster. This evaluation does
* not take into account any other neighboring clusters
*
* @author Edward Raff
*/
public interface IntraClusterEvaluation
{
/**
* Evaluates the cluster represented by the given list of data points.
* @param designations the array of cluster designations for the data set
* @param dataSet the full data set of all clusters
* @param clusterID the cluster id in the <tt>designations</tt> array to
* return the evaluation of
* @return the value in the range [0, Inf) that indicates how well formed
* the cluster is.
*/
public double evaluate(int[] designations, DataSet dataSet, int clusterID);
/**
* Evaluates the cluster represented by the given list of data points.
* @param dataPoints the data points that make up this cluster
* @return the value in the range [0, Inf) that indicates how well formed
* the cluster is.
*/
public double evaluate(List<DataPoint> dataPoints);
public IntraClusterEvaluation clone();
}
| 1,366 | 34.973684 | 80 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/evaluation/intra/MaxDistance.java |
package jsat.clustering.evaluation.intra;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
/**
* Evaluates a cluster's validity by returning the
* maximum distance between any two points in the cluster.
*
* @author Edward Raff
*/
public class MaxDistance implements IntraClusterEvaluation
{
private DistanceMetric dm;
/**
* Creates a new MaxDistance measure using the {@link EuclideanDistance}
*/
public MaxDistance()
{
this(new EuclideanDistance());
}
/**
* Creates a new MaxDistance
* @param dm the metric to measure the distance between two points by
*/
public MaxDistance(DistanceMetric dm)
{
this.dm = dm;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public MaxDistance(MaxDistance toCopy)
{
this(toCopy.dm.clone());
}
@Override
public double evaluate(int[] designations, DataSet dataSet, int clusterID)
{
double maxDistance = 0;
for (int i = 0; i < dataSet.size(); i++)
for (int j = i + 1; j < dataSet.size(); j++)
                if (designations[i] == clusterID && designations[j] == clusterID)//both end points must belong to the cluster
maxDistance = Math.max(
dm.dist(dataSet.getDataPoint(i).getNumericalValues(),
dataSet.getDataPoint(j).getNumericalValues()),
maxDistance);
return maxDistance;
}
@Override
public double evaluate(List<DataPoint> dataPoints)
{
double maxDistance = 0;
for(int i = 0; i < dataPoints.size(); i++)
for(int j = i+1; j < dataPoints.size(); j++ )
maxDistance = Math.max(
dm.dist(dataPoints.get(i).getNumericalValues(),
dataPoints.get(j).getNumericalValues()),
maxDistance);
return maxDistance;
}
@Override
public MaxDistance clone()
{
return new MaxDistance(this);
}
}
| 2,175 | 26.2 | 82 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/evaluation/intra/MeanCentroidDistance.java | package jsat.clustering.evaluation.intra;
import java.util.List;
import jsat.DataSet;
import jsat.SimpleDataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.*;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
/**
* Evaluates a cluster's validity by computing the mean distance of each point
* in the cluster from the cluster's centroid.
*
* @author Edward Raff
*/
public class MeanCentroidDistance implements IntraClusterEvaluation
{
private DistanceMetric dm;
/**
* Creates a new MeanCentroidDistance using the {@link EuclideanDistance}
*/
public MeanCentroidDistance()
{
this(new EuclideanDistance());
}
/**
* Creates a new MeanCentroidDistance.
* @param dm the metric to measure the distance between two points by
*/
public MeanCentroidDistance(DistanceMetric dm)
{
this.dm = dm;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public MeanCentroidDistance(MeanCentroidDistance toCopy)
{
this(toCopy.dm.clone());
}
@Override
public double evaluate(int[] designations, DataSet dataSet, int clusterID)
{
Vec mean = new DenseVector(dataSet.getNumNumericalVars());
int clusterSize = 0;
for(int i = 0; i < dataSet.size(); i++)
if(designations[i] == clusterID)
{
clusterSize++;
mean.mutableAdd(dataSet.getDataPoint(i).getNumericalValues());
}
mean.mutableDivide(clusterSize);
double dists = 0.0;
for(int i = 0; i < dataSet.size(); i++)
if(designations[i] == clusterID)
dists += dm.dist(dataSet.getDataPoint(i).getNumericalValues(), mean);
        return dists/clusterSize;//average over the points in the cluster, matching evaluate(List)
}
@Override
public double evaluate(List<DataPoint> dataPoints)
{
Vec mean = MatrixStatistics.meanVector(new SimpleDataSet(dataPoints));
double dists = 0.0;
for(DataPoint dp : dataPoints)
dists += dm.dist(dp.getNumericalValues(), mean);
return dists/dataPoints.size();
}
@Override
public MeanCentroidDistance clone()
{
return new MeanCentroidDistance(this);
}
}
| 2,371 | 25.355556 | 85 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/evaluation/intra/MeanDistance.java | package jsat.clustering.evaluation.intra;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
/**
* Evaluates a cluster's validity by computing the mean distance between all
* combinations of points.
*
 * @author Edward Raff
*/
public class MeanDistance implements IntraClusterEvaluation
{
private DistanceMetric dm;
/**
* Creates a new MeanDistance using the {@link EuclideanDistance}
*/
public MeanDistance()
{
this(new EuclideanDistance());
}
/**
* Creates a new MeanDistance
* @param dm the metric to measure the distance between two points by
*/
public MeanDistance(DistanceMetric dm)
{
this.dm = dm;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public MeanDistance(MeanDistance toCopy)
{
this(toCopy.dm.clone());
}
@Override
public double evaluate(int[] designations, DataSet dataSet, int clusterID)
{
        double distances = 0;
        int clusterSize = 0;
        for (int i = 0; i < dataSet.size(); i++)
        {
            if (designations[i] != clusterID)
                continue;//only points in the target cluster contribute
            clusterSize++;
            for (int j = i + 1; j < dataSet.size(); j++)
                if (designations[j] == clusterID)
                    distances += dm.dist(dataSet.getDataPoint(i).getNumericalValues(),
                            dataSet.getDataPoint(j).getNumericalValues());
        }
        return distances/(clusterSize*(clusterSize-1));
}
@Override
public double evaluate(List<DataPoint> dataPoints)
{
double distances = 0.0;
for(int i = 0; i < dataPoints.size(); i++)
for(int j = i+1; j < dataPoints.size(); j++ )
distances += dm.dist(dataPoints.get(i).getNumericalValues(),
dataPoints.get(j).getNumericalValues());
return distances/(dataPoints.size()*(dataPoints.size()-1));
}
@Override
public MeanDistance clone()
{
return new MeanDistance(this);
}
}
| 2,095 | 26.578947 | 87 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/evaluation/intra/SoSCentroidDistance.java | package jsat.clustering.evaluation.intra;
import java.util.List;
import jsat.DataSet;
import jsat.SimpleDataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.*;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
/**
* Evaluates a cluster's validity by computing the sum of squared distances from
* each point to the mean of the cluster.
*
* @author Edward Raff
*/
public class SoSCentroidDistance implements IntraClusterEvaluation
{
private DistanceMetric dm;
/**
     * Creates a new SoSCentroidDistance using the {@link EuclideanDistance}
*/
public SoSCentroidDistance()
{
this(new EuclideanDistance());
}
/**
     * Creates a new SoSCentroidDistance.
* @param dm the metric to measure the distance between two points by
*/
public SoSCentroidDistance(DistanceMetric dm)
{
this.dm = dm;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public SoSCentroidDistance(SoSCentroidDistance toCopy)
{
this(toCopy.dm.clone());
}
@Override
public double evaluate(int[] designations, DataSet dataSet, int clusterID)
{
Vec mean = new DenseVector(dataSet.getNumNumericalVars());
int clusterSize = 0;
for(int i = 0; i < dataSet.size(); i++)
if(designations[i] == clusterID)
{
clusterSize++;
mean.mutableAdd(dataSet.getDataPoint(i).getNumericalValues());
}
mean.mutableDivide(clusterSize);
double score = 0.0;
for(int i = 0; i < dataSet.size(); i++)
if(designations[i] == clusterID)
score += Math.pow(dm.dist(dataSet.getDataPoint(i).getNumericalValues(), mean), 2);
return score;
}
@Override
public double evaluate(List<DataPoint> dataPoints)
{
if(dataPoints.isEmpty())
return 0;
Vec mean = MatrixStatistics.meanVector(new SimpleDataSet(dataPoints));
double score = 0.0;
for(DataPoint dp : dataPoints)
score += Math.pow(dm.dist(dp.getNumericalValues(), mean), 2);
return score;
}
@Override
public SoSCentroidDistance clone()
{
return new SoSCentroidDistance(this);
}
}
| 2,408 | 25.184783 | 98 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/evaluation/intra/SumOfSqrdPairwiseDistances.java | package jsat.clustering.evaluation.intra;
import java.util.List;
import jsat.DataSet;
import jsat.SimpleDataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
/**
* Evaluates a cluster's validity by computing the normalized sum of pairwise
* distances for all points in the cluster. <br>
* Note, the normalization value for each cluster is <i>1/(2 * n)</i>, where
* <i>n</i> is the number of points in each cluster. <br>
* <br>
* For general distance metrics, this requires O(n<sup>2</sup>) work. The
* {@link EuclideanDistance} is a special case, and takes only O(n) work.
*
* @author Edward Raff
*/
public class SumOfSqrdPairwiseDistances implements IntraClusterEvaluation
{
private DistanceMetric dm;
/**
* Creates a new evaluator that uses the Euclidean distance
*/
public SumOfSqrdPairwiseDistances()
{
this(new EuclideanDistance());
}
/**
* Creates a new cluster evaluator using the given distance metric
*
* @param dm the distance metric to use
*/
public SumOfSqrdPairwiseDistances(DistanceMetric dm)
{
this.dm = dm;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public SumOfSqrdPairwiseDistances(SumOfSqrdPairwiseDistances toCopy)
{
this(toCopy.dm.clone());
}
/**
* Sets the distance metric to be used whenever this object is called to
* evaluate a cluster
* @param dm the distance metric to use
*/
public void setDistanceMetric(DistanceMetric dm)
{
this.dm = dm;
}
/**
*
* @return the distance metric being used for evaluation
*/
public DistanceMetric getDistanceMetric()
{
return dm;
}
@Override
public double evaluate(int[] designations, DataSet dataSet, int clusterID)
{
int N = 0;
double sum = 0;
List<Vec> X = dataSet.getDataVectors();
List<Double> cache = dm.getAccelerationCache(X);
        if (dm instanceof EuclideanDistance)//special case, can compute in O(N) instead
{
Vec mean = new DenseVector(X.get(0).length());
for (int i = 0; i < dataSet.size(); i++)
{
if (designations[i] != clusterID)
continue;
mean.mutableAdd(X.get(i));
N++;
}
            mean.mutableDivide((N + 1e-10));//1e-10 in case N=0
List<Double> qi = dm.getQueryInfo(mean);
for (int i = 0; i < dataSet.size(); i++)
{
if (designations[i] == clusterID)
sum += Math.pow(dm.dist(i, mean, qi, X, cache), 2);
}
return sum;
}
        //regular case, O(N^2)
for (int i = 0; i < dataSet.size(); i++)
{
if (designations[i] != clusterID)
continue;
N++;
for (int j = i + 1; j < dataSet.size(); j++)
{
if (designations[j] == clusterID)
sum += 2*Math.pow(dm.dist(i, j, X, cache), 2);
}
}
return sum / (N * 2);
}
@Override
public double evaluate(List<DataPoint> dataPoints)
{
return evaluate(new int[dataPoints.size()], new SimpleDataSet(dataPoints), 0);
}
@Override
public SumOfSqrdPairwiseDistances clone()
{
return new SumOfSqrdPairwiseDistances(this);
}
}
| 3,609 | 26.348485 | 87 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/hierarchical/DivisiveGlobalClusterer.java | package jsat.clustering.hierarchical;
import java.util.*;
import java.util.concurrent.ExecutorService;
import jsat.DataSet;
import jsat.SimpleDataSet;
import jsat.classifiers.DataPoint;
import jsat.clustering.*;
import jsat.clustering.evaluation.ClusterEvaluation;
/**
* DivisiveGlobalClusterer is a hierarchical clustering method that works by
* splitting the data set into sub trees from the top down. Unlike many top-up
* methods, such as {@link SimpleHAC}, top-down methods require another
* clustering method to perform the splitting at each iteration. If the base
* method is not deterministic, then the top-down method will not be
* deterministic.
* <br>
* Like many HAC methods, DivisiveGlobalClusterer will store the merge order of
* the clusters so that the clustering results for many <i>k</i> can be obtained.
* It is limited to the range of clusters successfully computed before.
* <br><br>
* Specifically, DivisiveGlobalClusterer greedily chooses the cluster to split
* based on an evaluation of all resulting clusters after a split. Because of this global
* search of the world, DivisiveLocalClusterer has can make a good estimate of
* the number of clusters in the data set. The quality of this result is
* dependent on the accuracy of the {@link ClusterEvaluation} used. This quality
* comes at the cost of execution speed, as more and more large evaluations of
* the whole dataset are needed at each iteration. If execution speed is more
* important, {@link DivisiveLocalClusterer} should be used instead, which
* requires only a fixed number of evaluations per iteration.
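 * <br><br>
 * A minimal usage sketch (hypothetical; assumes {@code dataSet} already exists,
 * and uses {@link NNChainHAC} only as an example of a base {@link KClusterer}
 * capable of performing the binary splits):
 * <pre>{@code
 * DivisiveGlobalClusterer dgc = new DivisiveGlobalClusterer(
 *         new NNChainHAC(), new DaviesBouldinIndex());
 * int[] designations = dgc.cluster(dataSet, 2, 10, false, null); // search k in [2, 10]
 * int[] earlier = dgc.clusterSplit(4); // recover an earlier split level without re-clustering
 * }</pre>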
*
* @author Edward Raff
*/
public class DivisiveGlobalClusterer extends KClustererBase
{
private static final long serialVersionUID = -9117751530105155090L;
private KClusterer baseClusterer;
private ClusterEvaluation clusterEvaluation;
private int[] splitList;
private int[] fullDesignations;
private DataSet originalDataSet;
public DivisiveGlobalClusterer(KClusterer baseClusterer, ClusterEvaluation clusterEvaluation)
{
this.baseClusterer = baseClusterer;
this.clusterEvaluation = clusterEvaluation;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public DivisiveGlobalClusterer(DivisiveGlobalClusterer toCopy)
{
this.baseClusterer = toCopy.baseClusterer.clone();
this.clusterEvaluation = toCopy.clusterEvaluation.clone();
if(toCopy.splitList != null)
this.splitList = Arrays.copyOf(toCopy.splitList, toCopy.splitList.length);
if(toCopy.fullDesignations != null)
this.fullDesignations = Arrays.copyOf(toCopy.fullDesignations, toCopy.fullDesignations.length);
this.originalDataSet = toCopy.originalDataSet.shallowClone();
}
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
return cluster(dataSet, 2, (int)Math.sqrt(dataSet.size()), parallel, designations);
}
@Override
public int[] cluster(DataSet dataSet, int clusters, boolean parallel, int[] designations)
{
return cluster(dataSet, clusters, clusters, parallel, designations);
}
@Override
public int[] cluster(DataSet dataSet, int lowK, int highK, boolean parallel, int[] designations)
{
if(designations == null)
designations = new int[dataSet.size()];
/**
* Is used to copy the value of designations and then alter to test the quality of a potential new clustering
*/
int[] fakeWorld = new int[dataSet.size()];
/**
* For each current cluster, we store the clustering results if we
* attempt to split it into two.
* <br>
         * Each row needs to be re-set since the clustering methods will use the array length as the cluster size
*/
final int[][] subDesignation = new int[highK][];
/**
* Stores the index from the sub data set into the full data set
*/
final int[][] originalPositions = new int[highK][dataSet.size()];
/**
* List of Lists for holding the data points of each cluster in
*/
List<List<DataPoint>> pointsInCluster = new ArrayList<>(highK);
for(int i = 0; i < highK; i++)
pointsInCluster.add(new ArrayList<>(dataSet.size()));
/**
* Stores the dissimilarity of the splitting of the cluster with the
* same index value. Negative value indicates not set.
* Special values:<br>
* <ul>
* <li>NEGATIVE_INFINITY : value never used</li>
* <li>-1 : clustering computed, but no current global evaluation</li>
* <li> >=0 : cluster computed, current value is the evaluation for using this split</li>
* </ul>
*/
final double[] splitEvaluation = new double[highK];
Arrays.fill(splitEvaluation, Double.NEGATIVE_INFINITY);
/**
* Records the order in which items were split
*/
splitList = new int[highK*2-2];
int bestK = -1;
double bestKEval = Double.POSITIVE_INFINITY;
//k is the current number of clusters, & the ID of the next cluster
for(int k = 1; k < highK; k++)
{
double bestSplitVal = Double.POSITIVE_INFINITY;
int bestID = -1;
for (int z = 0; z < k; z++)//TODO it might be better to do this loop in parallel
{
if(Double.isNaN(splitEvaluation[z]))
continue;
else if (splitEvaluation[z] == Double.NEGATIVE_INFINITY)//at most 2 will hit this per loop
{//Need to compute a split for that cluster & set up helper structures
List<DataPoint> clusterPointsZ = pointsInCluster.get(z);
clusterPointsZ.clear();
for (int i = 0; i < dataSet.size(); i++)
{
if (designations[i] != z)
continue;
originalPositions[z][clusterPointsZ.size()] = i;
clusterPointsZ.add(dataSet.getDataPoint(i));
}
subDesignation[z] = new int[clusterPointsZ.size()];
if(clusterPointsZ.isEmpty())//Empty cluster? How did that happen...
{
splitEvaluation[z] = Double.NaN;
continue;
}
SimpleDataSet subDataSet = new SimpleDataSet(clusterPointsZ);
try
{
baseClusterer.cluster(subDataSet, 2, parallel, subDesignation[z]);
}
catch(ClusterFailureException ex)
{
splitEvaluation[z] = Double.NaN;
continue;
}
}
System.arraycopy(designations, 0, fakeWorld, 0, fakeWorld.length);
for(int i = 0; i < subDesignation[z].length; i++)
if (subDesignation[z][i] == 1)
fakeWorld[originalPositions[z][i]] = k;
try
{
splitEvaluation[z] = clusterEvaluation.evaluate(fakeWorld, dataSet);
}
catch (Exception ex)//Can occur if one of the clusters has size zeros
{
splitEvaluation[z] = Double.NaN;
continue;
}
if (splitEvaluation[z] < bestSplitVal)
{
bestSplitVal = splitEvaluation[z];
bestID = z;
}
}
//We now know which cluster we should use the split of
for (int i = 0; i < subDesignation[bestID].length; i++)
if (subDesignation[bestID][i] == 1)
designations[originalPositions[bestID][i]] = k;
            //The original cluster id, and the new one should be set to -Inf
splitEvaluation[bestID] = splitEvaluation[k] = Double.NEGATIVE_INFINITY;
//Store a split list
splitList[(k-1)*2] = bestID;
splitList[(k-1)*2+1] = k;
if(lowK-1 <= k && k <= highK-1)//Should we stop?
{
if(bestSplitVal < bestKEval)
{
bestKEval = bestSplitVal;
bestK = k;
// System.out.println("Best k is now " + k + " at " + bestKEval);
}
}
}
fullDesignations = Arrays.copyOf(designations, designations.length);
//Merge the split clusters back to the one that had the best score
for (int k = splitList.length/2-1; k >= bestK; k--)
{
if (splitList[k * 2] == splitList[k * 2 + 1])
continue;//Happens when we bail out early
for (int j = 0; j < designations.length; j++)
if (designations[j] == splitList[k * 2 + 1])
designations[j] = splitList[k * 2];
}
originalDataSet = dataSet;
return designations;
}
/**
* Returns the clustering results for a specific <i>k</i> number of clusters
* for a previously computed data set. If the data set did not compute up to
* the value <i>k</i> <tt>null</tt> will be returned.
* @param targetK the number of clusters to get the result for.
* @return an array containing the assignments for each cluster in the
* original data set.
* @throws ClusterFailureException if no prior data set had been clustered
*/
public int[] clusterSplit(int targetK)
{
if(originalDataSet == null)
throw new ClusterFailureException("No prior cluster stored");
int[] newDesignations = Arrays.copyOf(fullDesignations, fullDesignations.length);
//Merge the split clusters back to the one that had the best score
for (int k = splitList.length/2-1; k >= targetK; k--)
{
if (splitList[k * 2] == splitList[k * 2 + 1])
continue;//Happens when we bail out early
for (int j = 0; j < newDesignations.length; j++)
if (newDesignations[j] == splitList[k * 2 + 1])
newDesignations[j] = splitList[k * 2];
}
return newDesignations;
}
@Override
public DivisiveGlobalClusterer clone()
{
return new DivisiveGlobalClusterer(this);
}
}
| 10,772 | 40.118321 | 117 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/hierarchical/DivisiveLocalClusterer.java |
package jsat.clustering.hierarchical;
import java.util.*;
import java.util.concurrent.ExecutorService;
import jsat.DataSet;
import jsat.SimpleDataSet;
import jsat.classifiers.DataPoint;
import jsat.clustering.*;
import jsat.clustering.evaluation.ClusterEvaluation;
/**
* DivisiveLocalClusterer is a hierarchical clustering method that works by
* splitting the data set into sub trees from the top down. Unlike many top-up
* methods, such as {@link SimpleHAC}, top-down methods require another
* clustering method to perform the splitting at each iteration. If the base
* method is not deterministic, then the top-down method will not be
* deterministic.
* <br><br>
* Specifically, DivisiveLocalClusterer greedily chooses the cluster to split
* based on an evaluation of only the cluster being split. Because of this local
* search of the world, DivisiveLocalClusterer has poor performance in
* determining the number of clusters in the data set. As such, only the methods
* where the exact number of clusters are recommended.
* <br>
* This greedy strategy can also lead to drilling down clusters into small
* parts, and works best when only a small number of clusters are needed.
*
* @author Edward Raff
*/
public class DivisiveLocalClusterer extends KClustererBase
{
private static final long serialVersionUID = 8616401472810067778L;
private KClusterer baseClusterer;
private ClusterEvaluation clusterEvaluation;
public DivisiveLocalClusterer(KClusterer baseClusterer, ClusterEvaluation clusterEvaluation)
{
this.baseClusterer = baseClusterer;
this.clusterEvaluation = clusterEvaluation;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public DivisiveLocalClusterer(DivisiveLocalClusterer toCopy)
{
this(toCopy.baseClusterer.clone(), toCopy.clusterEvaluation.clone());
}
@Override
public int[] cluster(DataSet dataSet, int[] designations)
{
return cluster(dataSet, 2, (int)Math.sqrt(dataSet.size()), designations);
}
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
return cluster(dataSet, 2, (int)Math.sqrt(dataSet.size()), parallel, designations);
}
@Override
public int[] cluster(DataSet dataSet, int clusters, boolean parallel, int[] designations)
{
if(designations == null)
designations = new int[dataSet.size()];
/**
* For each current cluster, we store the clustering results if we
* attempt to split it into two.
* <br>
         * Each row needs to be re-set since the clustering methods will use the array length as the cluster size
*/
final int[][] subDesignation = new int[clusters][];
/**
* Stores the index from the sub data set into the full data set
*/
final int[][] originalPositions = new int[clusters][dataSet.size()];
/**
* Stores the dissimilarity of the splitting of the cluster with the same index value. Negative value indicates not set
*/
final double[] splitEvaluation = new double[clusters];
PriorityQueue<Integer> clusterToSplit = new PriorityQueue<>(clusters,
(Integer t, Integer t1) -> Double.compare(splitEvaluation[t], splitEvaluation[t1])
);
clusterToSplit.add(0);//We must start out spliting the one cluster of everyone!
Arrays.fill(designations, 0);
//Create initial split we will start from
baseClusterer.cluster(dataSet, 2, parallel, designations);
subDesignation[0] = Arrays.copyOf(designations, designations.length);
for(int i = 0; i < originalPositions[0].length; i++)
originalPositions[0][i] = i;
List<DataPoint> dpSubC1 = new ArrayList<>();
List<DataPoint> dpSubC2 = new ArrayList<>();
/*
* TODO it could be updated to use the split value to do a search range
         * and stop when a large jump occurs. This will perform poorly and
         * underestimate the number of clusters, because it will descend one path
         * as splitting clusters improves until it gets to the correct cluster
         * size. Popping back up will then cause an increase, which will cause
* the early termination.
*/
for(int k = 1; k < clusters; k++)
{
int useSplit = clusterToSplit.poll();
int newClusterID = k;
dpSubC1.clear();
dpSubC2.clear();
//Split the data set into its two sub data sets
for(int i = 0; i < subDesignation[useSplit].length; i++)
{
int origPos = originalPositions[useSplit][i];
if(subDesignation[useSplit][i] == 0)
{
dpSubC1.add(dataSet.getDataPoint(origPos));
                    continue;//We will assign cluster '1' to be the new cluster number
}
dpSubC2.add(dataSet.getDataPoint(origPos));
designations[origPos] = newClusterID;
}
computeSubClusterSplit(subDesignation, useSplit, dpSubC1,
dataSet, designations, originalPositions,
splitEvaluation, clusterToSplit, parallel);
computeSubClusterSplit(subDesignation, newClusterID, dpSubC2,
dataSet, designations, originalPositions,
splitEvaluation, clusterToSplit, parallel);
}
return designations;
}
/**
* Takes the data set and computes the clustering of a sub cluster, and
* stores its information, and places the result in the queue
*
* @param subDesignation the array of arrays to store the designation array
* for the sub clustering
* @param originalCluster the originalCluster that we want to store the
* split information of
* @param listOfDataPointsInCluster the list of all data points that belong
* to <tt>originalCluster</tt>
* @param fullDataSet the full original data set
* @param fullDesignations the designation array for the full original data
* set
* @param originalPositions the array of arrays to store the map from and
* index in <tt>listOfDataPointsInCluster</tt> to its index in the full data
* set.
* @param splitEvaluation the array to store the cluster evaluation of the
* data set
* @param clusterToSplit the priority queue that stores the cluster id and
* sorts based on how good the sub splits were.
*/
private void computeSubClusterSplit(final int[][] subDesignation,
int originalCluster, List<DataPoint> listOfDataPointsInCluster, DataSet fullDataSet,
int[] fullDesignations, final int[][] originalPositions,
final double[] splitEvaluation,
PriorityQueue<Integer> clusterToSplit, boolean parallel)
{
subDesignation[originalCluster] = new int[listOfDataPointsInCluster.size()];
int pos = 0;
for(int i = 0; i < fullDataSet.size(); i++)
{
if(fullDesignations[i] != originalCluster)
continue;
originalPositions[originalCluster][pos++] = i;
}
//Cluster the sub cluster
SimpleDataSet dpSubC1DataSet = new SimpleDataSet(listOfDataPointsInCluster);
try
{
baseClusterer.cluster(dpSubC1DataSet, 2, parallel, subDesignation[originalCluster]);
splitEvaluation[originalCluster] = clusterEvaluation.evaluate(subDesignation[originalCluster], dpSubC1DataSet);
clusterToSplit.add(originalCluster);
}
catch (ClusterFailureException ex)
{
splitEvaluation[originalCluster] = Double.POSITIVE_INFINITY;
}
}
@Override
public int[] cluster(DataSet dataSet, int lowK, int highK, boolean parallel, int[] designations)
{
return cluster(dataSet, lowK, parallel, designations);
}
@Override
public DivisiveLocalClusterer clone()
{
return new DivisiveLocalClusterer(this);
}
}
| 8,263 | 38.352381 | 127 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/hierarchical/NNChainHAC.java | /*
* Copyright (C) 2016 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.clustering.hierarchical;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import static jsat.clustering.ClustererBase.createClusterListFromAssignmentArray;
import jsat.clustering.KClusterer;
import jsat.clustering.KClustererBase;
import jsat.clustering.dissimilarity.LanceWilliamsDissimilarity;
import jsat.clustering.dissimilarity.WardsDissimilarity;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.math.OnLineStatistics;
import jsat.utils.FakeExecutor;
import jsat.utils.IndexTable;
import jsat.utils.IntDoubleMap;
import jsat.utils.IntDoubleMapArray;
import jsat.utils.IntList;
import jsat.utils.ListUtils;
import jsat.utils.SystemInfo;
import jsat.utils.concurrent.AtomicDouble;
import jsat.utils.concurrent.ParallelUtils;
/**
* This class implements Hierarchical Agglomerative Clustering via the Nearest
* Neighbor Chain approach. This runs in O(n<sup>2</sup>) time for any
* {@link LanceWilliamsDissimilarity Lance Williams} dissimilarity and uses O(n)
* memory. <br>
* This implementation also supports multi-threaded execution.
*
 * See:
* <ul>
* <li>Müllner, D. (2011). Modern hierarchical, agglomerative clustering
* algorithms. arXiv Preprint arXiv:1109.2378. Retrieved from
* <a href="http://arxiv.org/abs/1109.2378">here</a></li>
* <li>Murtagh, F., & Contreras, P. (2011). Methods of Hierarchical Clustering.
* In Data Mining and Knowledge Discovery. Wiley-Interscience.</li>
* </ul>
*
* @author Edward Raff <[email protected]>
*/
public class NNChainHAC implements KClusterer
{
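    /*
     * Illustrative usage sketch, assuming a DataSet named "data" already
     * exists (the name is hypothetical):
     *
     *   NNChainHAC hac = new NNChainHAC(new WardsDissimilarity());
     *   int[] assignments = hac.cluster(data, 5, true, new int[data.size()]);
     *   //The merge order is retained, so other cluster counts can be
     *   //re-derived afterwards without re-running the clustering:
     *   int[] threeClusters = hac.getClusterDesignations(new int[data.size()], 3);
     */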
private LanceWilliamsDissimilarity distMeasure;
private DistanceMetric dm;
/**
* Stores the merge list, each merge is in a pair of 2 values. The final
* merge list should contain the last merged pairs at the front of the array
* (index 0, 1), and the first merges at the end of the array. The left
* value in each pair is the index of the data point that the clusters were
* merged under, while the right value is the index that was merged in and
* treated as no longer its own cluster.
*/
private int[] merges;
/**
* Creates a new NNChainHAC using the {@link WardsDissimilarity Ward} method.
*/
public NNChainHAC()
{
this(new WardsDissimilarity());
}
/**
* Creates a new NNChainHAC
* @param distMeasure the dissimilarity measure to use
*/
public NNChainHAC(LanceWilliamsDissimilarity distMeasure)
{
this(distMeasure, new EuclideanDistance());
}
/**
* Creates a new NNChain using the given dissimilarity measure and distance
* metric. The correctness guarantees may not hold for distances other than
* the {@link EuclideanDistance Euclidean} distance, which is the norm for
     * hierarchical clustering.
*
* @param distMeasure the dissimilarity measure to use
* @param distance the distance metric to use
*/
public NNChainHAC(LanceWilliamsDissimilarity distMeasure, DistanceMetric distance)
{
this.distMeasure = distMeasure;
this.dm = distance;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
protected NNChainHAC(NNChainHAC toCopy)
{
this.distMeasure = toCopy.distMeasure.clone();
this.dm = toCopy.dm.clone();
if(toCopy.merges != null)
this.merges = Arrays.copyOf(toCopy.merges, toCopy.merges.length);
}
@Override
public NNChainHAC clone()
{
return new NNChainHAC(this);
}
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
return cluster(dataSet, 2, (int) Math.sqrt(dataSet.size()), parallel, designations);
}
private double getDist(int a, int j, int[] size, List<Vec> vecs, List<Double> cache, List<Map<Integer, Double>> d_xk)
{
if (size[j] == 1 && size[a] == 1)
return dm.dist(a, j, vecs, cache);
else
{
            //a is the one we are using over and over, it's more likely to have the value - check it first
if(d_xk.get(a) != null)
{
Double tmp = d_xk.get(a).get(j);
if(tmp != null)
return tmp;
else//wasn't found searching d_xk
return d_xk.get(j).get(a);//has to be found now
}
else
return d_xk.get(j).get(a);//has to be found now
}
}
/**
     * Returns the assignment array that would have been computed for the
* previous data set with the desired number of clusters.
*
* @param designations the array to store the assignments in
* @param clusters the number of clusters desired
* @return the original array passed in, or <tt>null</tt> if no data set has been clustered.
* @see #hasStoredClustering()
*/
public int[] getClusterDesignations(int[] designations, int clusters)
{
if(merges == null)
return null;
return PriorityHAC.assignClusterDesignations(designations, clusters, merges);
}
/**
     * Returns the assignment array that would have been computed for the
* previous data set with the desired number of clusters.
*
* @param clusters the number of clusters desired
     * @param data the data set that was previously clustered
* @return the list of data points in each cluster, or <tt>null</tt> if no
* data set has been clustered.
* @see #hasStoredClustering()
*/
public List<List<DataPoint>> getClusterDesignations(int clusters, DataSet data)
{
if(merges == null || (merges.length+2)/2 != data.size())
return null;
int[] assignments = new int[data.size()];
assignments = getClusterDesignations(assignments, clusters);
return createClusterListFromAssignmentArray(assignments, data);
}
@Override
public int[] cluster(DataSet dataSet, int clusters, boolean parallel, int[] designations)
{
return cluster(dataSet, clusters, clusters, parallel, designations);
}
@Override
public int[] cluster(DataSet dataSet, int lowK, int highK, boolean parallel, int[] designations)
{
if(designations == null)
designations = new int[dataSet.size()];
final int N = dataSet.size();
merges = new int[N*2-2];
/**
* Keeps track of which index was removed from the list due to being
* merged at the given point
*/
IntList merge_removed = new IntList(N);
/**
* Keeps track of which index was kept and was in the merge at the given
* point
*/
IntList merge_kept = new IntList(N);
/**
* The number of items in the cluster denoted at the given index
*/
final int[] size = new int[N];
Arrays.fill(size, 1);
/**
* Stores the distance between the two clusters that were merged at each step.
*/
double[] mergedDistance = new double[N-1];
int L_pos = 0;
final IntList S = new IntList(N);
ListUtils.addRange(S, 0, N, 1);
final List<Map<Integer, Double>> dist_map = new ArrayList<>(N);
for(int i = 0; i < N; i++)
dist_map.add(null);
final List<Vec> vecs = dataSet.getDataVectors();
final List<Double> cache = dm.getAccelerationCache(vecs, parallel);
int[] chain = new int[N];
int chainPos = 0;
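        /*
         * Main NN-chain loop, sketched: the chain follows nearest neighbors
         * from an arbitrary starting cluster until a reciprocal
         * nearest-neighbor pair is found (a is b's nearest neighbor and vice
         * versa). For reducible dissimilarities, such as Ward's method, that
         * pair can be merged immediately without altering the final merge
         * structure, which is what allows the O(n^2) runtime.
         */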
while(S.size() > 1)
{
int a, b;
if(chainPos <= 3)
{
// 7: a←(any element of S) > E.g. S[0]
a = S.getI(0);
// 8: chain ←[a]
chainPos = 0;
chain[chainPos++] = a;
// 9: b←(any element of S \ {a}) > E.g. S[1]
b = S.getI(1);
}
else
{
// 11: a←chain[−4] > 4th to last element
a = chain[chainPos-4];
// 12: b←chain[−3] > 3rd to last element
b = chain[chainPos-3];
// 13: Remove chain[−1], chain[−2] and chain[−3] > Cut the tail (x, y,x)
chainPos -= 3;
}
double dist_ab;
do//15:
{
// 16: c ← argmin_{ x!=a} d[x, a] with preference for b
AtomicInteger c = new AtomicInteger(b);
AtomicDouble minDist = new AtomicDouble(getDist(a, c.get(), size, vecs, cache, dist_map));
final int a_ = a;
final int c_ = c.get();
boolean doPara = parallel && S.size() > SystemInfo.LogicalCores*2 && S.size() >= 100;
ParallelUtils.run(doPara, S.size(), (start, end)->
{
double local_minDist = Double.POSITIVE_INFINITY;
int local_c = S.get(start);
for(int i = start; i < end; i++)
{
int j = S.getI(i);
if(j == a_ || j == c_)
continue;//we already have these guys! just not removed from S yet
double dist = getDist(a_, j, size, vecs, cache, dist_map);
if(dist < local_minDist)
{
local_minDist = dist;
local_c = j;
}
}
synchronized(minDist)
{
if(local_minDist < minDist.get())
{
minDist.set(local_minDist);
c.set(local_c);
}
}
});
dist_ab = minDist.get();
//17: a, b ← c, a
b = a;
a = c.get();
//18: Append a to chain
chain[chainPos++] = a;
}
while (chainPos < 3 || a != chain[chainPos-3]); //19: until length(chain) ≥ 3 and a = chain[−3] > a, b are reciprocal
final int n = Math.min(a, b);
final int removed = Math.max(a, b);
// 20: Append (a, b, d[a, b]) to L > nearest neighbors.
merge_removed.add(removed);
merge_kept.add(n);
mergedDistance[L_pos] = dist_ab;
L_pos++;
// 21: Remove a, b from S
S.removeAll(Arrays.asList(a, b));
// System.out.println("Removed " + a + " " + b + " S=" + S + " chain=" + IntList.view(chain, chainPos));
// 22: n←(new node label)
for(int i = Math.max(0, chainPos-5); i < chainPos; i++)//bug in paper? we end with [a, b, a] in the chain, but one of them is a bad index now
if(chain[i] == removed)
chain[i] = n;
// 23: size[n]←size[a]+size[b]
final int size_a = size[a], size_b = size[b];
            //set deferred till later to make sure we don't muck anything needed in computation
// 24: Update d with the information d[n,x], for all x ∈ S.
boolean singleThread = !parallel || S.size() <= SystemInfo.LogicalCores*10;
final Map<Integer, Double> map_n; // = S.isEmpty() ? null : new IntDoubleMap(S.size());
if(S.isEmpty())
map_n = null;
            else if(S.size()*100 >= N || !singleThread)// Wasteful, but faster and acceptable
map_n = new IntDoubleMapArray(N);
else
{
map_n = new IntDoubleMap(S.size());
                //pre-fill to guarantee thread-safe alteration of values when done in parallel && using the IntDoubleMap implementation
for(int x : S)
map_n.put(x, -0.0);
}
final int a_ = a;
final int b_ = b;
final double dist_ab_ = dist_ab;
ParallelUtils.streamP(S.streamInts(), !singleThread).forEach(x ->
{
double d_ax = getDist(a_, x, size, vecs, cache, dist_map);
double d_bx = getDist(b_, x, size, vecs, cache, dist_map);
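                //The Lance-Williams recurrence gives the merged cluster's dissimilarity as
                //d(a u b, x) = alpha_a*d(a,x) + alpha_b*d(b,x) + beta*d(a,b) + gamma*|d(a,x) - d(b,x)|,
                //with coefficients determined by the linkage supplied via distMeasure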
double d_xn = distMeasure.dissimilarity(size_a, size_b, size[x], dist_ab_, d_ax, d_bx);
Map<Integer, Double> dist_map_x = dist_map.get(x);
if(dist_map_x == null)
{
// dist_map[x] = new IntDoubleMap(1);
// dist_map[x].put(n, d_xn);
}
else //if(dist_map[x] != null)
{
dist_map_x.remove(b_);
dist_map_x.put(n, d_xn);
if(dist_map_x.size()*50 < N && !(dist_map_x instanceof IntDoubleMap))//we are using such a small percentage, put it into a sparser map
                        dist_map.set(x, new IntDoubleMap(dist_map_x));//dist_map is an ArrayList already filled with entries, so this set is thread safe
}
map_n.put(x, d_xn);
});
            dist_map.set(removed, null);//no longer in use no matter what
dist_map.set(n, map_n);
// 25: S ← S ∪ {n}
size[n] = size_a + size_b;
S.add(n);
}
fixMergeOrderAndAssign(mergedDistance, merge_kept, merge_removed, lowK, N, highK, designations);
return designations;
}
/**
* After clustering, we need to fix up the merge order - since the NNchain
* only gets the merges correct, not their ordering. This also figures out
* what number of clusters to use
*
* @param mergedDistance
* @param merge_kept
* @param merge_removed
* @param lowK
* @param N
* @param highK
* @param designations
*/
private void fixMergeOrderAndAssign(double[] mergedDistance, IntList merge_kept, IntList merge_removed, int lowK, final int N, int highK, int[] designations)
{
        //Now that we are done clustering, we need to re-order the merges so that the smallest distances are merged first
IndexTable it = new IndexTable(mergedDistance);
it.apply(merge_kept);
it.apply(merge_removed);
it.apply(mergedDistance);
for(int i = 0; i < it.length(); i++)
{
merges[merges.length-i*2-1] = merge_removed.get(i);
merges[merges.length-i*2-2] = merge_kept.get(i);
}
//Now lets figure out a guess at the cluster size
/*
         * Keep track of the average dist when merging, mark when it becomes abnormally large as a guess at K
*/
OnLineStatistics distChange = new OnLineStatistics();
double maxStndDevs = Double.MIN_VALUE;
/**
* How many clusters to return
*/
int clusterSize = lowK;
for(int i = 0; i < mergedDistance.length; i++)
{
            //Keep track of the changes in cluster size, and mark if this one was abnormally large
distChange.add(mergedDistance[i]);
int curK = N - i;
if (curK >= lowK && curK <= highK)//In the cluster window?
{
double stndDevs = (mergedDistance[i] - distChange.getMean()) / distChange.getStandardDeviation();
if (stndDevs > maxStndDevs)
{
maxStndDevs = stndDevs;
clusterSize = curK;
}
}
}
PriorityHAC.assignClusterDesignations(designations, clusterSize, merges);
}
}
| 17,193 | 36.541485 | 161 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/hierarchical/PriorityHAC.java | package jsat.clustering.hierarchical;
import java.util.*;
import java.util.concurrent.ExecutorService;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.clustering.KClustererBase;
import jsat.clustering.dissimilarity.UpdatableClusterDissimilarity;
import jsat.math.OnLineStatistics;
import static jsat.clustering.dissimilarity.AbstractClusterDissimilarity.*;
import jsat.utils.IntPriorityQueue;
/**
*
* @author Edward Raff
*/
public class PriorityHAC extends KClustererBase
{
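    /*
     * Illustrative usage sketch, assuming "data" is an existing DataSet and
     * "dissim" is some UpdatableClusterDissimilarity implementation (both
     * names are hypothetical):
     *
     *   PriorityHAC hac = new PriorityHAC(dissim);
     *   int[] assignments = hac.cluster(data, 4, new int[data.size()]);
     *   //The merge history is kept, so other clusterings can be requested
     *   //later without re-clustering:
     *   List<List<DataPoint>> sixClusters = hac.getClusterDesignations(6);
     */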
private static final long serialVersionUID = -702489462117567542L;
private UpdatableClusterDissimilarity distMeasure;
/**
* Stores the merge list, each merge is in a pair of 2 values. The final
* merge list should contain the last merged pairs at the front of the array
* (index 0, 1), and the first merges at the end of the array. The left
* value in each pair is the index of the data point that the clusters were
* merged under, while the right value is the index that was merged in and
* treated as no longer its own cluster.
*/
private int[] merges;
private DataSet curDataSet;
public PriorityHAC(UpdatableClusterDissimilarity dissMeasure)
{
this.distMeasure = dissMeasure;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public PriorityHAC(PriorityHAC toCopy)
{
this.distMeasure = toCopy.distMeasure.clone();
if(toCopy.merges != null)
this.merges = Arrays.copyOf(toCopy.merges, toCopy.merges.length);
this.curDataSet = toCopy.curDataSet.shallowClone();
}
@Override
public int[] cluster(DataSet dataSet, int[] designations)
{
return cluster(dataSet, 2, (int)Math.sqrt(dataSet.size()), designations);
}
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
return cluster(dataSet, 2, (int)Math.sqrt(dataSet.size()), parallel, designations);
}
@Override
public int[] cluster(DataSet dataSet, int clusters, boolean parallel, int[] designations)
{
return cluster(dataSet, clusters, clusters, parallel, designations);
}
@Override
public int[] cluster(DataSet dataSet, int clusters, int[] designations)
{
return cluster(dataSet, clusters, clusters, designations);
}
@Override
public int[] cluster(DataSet dataSet, int lowK, int highK, boolean parallel, int[] designations)
{
return cluster(dataSet, lowK, highK, designations);
}
private void updateDistanceTableAndQueues(List<IntPriorityQueue> P, int[] I, int k1, int k2, final double[][] distanceMatrix)
{
IntPriorityQueue Pk1 = P.get(k1);
for(int i = 0; i < P.size(); i++)
{
if(I[i] == 0 || i == k2 || i == k1)
continue;
IntPriorityQueue curTargetQ = P.get(i);
curTargetQ.remove(k1);
curTargetQ.remove(k2);
double dis = distMeasure.dissimilarity(k1, I[k1], k2, I[k2], i, I[i], distanceMatrix);
setDistance(distanceMatrix, i, k1, dis);
curTargetQ.add(k1);
Pk1.add(i);
}
}
    private List<IntPriorityQueue> setUpPriorityQueue(int[] I, final double[][] distanceMatrix)
{
List<IntPriorityQueue> P = new ArrayList<IntPriorityQueue>(I.length);
for(int i = 0; i < I.length; i++)
{
//The row index we are considering
final int supremeIndex = i;
IntPriorityQueue pq = new IntPriorityQueue(I.length, new Comparator<Integer>()
{
@Override
public int compare(Integer o1, Integer o2)
{
double d1 = getDistance(distanceMatrix, supremeIndex, o1);
double d2 = getDistance(distanceMatrix, supremeIndex, o2);
return Double.compare(d1, d2);
}
}, IntPriorityQueue.Mode.BOUNDED);
            //Fill up the priority queue
for(int j = 0; j < I.length; j++ )
{
if(i == j)
continue;
pq.add(j);
}
P.add(pq);
}
return P;
}
@Override
public int[] cluster(DataSet dataSet, int lowK, int highK, int[] designations)
{
this.curDataSet = dataSet;
merges = new int[dataSet.size()*2-2];
/**
* Keeps track of the current cluster size for the data point. If zero,
* the data point has been merged and is no longer a candidate for
* future consideration. If non zero, it indicates the number of data
* points in its implicit cluster. All points start out in their own
* implicit cluster.
*/
int[] I = new int[dataSet.size()];
Arrays.fill(I, 1);
this.curDataSet = dataSet;
/*
         * Keep track of the average dist when merging, stop when it becomes abnormally large
*/
OnLineStatistics distChange = new OnLineStatistics();
final double[][] distanceMatrix = createDistanceMatrix(dataSet, distMeasure);
//Create priority ques for each data point
        List<IntPriorityQueue> P = setUpPriorityQueue(I, distanceMatrix);
//We will choose the cluster size as the most abnormal jump in dissimilarity from a merge
int clusterSize = lowK;
double maxStndDevs = Double.MIN_VALUE;
        //We now know the dissimilarity matrix & Qs, so we can begin merging
//We will perform all merges, and store them - and then return a clustering level from the merge history
for(int k = 0; k < I.length-1; k++)
{
int k1 = -1, k2 = -1;
double dk1 = Double.MAX_VALUE, tmp;
for(int i = 0; i < P.size(); i++)
if( I[i] > 0 && (tmp = getDistance(distanceMatrix, i, P.get(i).element())) < dk1)
{
dk1 = tmp;
k1 = i;
k2 = P.get(i).element();
}
            //Keep track of the changes in cluster size, and mark if this one was abnormally large
            distChange.add(dk1);
            if( (I.length - k) >= lowK && (I.length - k) <= highK)//In the cluster window?
{
double stndDevs = (dk1-distChange.getMean())/distChange.getStandardDeviation();
if(stndDevs > maxStndDevs)
{
maxStndDevs = stndDevs;
clusterSize = I.length-k;
}
}
            //We have now found the smallest pair in O(n), first we will update the Qs and matrix. k1 will be the new merged cluster
P.get(k1).clear();//This Q will need all new values
P.get(k2).clear();//This Q will no longer be used
updateDistanceTableAndQueues(P, I, k1, k2, distanceMatrix);
//Now we fix up designations
//Note which clusters were just merged
merges[k*2] = k2;
merges[k*2+1] = k1;
//Update counts
I[k1] += I[k2];
I[k2] = 0;
}
reverseMergeArray();
if(designations == null)
designations = new int[dataSet.size()];
designations = assignClusterDesignations(designations, clusterSize);
return designations;
}
/**
* Reverses the merge array so that the front contains the last merges instead of the first.
     * This is done so that cluster creation accesses the merge array in order, which is cache friendly. <br>
* This method must be called once before using {@link #assignClusterDesignations(int[], int) }
*/
private void reverseMergeArray()
{
for(int i = 0; i < merges.length/2; i++)
{
int tmp = merges[i];
merges[i] = merges[merges.length-i-1];
merges[merges.length-i-1] = tmp;
}
}
/**
* The PriorityHAC stores its merging order, so that multiple clusterings
     * of different sizes can be obtained without having to recluster the
* data set. This is possible in part because HAC is deterministic. <br>
* This returns <tt>true</tt> if there is currently a data set and its merge
* order stored.
*
* @return <tt>true</tt> if you can call for more clusterings,
* <tt>false</tt> if no data set has been clustered.
*/
public boolean hasStoredClustering()
{
return curDataSet != null;
}
/**
     * Returns the assignment array that would have been computed for the
* previous data set with the desired number of clusters.
*
* @param designations the array to store the assignments in
* @param clusters the number of clusters desired
* @return the original array passed in, or <tt>null</tt> if no data set has been clustered.
* @see #hasStoredClustering()
*/
public int[] getClusterDesignations(int[] designations, int clusters)
{
if(!hasStoredClustering())
return null;
return assignClusterDesignations(designations, clusters);
}
/**
     * Returns the assignment array that would have been computed for the
* previous data set with the desired number of clusters.
*
* @param clusters the number of clusters desired
* @return the list of data points in each cluster, or <tt>null</tt> if no
* data set has been clustered.
* @see #hasStoredClustering()
*/
public List<List<DataPoint>> getClusterDesignations(int clusters)
{
if(!hasStoredClustering())
return null;
        int[] assignments = new int[curDataSet.size()];
        assignClusterDesignations(assignments, clusters);
        return createClusterListFromAssignmentArray(assignments, curDataSet);
}
/**
* Goes through the <tt>merge</tt> array in order from last merge to first, and sets the cluster assignment for each data point based on the merge list.
* @param designations the array to store the designations in, or null to have a new one created automatically.
* @param clusters the number of clusters to assume
* @return the array storing the designations. A new one will be created and returned if <tt>designations</tt> was null.
*/
private int[] assignClusterDesignations(int[] designations, int clusters)
{
return assignClusterDesignations(designations, clusters, merges);
}
/**
* Goes through the <tt>merge</tt> array in order from last merge to first, and sets the cluster assignment for each data point based on the merge list.
* @param designations the array to store the designations in, or null to have a new one created automatically.
* @param clusters the number of clusters to assume
* @param merges the array of merge pairs
* @return the array storing the designations. A new one will be created and returned if <tt>designations</tt> was null.
*/
protected static int[] assignClusterDesignations(int[] designations, int clusters, int[] merges)
{
int curCluster = 0;
Arrays.fill(designations, -1);
for(int i = 0; i < merges.length; i++)
{
if(designations[merges[i]] == -1)//it has not been assigned
{
if(curCluster < clusters)//It will be a top level cluster
designations[merges[i]] = curCluster++;
else
                    designations[merges[i]] = designations[merges[i-1]];//The new cluster is always in an odd index, so its parent is the even index to the left
}
}
return designations;
}
@Override
public PriorityHAC clone()
{
return new PriorityHAC(this);
}
}
| 12,104 | 35.905488 | 162 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/hierarchical/SimpleHAC.java | package jsat.clustering.hierarchical;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import jsat.DataSet;
import jsat.clustering.KClustererBase;
import static jsat.clustering.dissimilarity.AbstractClusterDissimilarity.createDistanceMatrix;
import jsat.clustering.dissimilarity.ClusterDissimilarity;
import jsat.math.OnLineStatistics;
import jsat.utils.IntSet;
/**
* Provides a naive implementation of hierarchical agglomerative clustering
* (HAC). This means the clustering is built from the bottom up, merging points
* into clusters. HAC clustering is deterministic. The naive implementation runs
* in O(n<sup>3</sup>) time. <br>
* <br>
* NOTE: This implementation does not currently support parallel clustering.
*
*
* @author Edward Raff
*/
public class SimpleHAC extends KClustererBase
{
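    /*
     * Illustrative usage sketch, assuming "data" is an existing DataSet and
     * "dissim" is some ClusterDissimilarity implementation (both names are
     * hypothetical):
     *
     *   SimpleHAC hac = new SimpleHAC(dissim);
     *   //search for a good cluster count between 2 and 10, possibly stopping early
     *   int[] assignments = hac.cluster(data, 2, 10, new int[data.size()]);
     */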
private static final long serialVersionUID = 7138073766768205530L;
/**
     * The notion behind requiring several standard deviations is that as the
     * clustering progresses, the min value is (usually) monotonically rising,
     * so we would like a bigger jump
*/
private double stndDevs = 3.5;
private ClusterDissimilarity dissMeasure;
public SimpleHAC(ClusterDissimilarity disMeasure)
{
this.dissMeasure = disMeasure;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public SimpleHAC(SimpleHAC toCopy)
{
this(toCopy.dissMeasure.clone());
this.stndDevs = toCopy.stndDevs;
}
@Override
public int[] cluster(DataSet dataSet, int[] designations)
{
return cluster(dataSet, 2, (int)Math.sqrt(dataSet.size()), designations);
}
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
return cluster(dataSet, designations);
}
@Override
public int[] cluster(DataSet dataSet, int clusters, boolean parallel, int[] designations)
{
return cluster(dataSet, clusters, designations);
}
@Override
public int[] cluster(DataSet dataSet, int clusters, int[] designations)
{
return cluster(dataSet, clusters, clusters, designations);
}
@Override
public int[] cluster(DataSet dataSet, int lowK, int highK, boolean parallel, int[] designations)
{
return cluster(dataSet, lowK, highK, designations);
}
@Override
public int[] cluster(DataSet dataSet, int lowK, int highK, int[] designations)
{
if(designations == null)
designations = new int[dataSet.size()];
        //Keep track of the average dissimilarity when merging, stop when it becomes abnormally large
OnLineStatistics disChange = new OnLineStatistics();
        //Represent each cluster by a set of indices, initially each data point is its own cluster
List<Set<Integer>> clusters = new ArrayList<Set<Integer>>(dataSet.size());
for(int i =0; i < dataSet.size(); i++)
{
Set<Integer> set = new IntSet();
set.add(i);
clusters.add(set);
}
double[][] distanceMatrix = createDistanceMatrix(dataSet, dissMeasure);
while( clusters.size() > lowK)
{
double lowestDiss = Double.MAX_VALUE, tmp;
int a = 0, b = 1;
//N^2 search for the most similar pairing of clusters
for(int i = 0; i < clusters.size(); i++)
for(int j = i+1; j < clusters.size(); j++)
{
if( (tmp = dissMeasure.dissimilarity(clusters.get(i), clusters.get(j), distanceMatrix)) < lowestDiss)
{
lowestDiss = tmp;
a = i;
b = j;
}
}
if(clusters.size() <= highK)//Then we check if we should stop early
{
if(disChange.getMean() + disChange.getStandardDeviation() * stndDevs < lowestDiss)
                    break;//Abnormally large difference, we assume we are forcing two real & separate clusters into one group
}
disChange.add(lowestDiss);
            //Merge clusters, a < b is guaranteed by the loop structure
clusters.get(a).addAll(clusters.remove(b));
}
        //Now that we have the assignments, we must set them
int curClusterID = 0;
for(Set<Integer> clustering : clusters)
{
for(int index : clustering)
designations[index] = curClusterID;
curClusterID++;
}
return designations;
}
@Override
public SimpleHAC clone()
{
return new SimpleHAC(this);
}
}
| 4,802 | 31.234899 | 123 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/kmeans/ElkanKMeans.java |
package jsat.clustering.kmeans;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.*;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.DataSet;
import jsat.clustering.ClusterFailureException;
import jsat.clustering.SeedSelectionMethods.SeedSelection;
import static jsat.clustering.SeedSelectionMethods.selectIntialPoints;
import jsat.linear.ConstantVector;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.*;
import jsat.utils.*;
import jsat.utils.concurrent.AtomicDoubleArray;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
/**
* An efficient implementation of the K-Means algorithm. This implementation uses
* the triangle inequality to accelerate computation while maintaining the exact
* same solution. This requires that the {@link DistanceMetric} used support
* {@link DistanceMetric#isSubadditive() }.
* <br>
* Implementation based on the paper: Using the Triangle Inequality to Accelerate k-Means, by Charles Elkan
*
* @author Edward Raff
*/
public class ElkanKMeans extends KMeans
{
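    /*
     * Illustrative usage sketch, assuming "data" is an existing DataSet (the
     * name is hypothetical):
     *
     *   ElkanKMeans km = new ElkanKMeans(new EuclideanDistance());
     *   km.setStoreMeans(true);
     *   int[] assignments = km.cluster(data, 10, true, new int[data.size()]);
     *   List<Vec> centroids = km.getMeans();
     */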
private static final long serialVersionUID = -1629432283103273051L;
private DenseSparseMetric dmds;
private boolean useDenseSparse = false;
/**
* Creates a new KMeans instance.
* @param dm the distance metric to use, must support {@link DistanceMetric#isSubadditive() }.
* @param rand the random number generator to use during seed selection
* @param seedSelection the method of seed selection to use
*/
public ElkanKMeans(DistanceMetric dm, Random rand, SeedSelection seedSelection)
{
super(dm, seedSelection, rand);
if(!dm.isSubadditive())
throw new ClusterFailureException("KMeans implementation requires the triangle inequality");
}
/**
* Creates a new KMeans instance
* @param dm the distance metric to use, must support {@link DistanceMetric#isSubadditive() }.
* @param rand the random number generator to use during seed selection
*/
public ElkanKMeans(DistanceMetric dm, Random rand)
{
this(dm, rand, DEFAULT_SEED_SELECTION);
}
/**
* Creates a new KMeans instance
* @param dm the distance metric to use, must support {@link DistanceMetric#isSubadditive() }.
*/
public ElkanKMeans(DistanceMetric dm)
{
this(dm, RandomUtil.getRandom());
}
/**
* Creates a new KMeans instance. The {@link EuclideanDistance} will be used by default.
*/
public ElkanKMeans()
{
this(new EuclideanDistance());
}
public ElkanKMeans(ElkanKMeans toCopy)
{
super(toCopy);
if(toCopy.dmds != null)
this.dmds = (DenseSparseMetric) toCopy.dmds.clone();
this.useDenseSparse = toCopy.useDenseSparse;
}
/**
* Sets whether or not to use {@link DenseSparseMetric } when computing.
* This may or may not provide a speed increase.
* @param useDenseSparse whether or not to compute the distance from dense
* mean vectors to sparse ones using acceleration
*/
public void setUseDenseSparse(boolean useDenseSparse)
{
this.useDenseSparse = useDenseSparse;
}
/**
* Returns if Dense Sparse acceleration will be used if available
* @return if Dense Sparse acceleration will be used if available
*/
public boolean isUseDenseSparse()
{
return useDenseSparse;
}
/*
     * IMPLEMENTATION NOTE: Means are updated as a set of sums via deltas. Deltas are
     * computed locally within a thread local object. Then to avoid dropping updates,
     * every thread that was working must apply its deltas itself, as other threads
     * cannot access another thread's locals.
*/
@Override
protected double cluster(final DataSet dataSet, List<Double> accelCache, final int k, final List<Vec> means, final int[] assignment, boolean exactTotal, boolean parallel, boolean returnError, Vec dataPointWeights)
{
try
{
/**
* N data points
*/
final int N = dataSet.size();
final int D = dataSet.getNumNumericalVars();
if(N < k)//Not enough points
throw new ClusterFailureException("Fewer data points then desired clusters, decrease cluster size");
/**
* Weights for each data point
*/
final Vec W;
if(dataPointWeights == null)
W = dataSet.getDataWeights();
else
W = dataPointWeights;
TrainableDistanceMetric.trainIfNeeded(dm, dataSet);
final List<Vec> X = dataSet.getDataVectors();
//Distance computation acceleration
final List<Double> distAccelCache;
final List<List<Double>> meanQIs = new ArrayList<>(k);
            //done a wonky way b/c we want this as a final object for convenience, otherwise we may be stuck with null accel when we don't need to be
if(accelCache == null)
distAccelCache = dm.getAccelerationCache(X, parallel);
else
distAccelCache = accelCache;
if(means.size() != k)
{
means.clear();
means.addAll(selectIntialPoints(dataSet, k, dm, distAccelCache, rand, seedSelection, parallel));
}
//Make our means dense
for(int i = 0; i < means.size(); i++)
if(means.get(i).isSparse())
means.set(i, new DenseVector(means.get(i)));
final double[][] lowerBound = new double[N][k];
final double[] upperBound = new double[N];
/**
* Distances between centroid i and all other centroids
*/
final double[][] centroidSelfDistances = new double[k][k];
final double[] sC = new double[k];
calculateCentroidDistances(k, centroidSelfDistances, means, sC, null, parallel);
final AtomicDoubleArray meanCount = new AtomicDoubleArray(k);
            Vec[] oldMeans = new Vec[k];//The means from the current step are needed when computing the new means
final Vec[] meanSums = new Vec[k];
for (int i = 0; i < k; i++)
{
oldMeans[i] = means.get(i).clone();//This way the new vectors are of the same implementation
if(dm.supportsAcceleration())
meanQIs.add(dm.getQueryInfo(means.get(i)));
else
meanQIs.add(Collections.EMPTY_LIST);//Avoid null pointers
meanSums[i] = new DenseVector(D);
}
if(dm instanceof DenseSparseMetric && useDenseSparse)
dmds = (DenseSparseMetric) dm;
final double[] meanSummaryConsts = dmds != null ? new double[means.size()] : null;
            int atLeast = 2;//Used to perform an extra round (first round does not assign)
final AtomicBoolean changeOccurred = new AtomicBoolean(true);
final boolean[] r = new boolean[N];//Default value of a boolean is false, which is what we want
final ThreadLocal<Vec[]> localDeltas = new ThreadLocal<Vec[]>()
{
@Override
protected Vec[] initialValue()
{
Vec[] toRet = new Vec[k];
for(int i = 0; i < toRet.length; i++)
toRet[i] = new DenseVector(D);
return toRet;
}
};
initialClusterSetUp(k, N, X, means, lowerBound, upperBound, centroidSelfDistances, assignment, meanCount, meanSums, distAccelCache, meanQIs, localDeltas, parallel, W);
int iterLimit = MaxIterLimit;
while ((changeOccurred.get() || atLeast > 0) && iterLimit-- >= 0)
{
atLeast--;
changeOccurred.set(false);
//Step 1
                if(iterLimit < MaxIterLimit-1)//already computed right before the loop, so skip on the first iteration
calculateCentroidDistances(k, centroidSelfDistances, means, sC, meanSummaryConsts, parallel);
final CountDownLatch latch = new CountDownLatch(SystemInfo.LogicalCores);
//Step 2 / 3
ParallelUtils.run(parallel, N, (q)->
{
//Step 2, skip those that u(v) < s(c(v))
if (upperBound[q] <= sC[assignment[q]])
return;
final Vec v = X.get(q);
for (int c = 0; c < k; c++)
if (c != assignment[q] && upperBound[q] > lowerBound[q][c] && upperBound[q] > centroidSelfDistances[assignment[q]][c] * 0.5)
{
step3aBoundsUpdate(X, r, q, v, means, assignment, upperBound, lowerBound, meanSummaryConsts, distAccelCache, meanQIs);
step3bUpdate(X, upperBound, q, lowerBound, c, centroidSelfDistances, assignment, v, means, localDeltas, meanCount, changeOccurred, meanSummaryConsts, distAccelCache, meanQIs, W);
}
step4UpdateCentroids(meanSums, localDeltas);
});
step5_6_distanceMovedBoundsUpdate(k, oldMeans, means, meanSums, meanCount, N, lowerBound, upperBound, assignment, r, meanQIs, parallel);
}
double totalDistance = 0.0;
if(returnError)
{
if(saveCentroidDistance)
nearestCentroidDist = new double[N];
else
nearestCentroidDist = null;
if (exactTotal == true)
for (int i = 0; i < N; i++)
{
double dist = dm.dist(i, means.get(assignment[i]), meanQIs.get(assignment[i]), X, distAccelCache);
totalDistance += Math.pow(dist, 2);
if(saveCentroidDistance)
nearestCentroidDist[i] = dist;
}
else
for (int i = 0; i < N; i++)
{
totalDistance += Math.pow(upperBound[i], 2);
if(saveCentroidDistance)
nearestCentroidDist[i] = upperBound[i];
}
}
return totalDistance;
}
catch (Exception ex)
{
Logger.getLogger(ElkanKMeans.class.getName()).log(Level.SEVERE, null, ex);
}
return Double.MAX_VALUE;
}
private void initialClusterSetUp(final int k, final int N, final List<Vec> dataSet, final List<Vec> means, final double[][] lowerBound,
final double[] upperBound, final double[][] centroidSelfDistances, final int[] assignment, final AtomicDoubleArray meanCount,
final Vec[] meanSums, final List<Double> distAccelCache, final List<List<Double>> meanQIs,
final ThreadLocal<Vec[]> localDeltas, boolean parallel, final Vec W)
{
ParallelUtils.run(parallel, N, (from, to)->
{
Vec[] deltas = localDeltas.get();
final boolean[] skip = new boolean[k];
for (int q = from; q < to; q++)
{
Vec v = dataSet.get(q);
double minDistance = Double.MAX_VALUE;
int index = -1;
                //Default value is false, we can't skip anything yet
Arrays.fill(skip, false);
for (int i = 0; i < k; i++)
{
if (skip[i])
continue;
double d = dm.dist(q, means.get(i), meanQIs.get(i), dataSet, distAccelCache);
lowerBound[q][i] = d;
if (d < minDistance)
{
minDistance = upperBound[q] = d;
index = i;
//We now have some information, use lemma 1 to see if we can skip anything
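                        //Lemma 1 (triangle inequality): if d(c_i, c_z) >= 2*d(x, c_i), then
                        //d(x, c_z) >= d(x, c_i), so c_z cannot be closer than c_i and the
                        //distance to it never needs to be computed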
for (int z = i + 1; z < k; z++)
if (centroidSelfDistances[i][z] >= 2 * d)
skip[z] = true;
}
}
assignment[q] = index;
final double weight = W.get(q);
meanCount.addAndGet(index, weight);
deltas[index].mutableAdd(weight, v);
}
for (int i = 0; i < deltas.length; i++)
{
synchronized (meanSums[i])
{
meanSums[i].mutableAdd(deltas[i]);
}
deltas[i].zeroOut();
}
});
}
private void step4UpdateCentroids(Vec[] meanSums, ThreadLocal<Vec[]> localDeltas)
{
Vec[] deltas = localDeltas.get();
for(int i = 0; i < deltas.length; i++)
{
if(deltas[i].nnz() == 0)
continue;
synchronized(meanSums[i])
{
meanSums[i].mutableAdd(deltas[i]);
}
deltas[i].zeroOut();
}
}
private void step5_6_distanceMovedBoundsUpdate(final int k, final Vec[] oldMeans, final List<Vec> means, final Vec[] meanSums,
final AtomicDoubleArray meanCount, final int N, final double[][] lowerBound, final double[] upperBound,
final int[] assignment, final boolean[] r, final List<List<Double>> meanQIs, boolean parallel)
{
final double[] distancesMoved = new double[k];
ParallelUtils.run(parallel, k, (i)->
{
//Re compute centroids
means.get(i).copyTo(oldMeans[i]);
//normalize
meanSums[i].copyTo(means.get(i));
double count = meanCount.get(i);
if (count <= 1e-14)
means.get(i).zeroOut();
else
means.get(i).mutableDivide(meanCount.get(i));
distancesMoved[i] = dm.dist(oldMeans[i], means.get(i));
if(dm.supportsAcceleration())
meanQIs.set(i, dm.getQueryInfo(means.get(i)));
//Step 5
for (int q = 0; q < N; q++)
lowerBound[q][i] = Math.max(lowerBound[q][i] - distancesMoved[i], 0);
});
//Step 6
ParallelUtils.run(parallel, N, (start, end) ->
{
for(int q = start; q < end; q++)
{
upperBound[q] += distancesMoved[assignment[q]];
r[q] = true;
}
});
}
private void step3aBoundsUpdate(List<Vec> X, boolean[] r, int q, Vec v, final List<Vec> means, final int[] assignment, double[] upperBound, double[][] lowerBound, double[] meanSummaryConsts, List<Double> distAccelCache, List<List<Double>> meanQIs)
{
//3(a)
if (r[q])
{
r[q] = false;
double d;
int meanIndx = assignment[q];
if(dmds == null)
d = dm.dist(q, means.get(meanIndx), meanQIs.get(meanIndx), X, distAccelCache);
else
d = dmds.dist(meanSummaryConsts[meanIndx], means.get(meanIndx), v);
lowerBound[q][meanIndx] = d;///Not sure if this is supposed to be here
upperBound[q] = d;
}
}
private void step3bUpdate(List<Vec> X, double[] upperBound, final int q, double[][] lowerBound, final int c, double[][] centroidSelfDistances,
final int[] assignment, Vec v, final List<Vec> means, final ThreadLocal<Vec[]> localDeltas, AtomicDoubleArray meanCount,
final AtomicBoolean changeOccurred, double[] meanSummaryConsts, List<Double> distAccelCache, List<List<Double>> meanQIs,
final Vec W)
{
//3(b)
if (upperBound[q] > lowerBound[q][c] || upperBound[q] > centroidSelfDistances[assignment[q]][c] / 2)
{
double d;
if(dmds == null)
d = dm.dist(q, means.get(c), meanQIs.get(c), X, distAccelCache);
else
d = dmds.dist(meanSummaryConsts[c], means.get(c), v);
lowerBound[q][c] = d;
if (d < upperBound[q])
{
Vec[] deltas = localDeltas.get();
final double weight = W.get(q);
deltas[assignment[q]].mutableSubtract(weight, v);
meanCount.addAndGet(assignment[q], -weight);
deltas[c].mutableAdd(weight, v);
meanCount.addAndGet(c, weight);
assignment[q] = c;
upperBound[q] = d;
changeOccurred.set(true);
}
}
}
private void calculateCentroidDistances(final int k, final double[][] centroidSelfDistances, final List<Vec> means, final double[] sC, final double[] meanSummaryConsts, boolean parallel)
{
final List<Double> meanAccelCache = dm.supportsAcceleration() ? dm.getAccelerationCache(means) : null;
//TODO can improve parallel performance for when k ~<= # cores
ParallelUtils.run(parallel, k, (i)->
{
for (int z = i + 1; z < k; z++)
                centroidSelfDistances[z][i] = centroidSelfDistances[i][z] = dm.dist(i, z, means, meanAccelCache);
if (meanSummaryConsts != null)
meanSummaryConsts[i] = dmds.getVectorConstant(means.get(i));
});
//final step quickly figure out sCmin
for (int i = 0; i < k; i++)
{
double sCmin = Double.MAX_VALUE;
for (int z = 0; z < k; z++)
if (z != i)
sCmin = Math.min(sCmin, centroidSelfDistances[i][z]);
sC[i] = sCmin / 2.0;
}
}
@Override
public ElkanKMeans clone()
{
return new ElkanKMeans(this);
}
}
| 18,194 | 38.989011 | 251 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/kmeans/ElkanKernelKMeans.java |
package jsat.clustering.kmeans;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;
import jsat.DataSet;
import jsat.clustering.ClusterFailureException;
import jsat.distributions.kernels.KernelTrick;
import jsat.exceptions.FailedToFitException;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.*;
import jsat.utils.*;
import jsat.utils.concurrent.ParallelUtils;
/**
 * An efficient implementation of the kernelized K-Means algorithm. This implementation uses
* the triangle inequality to accelerate computation while maintaining the exact
* same solution. This requires that the {@link DistanceMetric} used support
* {@link DistanceMetric#isSubadditive() }. <br>
* <br>
* See: Elkan, C. (2003). <i>Using the Triangle Inequality to Accelerate
* k-Means.</i> In Proceedings of the Twentieth International Conference on
* Machine Learning (ICML-2003) (pp. 147–153). AAAI Press.
*
* @author Edward Raff
*/
public class ElkanKernelKMeans extends KernelKMeans
{
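    /*
     * Illustrative usage sketch, assuming "data" is an existing DataSet; the
     * RBF kernel shown (jsat.distributions.kernels.RBFKernel) is just one
     * possible KernelTrick choice:
     *
     *   ElkanKernelKMeans kkm = new ElkanKernelKMeans(new RBFKernel(0.5));
     *   int[] assignments = kkm.cluster(data, 5, true, null);
     */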
private static final long serialVersionUID = 4998832201379993827L;
/**
* Distances between centroid i and all other centroids
*/
private double[][] centroidSelfDistances;
/**
* un-normalized dot product between centroid i and all other centroids.
*/
private double[][] centroidPairDots;
/**
* Creates a new Kernel K Means object
* @param kernel the kernel to use
*/
public ElkanKernelKMeans(KernelTrick kernel)
{
super(kernel);
}
public ElkanKernelKMeans(ElkanKernelKMeans toCopy)
{
super(toCopy);
}
@Override
public int findClosestCluster(Vec x, List<Double> qi)
{
double min = Double.MAX_VALUE;
int min_indx = -1;
//Use triangle inequality to prune out clusters!
boolean[] pruned = new boolean[meanSqrdNorms.length];
Arrays.fill(pruned, false);
for(int i = 0; i < meanSqrdNorms.length; i++)
{
if(ownes[i] <= 1e-15 || pruned[i])
continue;
double dist = distance(x, qi, i);
if(dist < min)
{
min = dist;
min_indx = i;
}
//we now know d(x, c_i), see who we can prune based off this
for(int j = i+1; j < meanSqrdNorms.length; j++)
{
if(centroidSelfDistances[i][j] >= 2*dist)
pruned[j] = true;
}
}
return min_indx;
}
/**
* Updates the un-normalized dot product between cluster centers, which is
* stored in {@link #centroidPairDots}. Using this method avoids redundant
* calculation, only adjusting for the values that have changed.
*
* @param prev_assignments what the previous assignment of data points to
* cluster center looked like
* @param new_assignments the new assignment of data points to cluster
* centers
     * @param parallel {@code true} if multiple threads should be used for the computation
*/
private void update_centroid_pair_dots(final int[] prev_assignments, final int[] new_assignments, boolean parallel)
{
final int N = X.size();
        //TODO might need an update for allocation
ParallelUtils.run(parallel, N, (start, end) ->
{
double[][] localChanges = new double[centroidPairDots.length][centroidPairDots.length];
for(int i = start; i < end; i++)
{
final double w_i = W.get(i);
int old_c_i = prev_assignments[i];
int new_c_i = new_assignments[i];
for(int j = i; j < N; j++)
{
int old_c_j = prev_assignments[j];
int new_c_j = new_assignments[j];
if(old_c_i == new_c_i && old_c_j == new_c_j)//no class changes
continue;//so we can skip it
final double w_j = W.get(j);
double K_ij = w_i * w_j * kernel.eval(i, j, X, accel);
if(old_c_i >= 0 && old_c_j >= 0)
{
localChanges[old_c_i][old_c_j] -= K_ij;
localChanges[old_c_j][old_c_i] -= K_ij;
}
localChanges[new_c_i][new_c_j] += K_ij;
localChanges[new_c_j][new_c_i] += K_ij;
}
}
for(int i = 0; i < localChanges.length; i++)
{
double[] centroidPairDots_i = centroidPairDots[i];
synchronized(centroidPairDots_i)
{
for(int j = 0; j < localChanges[i].length; j++)
centroidPairDots_i[j] += localChanges[i][j];
}
}
});
}
/**
     * This is a helper method where the actual clustering is performed. This is because there
* are multiple strategies for modifying kmeans, but all of them require this step.
* <br>
* The distance metric used is trained if needed
*
* @param dataSet The set of data points to perform clustering on
* @param k the number of clusters
* @param assignment an empty temp space to store the clustering
* classifications. Should be the same length as the number of data points
* @param exactTotal determines how the objective function (return value)
* will be computed. If true, extra work will be done to compute the exact
* distance from each data point to its cluster. If false, an upper bound
* approximation will be used.
     * @param parallel {@code true} if multiple threads should be used, {@code false} for single threaded execution
* @return the sum of squares distances from each data point to its closest cluster
*/
protected double cluster(final DataSet dataSet, final int k, final int[] assignment, boolean exactTotal, boolean parallel)
{
try
{
/**
* N data points
*/
final int N = dataSet.size();
if(N < k)//Not enough points
throw new ClusterFailureException("Fewer data points then desired clusters, decrease cluster size");
X = dataSet.getDataVectors();
setup(k, assignment, dataSet.getDataWeights());
final double[][] lowerBound = new double[N][k];
final double[] upperBound = new double[N];
/**
* Distances between centroid i and all other centroids
*/
centroidSelfDistances = new double[k][k];
centroidPairDots = new double[k][k];
final double[] sC = new double[k];
calculateCentroidDistances(k, centroidSelfDistances, sC, assignment, null, parallel);
final int[] prev_assignment = new int[N];
            int atLeast = 2;//Used to perform an extra round (first round does not assign)
final AtomicBoolean changeOccurred = new AtomicBoolean(true);
final boolean[] r = new boolean[N];//Default value of a boolean is false, which is what we want
initialClusterSetUp(k, N, lowerBound, upperBound, centroidSelfDistances, assignment, parallel);
int iterLimit = maximumIterations;
while ((changeOccurred.get() || atLeast > 0) && iterLimit-- >= 0)
{
atLeast--;
changeOccurred.set(false);
//Step 1
                if(iterLimit < maximumIterations-1)//already computed right before the loop, so skip on the first iteration
calculateCentroidDistances(k, centroidSelfDistances, sC, assignment, prev_assignment, parallel);
                //Save current assignment for the update next iteration
System.arraycopy(assignment, 0, prev_assignment, 0, assignment.length);
final CountDownLatch latch = new CountDownLatch(SystemInfo.LogicalCores);
//Step 2 / 3
ParallelUtils.run(parallel, N, (q) ->
{
//Step 2, skip those that u(v) < s(c(v))
if (upperBound[q] <= sC[assignment[q]])
return;
for (int c = 0; c < k; c++)
if (c != assignment[q] && upperBound[q] > lowerBound[q][c] && upperBound[q] > centroidSelfDistances[assignment[q]][c] * 0.5)
{
step3aBoundsUpdate(r, q, assignment, upperBound, lowerBound);
step3bUpdate(upperBound, q, lowerBound, c, centroidSelfDistances, assignment, changeOccurred);
}
});
int moved = step4_5_6_distanceMovedBoundsUpdate(k, N, lowerBound, upperBound, assignment, r, parallel);
}
double totalDistance = 0.0;
            //TODO do I really want to keep this around for the kernel version?
if (exactTotal == true)
for (int i = 0; i < N; i++)
totalDistance += Math.pow(upperBound[i], 2);//TODO this isn't exact any more
else
for (int i = 0; i < N; i++)
totalDistance += Math.pow(upperBound[i], 2);
return totalDistance;
}
catch (Exception ex)
{
ex.printStackTrace();
throw new FailedToFitException(ex);
}
}
private void initialClusterSetUp(final int k, final int N, final double[][] lowerBound,
final double[] upperBound, final double[][] centroidSelfDistances, final int[] assignment, boolean parallel)
{
ParallelUtils.run(parallel, N, (start, end) ->
{
//Skip markers
final boolean[] skip = new boolean[k];
for (int q = start; q < end; q++)
{
double minDistance = Double.MAX_VALUE;
int index = -1;
                //Default value is false, we can't skip anything yet
Arrays.fill(skip, false);
for (int i = 0; i < k; i++)
{
if (skip[i])
continue;
double d = distance(q, i, assignment);
lowerBound[q][i] = d;
if (d < minDistance)
{
minDistance = upperBound[q] = d;
index = i;
//We now have some information, use lemma 1 to see if we can skip anything
for (int z = i + 1; z < k; z++)
if (centroidSelfDistances[i][z] >= 2 * d)
skip[z] = true;
}
}
newDesignations[q] = index;
}
});
}
private int step4_5_6_distanceMovedBoundsUpdate(final int k, final int N,
final double[][] lowerBound, final double[] upperBound,
final int[] assignment, final boolean[] r, boolean parallel)
{
final double[] distancesMoved = new double[k];
        //copy the original sqrd norms b/c we need them to compute the distance the means moved
final double[] oldSqrdNorms = new double[meanSqrdNorms.length];
for(int i = 0; i < meanSqrdNorms.length; i++)
oldSqrdNorms[i] = meanSqrdNorms[i]*normConsts[i];
        //first we need to do the assignment movement update, otherwise the sqrdNorms will be wrong and we will get incorrect values for cluster movement
int moved = ParallelUtils.run(parallel, N, (start, end)->
{
double[] sqrdChange = new double[k];
double[] ownerChange = new double[k];
int localChange = 0;
for(int q = start; q < end; q++)
localChange += updateMeansFromChange(q, assignment, sqrdChange, ownerChange);
synchronized(assignment)
{
applyMeanUpdates(sqrdChange, ownerChange);
}
return localChange;
},
(t, u) -> t+u);
updateNormConsts();
//now do cluster movement
//Step 5
ParallelUtils.run(parallel, k, (i)->
{
distancesMoved[i] = meanToMeanDistance(i, i, newDesignations, assignment, oldSqrdNorms[i], parallel);
});
ParallelUtils.run(parallel, k, (c) ->
{
for (int q = 0; q < N; q++)
lowerBound[q][c] = Math.max(lowerBound[q][c] - distancesMoved[c], 0);
});
//now we can move the assignments over
System.arraycopy(newDesignations, 0, assignment, 0, N);
//Step 6
ParallelUtils.run(parallel, N, (start, end) ->
{
for(int q = start; q < end; q++)
{
upperBound[q] += distancesMoved[assignment[q]];
r[q] = true;
}
});
return moved;
}
private void step3aBoundsUpdate(boolean[] r, int q, final int[] assignment, double[] upperBound, double[][] lowerBound)
{
//3(a)
if (r[q])
{
r[q] = false;
int meanIndx = assignment[q];
double d = distance(q, meanIndx, assignment);
lowerBound[q][meanIndx] = d;///Not sure if this is supposed to be here
upperBound[q] = d;
}
}
private void step3bUpdate(double[] upperBound, final int q, double[][] lowerBound,
final int c, double[][] centroidSelfDistances, final int[] assignment,
final AtomicBoolean changeOccurred)
{
//3(b)
if (upperBound[q] > lowerBound[q][c] || upperBound[q] > centroidSelfDistances[assignment[q]][c] / 2)
{
double d = distance(q, c, assignment);
lowerBound[q][c] = d;
if (d < upperBound[q])
{
newDesignations[q] = c;
upperBound[q] = d;
changeOccurred.lazySet(true);
}
}
}
private void calculateCentroidDistances(final int k, final double[][] centroidSelfDistances, final double[] sC, final int[] curAssignments, int[] prev_assignments, final boolean parallel)
{
if(prev_assignments == null)
{
prev_assignments = new int[curAssignments.length];
Arrays.fill(prev_assignments, -1);
}
final int[] prev_assing = prev_assignments;
//compute self dot-products
update_centroid_pair_dots(prev_assing, curAssignments, parallel);
double[] weight_per_cluster = new double[k];
for (int i = 0; i < curAssignments.length; i++)
weight_per_cluster[curAssignments[i]] += W.get(i);
//normalize dot products and make distances
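        //The centroids only exist implicitly in the kernel-induced feature space,
        //so the squared distance is formed purely from inner products:
        //||mu_i - mu_z||^2 = <mu_i, mu_i> + <mu_z, mu_z> - 2*<mu_i, mu_z>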
for (int i = 0; i < k; i++)
for (int z = i + 1; z < k; z++)
{
double dot = centroidPairDots[i][z];
dot /= (weight_per_cluster[i] * weight_per_cluster[z]);
double d = meanSqrdNorms[i] * normConsts[i] + meanSqrdNorms[z] * normConsts[z] - 2 * dot;
                centroidSelfDistances[z][i] = centroidSelfDistances[i][z] = Math.sqrt(Math.max(0, d));//Avoid rare cases where 2*dot might be slightly larger
}
//update sC
for (int i = 0; i < k; i++)
{
double sCmin = Double.MAX_VALUE;
for (int z = 0; z < k; z++)
if(i != z)
sCmin = Math.min(sCmin, centroidSelfDistances[i][z]);
sC[i] = sCmin / 2.0;
}
}
@Override
public int[] cluster(DataSet dataSet, int clusters, boolean parallel, int[] designations)
{
if(designations == null)
designations = new int[dataSet.size()];
if(dataSet.size() < clusters)
throw new ClusterFailureException("Fewer data points then desired clusters, decrease cluster size");
cluster(dataSet, clusters, designations, false, parallel);
return designations;
}
@Override
public ElkanKernelKMeans clone()
{
return new ElkanKernelKMeans(this);
}
}
| 16,411 | 37.616471 | 191 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/kmeans/GMeans.java |
package jsat.clustering.kmeans;
import java.util.*;
import java.util.concurrent.ExecutorService;
import jsat.DataSet;
import jsat.SimpleDataSet;
import jsat.classifiers.DataPoint;
import jsat.distributions.Normal;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
import jsat.clustering.SeedSelectionMethods;
import jsat.linear.*;
import static java.lang.Math.*;
/**
* This class provides a method of performing {@link KMeans} clustering when the
* value of {@code K} is not known. It works by recursively splitting means
 * up to some specified maximum value. <br>
* <br>
* When the value of {@code K} is specified, the implementation will simply call
* the regular KMeans object it was constructed with. <br>
* <br>
 * See: Hamerly, G., & Elkan, C. (2003). <i>Learning the K in K-Means</i>. In
* seventeenth annual conference on neural information processing systems (NIPS)
* (pp. 281–288). Retrieved from
* <a href="http://papers.nips.cc/paper/2526-learning-the-k-in-k-means.pdf">here
* </a>
*
* @author Edward Raff
*/
public class GMeans extends KMeans
{
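    /*
     * Illustrative usage sketch, assuming "data" is an existing DataSet (the
     * name is hypothetical):
     *
     *   GMeans gm = new GMeans(new HamerlyKMeans());
     *   gm.setMinClusterSize(50);
     *   //let G-Means pick k, but keep it between 2 and 64 clusters
     *   int[] assignments = gm.cluster(data, 2, 64, true, new int[data.size()]);
     *   List<Vec> centers = gm.getMeans();
     */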
private static final long serialVersionUID = 7306976407786792661L;
private boolean trustH0 = true;
private boolean iterativeRefine = true;
private int minClusterSize = 25;
private KMeans kmeans;
public GMeans()
{
this(new HamerlyKMeans());
}
public GMeans(KMeans kmeans)
{
super(kmeans.dm, kmeans.seedSelection, kmeans.rand);
this.kmeans = kmeans;
kmeans.setStoreMeans(true);
}
public GMeans(GMeans toCopy)
{
super(toCopy);
this.kmeans = toCopy.kmeans.clone();
this.trustH0 = toCopy.trustH0;
this.iterativeRefine = toCopy.iterativeRefine;
this.minClusterSize = toCopy.minClusterSize;
}
/**
* Each new cluster will be tested for normality, with the null hypothesis
* H0 being that the cluster is normal. If this is set to {@code true} then
* an optimization is done that once a center fails to reject the null
* hypothesis, it will never be tested again. This is a safe assumption when
* {@link #setIterativeRefine(boolean) } is set to {@code false}, but
* otherwise may not quite be true. <br>
* <br>
* When {@code trustH0} is {@code true} (the default option), G-Means will
* make at most O(k) runs of k-means for the final value of k chosen. When
* {@code false}, at most O(k<sup>2</sup>) runs of k-means will occur.
*
* @param trustH0 {@code true} if a centroid shouldn't be re-tested once it
* fails to split.
*/
public void setTrustH0(boolean trustH0)
{
this.trustH0 = trustH0;
}
/**
*
* @return {@code true} if cluster that fail to split wont be re-tested.
* {@code false} if they will.
*/
public boolean getTrustH0()
{
return trustH0;
}
/**
* Sets the minimum size for splitting a cluster.
* @param minClusterSize the minimum number of data points that must be present in a
* cluster to consider splitting it
*/
public void setMinClusterSize(int minClusterSize)
{
if(minClusterSize < 2)
throw new IllegalArgumentException("min cluster size that could be split is 2, not " + minClusterSize);
this.minClusterSize = minClusterSize;
}
/**
*
* @return the minimum number of data points that must be present in a
* cluster to consider splitting it
*/
public int getMinClusterSize()
{
return minClusterSize;
}
/**
* Sets whether or not the set of all cluster centers should be refined at
* every iteration. By default this is {@code true} and part of how the
* GMeans algorithm is described. Setting this to {@code false} can result
* in large speedups at the potential cost of quality.
* @param refineCenters {@code true} to refine the cluster centers at every
* step, {@code false} to skip this step of the algorithm.
*/
public void setIterativeRefine(boolean refineCenters)
{
this.iterativeRefine = refineCenters;
}
/**
*
* @return {@code true} if the cluster centers are refined at every
* step, {@code false} if skipping this step of the algorithm.
*/
public boolean getIterativeRefine()
{
return iterativeRefine;
}
@Override
public int[] cluster(DataSet dataSet, int[] designations)
{
return cluster(dataSet, 1, Math.max(dataSet.size()/20, 10), designations);
}
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
return cluster(dataSet, 1, Math.max(dataSet.size()/20, 10), parallel, designations);
}
@Override
public int[] cluster(DataSet dataSet, int lowK, int highK, boolean parallel, int[] designations)
{
final int N = dataSet.size();
//initiate
if(lowK >= 2)
{
designations = kmeans.cluster(dataSet, lowK, parallel, designations);
means = new ArrayList<Vec>(kmeans.getMeans());
}
else//1 mean of all the data
{
if(designations == null || designations.length < N)
designations = new int[N];
else
Arrays.fill(designations, 0);
means = new ArrayList<Vec>(Arrays.asList(MatrixStatistics.meanVector(dataSet)));
}
int[] subS = new int[designations.length];
int[] subC = new int[designations.length];
Vec v = new DenseVector(dataSet.getNumNumericalVars());
double[] xp = new double[N];
        //track if we should stop testing a mean or not
List<Boolean> dontRedo = new ArrayList<Boolean>(Collections.nCopies(means.size(), false));
//pre-compute acceleration cache instead of re-computing every refine call
List<Double> accelCache = dm.getAccelerationCache(dataSet.getDataVectors(), parallel);
double thresh = 1.8692;//TODO make this configurable
int origMeans;
do
{
origMeans = means.size();
for(int c = 0; c < origMeans; c++)
{
if(dontRedo.get(c))
continue;
//2. Initialize two centers, called “children” of c.
                //for now let's just let k-means decide
List<DataPoint> X = getDatapointsFromCluster(c, designations, dataSet, subS);
                final int n = X.size();//NOTE, not the same as N. PAY ATTENTION
if(X.size() < minClusterSize || means.size() == highK)
                    continue;//this check will force the loop to exit once we hit max K
SimpleDataSet subSet = new SimpleDataSet(X);
//3. Run k-means on these two centers in X. Let c1, c2 be the child centers chosen by k-means
subC = kmeans.cluster(subSet, 2, parallel, subC);
List<Vec> subMean = kmeans.getMeans();
Vec c1 = subMean.get(0);
Vec c2 = subMean.get(1);
/* 4.
* Let v = c1 − c2 be a d-dimensional vector that connects the two
* centers. This is the direction that k-means believes to be
* important for clustering. Then project X onto v:
* x'_i = <x_i, v>/||v||^2. X' is a 1-dimensional representation of
* the data projected onto v. Transform X' so that it has mean 0 and
* variance 1.
*/
c1.copyTo(v);
v.mutableSubtract(c2);
double vNrmSqrd = Math.pow(v.pNorm(2), 2);
if(Double.isNaN(vNrmSqrd) || vNrmSqrd < 1e-6)
                    continue;//can happen when the cluster is all the same item (or nearly so)
for(int i = 0; i < X.size(); i++)
xp[i] = X.get(i).getNumericalValues().dot(v)/vNrmSqrd;
//we need this in sorted order later, so lets just sort them now
Arrays.sort(xp, 0, X.size());
DenseVector Xp = new DenseVector(xp, 0, X.size());
Xp.mutableSubtract(Xp.mean());
Xp.mutableDivide(Math.max(Xp.standardDeviation(), 1e-6));
//5.
for(int i = 0; i < Xp.length(); i++)
Xp.set(i, Normal.cdf(Xp.get(i), 0, 1));
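                //Anderson-Darling test statistic (the CDF values in Xp are already sorted):
                //A^2 = -n - (1/n) * sum_{i=1..n} [(2i-1)ln(z_i) + (2(n-i)+1)ln(1-z_i)],
                //which is the form accumulated in the loop below before the eq(2) correction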
double A = 0;
for(int i = 1; i <= Xp.length(); i++)
{
double phi = Xp.get(i-1);
A += (2*i-1)*log(phi) + (2*(n-i)+1)*log(1-phi);
}
A/=-n;
A += -n;
//eq(2)
A *= 1 + 4.0/n - 25.0/(n*n);
if(A <= thresh)
{
if(trustH0)//if we are going to trust that H0 is true forever, mark it
dontRedo.set(c, true);
continue;//passed the test, do not split
}
//else, accept the split
//first, update assignment array. Cluster '0' stays as is, re-set cluster '1'
for(int i = 0; i < X.size(); i++)
if(subC[i] == 1)
designations[subS[i]] = means.size();
//replace current mean and add new one
means.set(c, c1.clone());//cur index in dontRedo stays false
means.add(c2.clone());//add a 'false' for new center
dontRedo.add(false);
}
//"Between each round of splitting, we run k-means on the entire dataset and all the centers to refine the current solution"
if(iterativeRefine && means.size() > 1)
kmeans.cluster(dataSet, accelCache, means.size(), means, designations, false, parallel, false, null);
}
while (origMeans < means.size());
        if(!iterativeRefine && means.size() > 1)//if we haven't been refining we need to do so now!
kmeans.cluster(dataSet, accelCache, means.size(), means, designations, false, parallel, false, null);
return designations;
}
@Override
public int getIterationLimit()
{
return kmeans.getIterationLimit();
}
@Override
public void setIterationLimit(int iterLimit)
{
kmeans.setIterationLimit(iterLimit);
}
@Override
public void setSeedSelection(SeedSelectionMethods.SeedSelection seedSelection)
{
//XXX when called from constructor in superclass seed is ignored
if(kmeans != null)//needed when initing
kmeans.setSeedSelection(seedSelection);
}
@Override
public SeedSelectionMethods.SeedSelection getSeedSelection()
{
return kmeans.getSeedSelection();
}
@Override
protected double cluster(DataSet dataSet, List<Double> accelCache, int k, List<Vec> means, int[] assignment, boolean exactTotal, boolean threadpool, boolean returnError, Vec dataPointWeights)
{
return kmeans.cluster(dataSet, accelCache, k, means, assignment, exactTotal, threadpool, returnError, null);
}
@Override
public GMeans clone()
{
return new GMeans(this);
}
}
| 11,446 | 35.689103 | 195 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/kmeans/HamerlyKMeans.java | package jsat.clustering.kmeans;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.LongAdder;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.DataSet;
import jsat.clustering.SeedSelectionMethods;
import static jsat.clustering.SeedSelectionMethods.selectIntialPoints;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.linear.distancemetrics.TrainableDistanceMetric;
import jsat.utils.DoubleList;
import jsat.utils.IndexTable;
import jsat.utils.SystemInfo;
import jsat.utils.concurrent.AtomicDoubleArray;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
/**
* An efficient implementation of the K-Means algorithm. This implementation uses
* the triangle inequality to accelerate computation while maintaining the exact
* same solution. This requires that the {@link DistanceMetric} used
* support {@link DistanceMetric#isSubadditive() }. It uses only O(n) extra
* memory. <br>
* <br>
* See:
* <ul>
* <li>Hamerly, G. (2010). <i>Making k-means even faster</i>. SIAM International
* Conference on Data Mining (SDM) (pp. 130–140). Retrieved from
* <a href="http://72.32.205.185/proceedings/datamining/2010/dm10_012_hamerlyg.pdf">here</a></li>
* <li>Ryšavý, P., & Hamerly, G. (2016). Geometric methods to accelerate k-means
* algorithms. In Proceedings of the 2016 SIAM International Conference on Data
* Mining (pp. 324–332). Philadelphia, PA: Society for Industrial and Applied
* Mathematics.
* <a href="http://doi.org/10.1137/1.9781611974348.37">http://doi.org/10.1137/1.9781611974348.37</a></li>
* </ul>
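 * <br><br>
 * A minimal usage sketch (the data set object {@code data} is an assumption,
 * not something provided by this class):
 * <pre>{@code
 * HamerlyKMeans kmeans = new HamerlyKMeans(); //Euclidean distance, k-means++ seeds
 * int[] assignments = kmeans.cluster(data, 10, true, null); //10 clusters, run in parallel
 * List<Vec> centroids = kmeans.getMeans();
 * }</pre>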
*
* @author Edward Raff
*/
public class HamerlyKMeans extends KMeans
{
private static final long serialVersionUID = -4960453870335145091L;
/**
* Creates a new k-Means object
* @param dm the distance metric to use for clustering
* @param seedSelection the method of initial seed selection
     * @param rand the source of randomness to use
*/
public HamerlyKMeans(DistanceMetric dm, SeedSelectionMethods.SeedSelection seedSelection, Random rand)
{
super(dm, seedSelection, rand);
}
/**
* Creates a new k-Means object
* @param dm the distance metric to use for clustering
* @param seedSelection the method of initial seed selection
*/
public HamerlyKMeans(DistanceMetric dm, SeedSelectionMethods.SeedSelection seedSelection)
{
this(dm, seedSelection, RandomUtil.getRandom());
}
/**
* Creates a new k-Means object
*/
public HamerlyKMeans()
{
this(new EuclideanDistance(), SeedSelectionMethods.SeedSelection.KPP);
}
public HamerlyKMeans(HamerlyKMeans toCopy)
{
super(toCopy);
}
    //TODO reduce some code duplication in the methods below
@Override
protected double cluster(final DataSet dataSet, List<Double> accelCache, final int k, final List<Vec> means, final int[] assignment, final boolean exactTotal, boolean parallel, boolean returnError, Vec dataPointWeights)
{
final int N = dataSet.size();
final int D = dataSet.getNumNumericalVars();
TrainableDistanceMetric.trainIfNeeded(dm, dataSet, parallel);
/**
* Weights for each data point
*/
final Vec W;
if (dataPointWeights == null)
W = dataSet.getDataWeights();
else
W = dataPointWeights;
final List<Vec> X = dataSet.getDataVectors();
        final List<Double> distAccel;//used like this b/c we want it final for convenience, but input may be null
if(accelCache == null)
distAccel = dm.getAccelerationCache(X, parallel);
else
distAccel = accelCache;
final List<List<Double>> meanQI = new ArrayList<>(k);
if (means.size() != k)
{
means.clear();
means.addAll(selectIntialPoints(dataSet, k, dm, distAccel, rand, seedSelection, parallel));
}
/**
* Used in bound updates. Contains the centroid means from the previous iteration
*/
final Vec[] oldMeans = new Vec[means.size()];
/**
* Distance each mean has moved from one iteration to the next
*/
final double[] distanceMoved = new double[means.size()];
//Make our means dense
for (int i = 0; i < means.size(); i++)
{
if (means.get(i).isSparse())
means.set(i, new DenseVector(means.get(i)));
oldMeans[i] = new DenseVector(means.get(i));
}
/**
* vector sum of all points in cluster j <br>
* denoted c'(j)
*/
final Vec[] cP = new Vec[k];
/**
         * Will get initialized in the Initialize function
*/
final Vec[] tmpVecs = new Vec[k];
final Vec[] tmpVecs2 = new Vec[k];
for(int i = 0; i < tmpVecs2.length; i++)
tmpVecs2[i] = new DenseVector(oldMeans[0].length());
/**
* weighted number of points assigned to cluster j,<br>
* denoted q(j)
*/
final AtomicDoubleArray q = new AtomicDoubleArray(k);
/**
* distance that c(j) last moved <br>
* denoted p(j)
*/
final double[] p = new double[k];
/**
* distance from c(j) to its closest other center.<br>
* denoted s(j)
*/
final double[] s = new double[k];
//index of the center to which x(i) is assigned
//use assignment array
/**
* upper bound on the distance between x(i) and its assigned center c(a(i)) <br>
* denoted u(i)
*/
final double[] u = new double[N];
/**
* lower bound on the distance between x(i) and its second closest
* center – that is, the closest center to x(i) that is not c(a(i)) <br>
* denoted l(i)
*/
final double[] l = new double[N];
final List<Vec[]> allLocalDeltas = Collections.synchronizedList(new ArrayList<>());
final ThreadLocal<Vec[]> localDeltas = ThreadLocal.withInitial(()->
{
Vec[] toRet = new Vec[means.size()];
for(int i = 0; i < k; i++)
toRet[i] = new DenseVector(D);
allLocalDeltas.add(toRet);
return toRet;
});
//Start of algo
Initialize(dataSet, q, means, tmpVecs, cP, u, l, assignment, parallel, localDeltas, X, distAccel, meanQI, W);
//Use dense mean objects
for(int i = 0; i < means.size(); i++)
if(means.get(i).isSparse())
means.set(i, new DenseVector(means.get(i)));
int updates = N;
/**
* How many iterations over the dataset did this take?
*/
int iteration = 0;
while(updates > 0)
{
moveCenters(means, oldMeans, tmpVecs, cP, q, p, meanQI);
updates = 0;
updateS(s, distanceMoved, means, oldMeans, parallel, meanQI);
/**
* we maintain m(ci), which is the radius of a hypersphere centered
* at centroid ci that contains all points assigned to ci. (Note
* that m(ci) is easily obtained as the maximum upper-bound of all
* points in the cluster.)
*/
double[] m = new double[means.size()];
Arrays.fill(m, 0.0);
for(int i = 0; i < N; i++)
m[assignment[i]] = Math.max(m[assignment[i]], u[i]);
double[] updateB = new double[m.length];
//Algorithm 3, new bounds update scheme. See "Geometric methods to accelerate k-means algorithms"
EnhancedUpdateBounds(means, distanceMoved, m, s, oldMeans, tmpVecs, tmpVecs2, updateB, p, assignment, u, l);
//perform all updates
updates = ParallelUtils.run(parallel, N, (i) ->
{
Vec[] deltas = localDeltas.get();
return mainLoopWork(dataSet, i, s, assignment, u, l, q, deltas, X, distAccel, means, meanQI, W);
}, (a, b) -> a + b);
            //accumulate all deltas
ParallelUtils.range(cP.length, parallel).forEach(i ->
{
for (Vec[] deltas : allLocalDeltas)
{
cP[i].mutableAdd(deltas[i]);
deltas[i].zeroOut();
}
});
iteration++;
}
if (returnError)
{
if (saveCentroidDistance)
nearestCentroidDist = new double[N];
else
nearestCentroidDist = null;
double totalDistance = ParallelUtils.run(parallel, N, (start, end) ->
{
double localDistTotal = 0;
for(int i = start; i < end; i++)
{
double dist;
if(exactTotal)
dist = dm.dist(i, means.get(assignment[i]), meanQI.get(assignment[i]), X, distAccel);
else
dist = u[i];
localDistTotal += Math.pow(dist, 2);
if (saveCentroidDistance)
nearestCentroidDist[i] = dist;
}
return localDistTotal;
},
(a,b)->a+b);
return totalDistance;
}
else
return 0;//who cares
}
private void EnhancedUpdateBounds(final List<Vec> means1, final double[] distanceMoved, double[] m, final double[] s, final Vec[] oldMeans, final Vec[] tmpVecs, final Vec[] tmpVecs2, double[] updateB, final double[] p, final int[] assignment, final double[] u, final double[] l)
{
        //NOTE: special here c'j means the new cluster location. Only for algorithm 3 update. Rest of code uses different notation
        //Paper uses current and next, but we are coding current and previous
        //so in the paper's terminology, c'j would be the current mean, and cj the previous
for (int i = 0; i < means1.size(); i++)
{
//3: update←−∞
double update = Double.NEGATIVE_INFINITY;
//4: for each cj in centroids that fulfill (3.10) in decreasing order of ||c'j −cj|| do.
IndexTable c_order = new IndexTable(distanceMoved);
c_order.reverse();//we want decreasing order
for (int order = 0; order < c_order.length(); order++)
{
int j = c_order.index(order);
if(j == i)
continue;
//check (3.10)
if(2*m[j] + s[j] < distanceMoved[j] )//paper uses s(c_i) for half the distance, but our code uses it for the full distance
continue;//you didn't satisfy (3.10)
//5: if ||c'j −cj|| ≤ update then break
if(distanceMoved[j] <= update)
break;
//6: update ← max{update calculated by Algorithm 2 using ci and cj, update}
double algo2_1_out;
//begin Algorithm 2 Algorithm for update of l(x, cj) in the multidimensional case, where c(x) = ci
//3: t← eq (3.6)
oldMeans[i].copyTo(tmpVecs[i]);
means1.get(j).copyTo(tmpVecs2[i]);
tmpVecs[i].mutableSubtract(oldMeans[j]);//tmpVec[i] = (ci - cj)
tmpVecs2[i].mutableSubtract(oldMeans[j]);//tmpVec2[i] = (c'j - cj)
double t = tmpVecs[i].dot(tmpVecs2[i])/(distanceMoved[j]*distanceMoved[j]);
//4: dist←||cj + t · (c'j −cj)−ci||
                //can be rearranged as ||cj −ci + t · (c'j −cj)||
// = || (cj −ci) + t · (c'j −cj)||
// = || -(ci - cj) + t · (c'j −cj)||
// double dist = oldMeans[j].add(means.get(j).subtract(oldMeans[j]).multiply(t)).subtract(oldMeans[i]).pNorm(2);
tmpVecs[i].mutableMultiply(-1);
tmpVecs[i].mutableAdd(t, tmpVecs2[i]);
                double dist = tmpVecs[i].pNorm(2);//tmpVecs[i] now holds (cj - ci) + t*(c'j - cj), as in the commented-out formula above
//5: cix ← (3.7)
double c_ix = dist*2/distanceMoved[j];
//6: ciy ←1−2t
double c_iy = 1 - 2 * t;
//7: r ← (3.9)
double r = m[i]*2/distanceMoved[j];
//8: return update calculated by Algorithm 1 (using r and ci = (cix, ciy)) multiplied by ||c'j−cj||/2
//Algorithm 1 Algorithm for update of l(x, cj) in the simplified two dimensional case, where c(x) = ci.
//3: if cix ≤ r then return max{0,min{2, 2(r − ciy)}}
if(c_ix <= r)
algo2_1_out = Math.max(0, Math.min(2, 2*(r-c_iy)));
else
{
//4: if ciy > r then
if(c_iy > r)
c_iy--;//5:ciy ←ciy −1
//7: return eq (3.2)
double proj_norm_sqrd = Math.sqrt(c_ix * c_ix + c_iy * c_iy);
proj_norm_sqrd *= proj_norm_sqrd;
algo2_1_out = 2*(c_ix*r-c_iy*Math.sqrt(proj_norm_sqrd-r*r))/proj_norm_sqrd;
}
//end Algorithm 1
algo2_1_out *= distanceMoved[j]/2;
//end Algorithm 2
update = Math.max(algo2_1_out, update);
}
updateB[i] = update;
}
//"The appropriate place to calculate the maximum upper bound is before any centroids move", ok we can do upper bound now
UpdateBounds(p, assignment, u, l, updateB);
}
/**
*
* @param dataSet data set
* @param i the index to do the work for
     * @param s the centroid-to-centroid nearest distance
* @param assignment the array assignments are stored in
* @param u the "u" array of the algo
* @param l the "l" array of the algo
* @param q the "q" array of the algo (cluster counts)
* @param deltas the location to store the computed delta if one occurs
* @return 0 if no changes in assignment were made, 1 if a change in assignment was made
*/
private int mainLoopWork(DataSet dataSet, int i, double[] s, int[] assignment, double[] u,
double[] l, AtomicDoubleArray q, Vec[] deltas, final List<Vec> X, final List<Double> distAccel, final List<Vec> means, final List<List<Double>> meanQI, final Vec W)
{
final int a_i = assignment[i];
double m = Math.max(s[a_i] / 2, l[i]);
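        //Hamerly's pruning test: if u(i) <= max(s(a_i)/2, l(i)) then x_i cannot change
        //ownership this iteration, so all distance computations for it can be skipped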
if (u[i] > m)//first bound test
{
Vec x = X.get(i);
u[i] = dm.dist(i, means.get(a_i), meanQI.get(a_i), X, distAccel);//tighten upper bound
if (u[i] > m) //second bound test
{
final int new_a_i = PointAllCtrs(x, i, means, assignment, u, l, X, distAccel, meanQI);
if (a_i != new_a_i)
{
double w = W.get(i);
q.addAndGet(a_i, -w);
q.addAndGet(new_a_i, w);
deltas[a_i].mutableSubtract(w, x);
deltas[new_a_i].mutableAdd(w, x);
return 1;//1 change in ownership
}
}
}
return 0;//no change
}
/**
*
* @param s updated by this method call
* @param distanceMoved distance each cluster has moved from the previous locations. Updated by this method call.
* @param means the new cluster means
* @param oldMeans the old cluster means
* @param parallel
* @param meanQIs
*/
private void updateS(final double[] s, final double[] distanceMoved, final List<Vec> means, final Vec[] oldMeans, final boolean parallel, final List<List<Double>> meanQIs)
{
Arrays.fill(s, Double.MAX_VALUE);
        //TODO temp object for putting all the query info into a cache, should probably be cleaned up - or change original code to have one massive list and then use sub lists to get the QIs individually
final DoubleList meanCache = meanQIs.get(0).isEmpty() ? null : new DoubleList(meanQIs.size());
if (meanCache != null)
for (List<Double> qi : meanQIs)
meanCache.addAll(qi);
final ThreadLocal<double[]> localS = ThreadLocal.withInitial(()->new double[s.length]);
ParallelUtils.run(parallel, means.size(), (j)->
{
double[] sTmp = localS.get();
Arrays.fill(sTmp, Double.POSITIVE_INFINITY);
distanceMoved[j] = dm.dist(oldMeans[j], means.get(j));
double tmp;
for (int jp = j + 1; jp < means.size(); jp++)
{
tmp = dm.dist(j, jp, means, meanCache);
sTmp[j] = Math.min(sTmp[j], tmp);
sTmp[jp] = Math.min(sTmp[jp], tmp);
}
synchronized(s)
{
for(int i = 0; i < s.length; i++)
s[i] = Math.min(s[i], sTmp[i]);
}
});
}
private void Initialize(final DataSet d, final AtomicDoubleArray q, final List<Vec> means, final Vec[] tmp, final Vec[] cP, final double[] u, final double[] l, final int[] a, boolean parallel, final ThreadLocal<Vec[]> localDeltas, final List<Vec> X, final List<Double> distAccel, final List<List<Double>> meanQI, final Vec W)
{
for(int j = 0; j < means.size(); j++)
{
            //q would already be initialized to zero on creation by Java
cP[j] = new DenseVector(means.get(0).length());
tmp[j] = cP[j].clone();
            //set up Query Info for means
if(dm.supportsAcceleration())
meanQI.add(dm.getQueryInfo(means.get(j)));
else
meanQI.add(Collections.EMPTY_LIST);
}
ParallelUtils.run(parallel, u.length, (start, end) ->
{
Vec[] deltas = localDeltas.get();
for (int i = start; i < end; i++)
{
Vec x = X.get(i);
int j = PointAllCtrs(x, i, means, a, u, l, X, distAccel, meanQI);
double w = W.get(i);
q.addAndGet(j, w);
deltas[j].mutableAdd(w, x);
}
for(int i = 0; i < cP.length; i++)
{
synchronized(cP[i])
{
cP[i].mutableAdd(deltas[i]);
}
deltas[i].zeroOut();
}
});
}
/**
*
* @param x
* @param i
* @param means
* @param a
* @param u
* @param l
* @return the index of the closest cluster center
*/
private int PointAllCtrs(Vec x, int i, List<Vec> means, int[] a, double[] u, double[] l, final List<Vec> X, final List<Double> distAccel, final List<List<Double>> meanQI)
{
double secondLowest = Double.POSITIVE_INFINITY;
int slIndex = -1;
double lowest = Double.MAX_VALUE;
int lIndex = -1;
for(int j = 0; j < means.size(); j++)
{
double dist = dm.dist(i, means.get(j), meanQI.get(j), X, distAccel);
if(dist < secondLowest)
{
if(dist < lowest)
{
secondLowest = lowest;
slIndex = lIndex;
lowest = dist;
lIndex = j;
}
else
{
secondLowest = dist;
slIndex = j;
}
}
}
a[i] = lIndex;
u[i] = lowest;
l[i] = secondLowest;
return lIndex;
}
private void moveCenters(List<Vec> means, Vec[] oldMeans, Vec[] tmpSpace, Vec[] cP, AtomicDoubleArray q, double[] p, final List<List<Double>> meanQI)
{
for(int j = 0; j < means.size(); j++)
{
double count = q.get(j);
//save old mean
means.get(j).copyTo(oldMeans[j]);
if(count > 0)
{
//compute new mean
cP[j].copyTo(tmpSpace[j]);
tmpSpace[j].mutableDivide(count);
}
else
{
cP[j].zeroOut();
tmpSpace[j].zeroOut();
}
            //compute distance between new and old
p[j] = dm.dist(means.get(j), tmpSpace[j]);
            //move it to its position as the new mean
tmpSpace[j].copyTo(means.get(j));
//update QI
if(dm.supportsAcceleration())
meanQI.set(j, dm.getQueryInfo(means.get(j)));
}
}
/**
*
* @param p distance that c(j) last moved, denoted p(j)
* @param a
* @param u
* @param l
*/
private void UpdateBounds(double[] p, int[] a, double[] u, double[] l, double[] updateB)
{
double secondHighest = Double.NEGATIVE_INFINITY;
int shIndex = -1;
double highest = -Double.MAX_VALUE;
int hIndex = -1;
//find argmax values
for(int j = 0; j < p.length; j++)
{
double dist = p[j];
if(dist > secondHighest)
{
if(dist > highest)
{
secondHighest = highest;
shIndex = hIndex;
highest = dist;
hIndex = j;
}
else
{
secondHighest = dist;
shIndex = j;
}
}
}
final int r = hIndex;
final int rP = shIndex;
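        //u(i) grows by how far its own center moved; l(i) shrinks by the largest movement
        //of any other center (or the tighter per-center bound from updateB, if smaller)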
for(int i = 0; i < u.length; i++)
{
final int j = a[i];
u[i] += p[j];
if(r == j)
l[i] -= Math.min(p[rP], updateB[j]);
else
l[i] -= Math.min(p[r], updateB[j]);
}
}
@Override
public HamerlyKMeans clone()
{
return new HamerlyKMeans(this);
}
}
| 22,551 | 37.223729 | 329 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/kmeans/KMeans.java |
package jsat.clustering.kmeans;
import java.util.*;
import java.util.concurrent.*;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.clustering.ClusterFailureException;
import jsat.clustering.KClustererBase;
import jsat.clustering.PAM;
import jsat.clustering.SeedSelectionMethods;
import jsat.clustering.SeedSelectionMethods.SeedSelection;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.math.OnLineStatistics;
import jsat.parameters.Parameter.ParameterHolder;
import jsat.parameters.*;
import jsat.utils.SystemInfo;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.utils.random.RandomUtil;
/**
* Base class for the numerous implementations of k-means that exist. This base
 * class provides a slow heuristic approach to the selection of k.
*
* @author Edward Raff
*/
public abstract class KMeans extends KClustererBase implements Parameterized
{
private static final long serialVersionUID = 8730927112084289722L;
/**
     * This is the default seed selection method used in KMeans. When used
     * with the {@link EuclideanDistance}, it selects seeds that are log optimal
     * with a high probability.
*/
public static final SeedSelectionMethods.SeedSelection DEFAULT_SEED_SELECTION = SeedSelectionMethods.SeedSelection.KPP;
@ParameterHolder
protected DistanceMetric dm;
protected SeedSelectionMethods.SeedSelection seedSelection;
protected Random rand;
/**
* Indicates whether or not the means from the clustering should be saved
*/
protected boolean storeMeans = true;
/**
* Indicates whether or not the distance between a datapoint and its nearest
* centroid should be saved after clustering. This only applies when the
* error of the model is requested
*/
protected boolean saveCentroidDistance = true;
/**
* Distance from a datapoint to its nearest centroid. May be an approximate
* distance
*/
protected double[] nearestCentroidDist;
/**
* The list of means
*/
protected List<Vec> means;
/**
* Control the maximum number of iterations to perform.
*/
protected int MaxIterLimit = Integer.MAX_VALUE;
public KMeans(DistanceMetric dm, SeedSelectionMethods.SeedSelection seedSelection, Random rand)
{
this.dm = dm;
setSeedSelection(seedSelection);
this.rand = rand;
}
/**
* Copy constructor
* @param toCopy
*/
public KMeans(KMeans toCopy)
{
this.dm = toCopy.dm.clone();
this.seedSelection = toCopy.seedSelection;
this.rand = RandomUtil.getRandom();
if (toCopy.nearestCentroidDist != null)
this.nearestCentroidDist = Arrays.copyOf(toCopy.nearestCentroidDist, toCopy.nearestCentroidDist.length);
if (toCopy.means != null)
{
this.means = new ArrayList<>(toCopy.means.size());
for (Vec v : toCopy.means)
this.means.add(v.clone());
}
}
/**
* Sets the maximum number of iterations allowed
     * @param iterLimit the maximum number of iterations of the k-means algorithm
*/
public void setIterationLimit(int iterLimit)
{
if(iterLimit < 1)
throw new IllegalArgumentException("Iterations must be a positive value, not " + iterLimit);
this.MaxIterLimit = iterLimit;
}
/**
     * Returns the maximum number of iterations of the k-means algorithm that will be performed.
     * @return the maximum number of iterations of the k-means algorithm that will be performed.
*/
public int getIterationLimit()
{
return MaxIterLimit;
}
/**
* If set to {@code true} the computed means will be stored after clustering
* is completed, and can then be retrieved using {@link #getMeans() }.
* @param storeMeans {@code true} if the means should be stored for later,
* {@code false} to discard them once clustering is complete.
*/
public void setStoreMeans(boolean storeMeans)
{
this.storeMeans = storeMeans;
}
/**
* Returns the raw list of means that were used for each class.
* @return the list of means for each class
*/
public List<Vec> getMeans()
{
return means;
}
/**
* Sets the method of seed selection to use for this algorithm. {@link SeedSelection#KPP} is recommended for this algorithm in particular.
* @param seedSelection the method of seed selection to use
*/
public void setSeedSelection(SeedSelectionMethods.SeedSelection seedSelection)
{
this.seedSelection = seedSelection;
}
/**
*
* @return the method of seed selection used
*/
public SeedSelectionMethods.SeedSelection getSeedSelection()
{
return seedSelection;
}
/**
* Returns the distance metric in use
* @return the distance metric in use
*/
public DistanceMetric getDistanceMetric()
{
return dm;
}
/**
* This is a helper method where the actual cluster is performed. This is because there
* are multiple strategies for modifying kmeans, but all of them require this step.
* <br>
* The distance metric used is trained if needed
*
* @param dataSet The set of data points to perform clustering on
* @param accelCache acceleration cache to use, or {@code null}. If
* {@code null}, the kmeans code will attempt to create one
* @param k the number of clusters
* @param means the initial points to use as the means. Its length is the
* number of means that will be searched for. These means will be altered,
* and should contain deep copies of the points they were drawn from. May be
* empty, in which case the list will be filled with some selected means
* @param assignment an empty temp space to store the clustering
* classifications. Should be the same length as the number of data points
* @param exactTotal determines how the objective function (return value)
* will be computed. If true, extra work will be done to compute the exact
* distance from each data point to its cluster. If false, an upper bound
* approximation will be used. This also impacts the value stored in
* {@link #nearestCentroidDist}
     * @param parallel {@code true} if parallel computation should be used,
     * {@code false} for single threaded execution
     * @param returnError {@code true} if the sum of squared distances should be
* returned. {@code false} means any value can be returned.
* {@link #saveCentroidDistance} only applies if this is {@code true}
* @param dataPointWeights the weight value to use for each data point. If
* <tt>null</tt>, assume each point has equal weight.
     * @return the sum of squared distances if {@code returnError} is {@code true}, otherwise an unspecified value
*/
abstract protected double cluster(final DataSet dataSet, List<Double> accelCache, final int k, final List<Vec> means, final int[] assignment, boolean exactTotal, boolean parallel, boolean returnError, Vec dataPointWeights);
static protected List<List<DataPoint>> getListOfLists(int k)
{
List<List<DataPoint>> ks = new ArrayList<>(k);
for(int i = 0; i < k; i++)
ks.add(new ArrayList<>());
return ks;
}
@Override
public int[] cluster(DataSet dataSet, int[] designations)
{
return cluster(dataSet, 2, (int)Math.sqrt(dataSet.size()/2), designations);
}
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
return cluster(dataSet, 2, (int)Math.sqrt(dataSet.size()/2), parallel, designations);
}
@Override
public int[] cluster(DataSet dataSet, int clusters, boolean parallel, int[] designations)
{
if(designations == null)
designations = new int[dataSet.size()];
if(dataSet.size() < clusters)
            throw new ClusterFailureException("Fewer data points than desired clusters, decrease cluster size");
means = new ArrayList<>(clusters);
cluster(dataSet, null, clusters, means, designations, false, parallel, false, null);
if(!storeMeans)
means = null;
return designations;
}
@Override
public int[] cluster(DataSet dataSet, int lowK, int highK, boolean parallel, int[] designations)
{
if(dataSet.size() < highK)
            throw new ClusterFailureException("Fewer data points than desired clusters, decrease cluster size");
if(designations == null)
designations = new int[dataSet.size()];
double[] totDistances = new double[highK-lowK+1];
List<Double> cache = dm.getAccelerationCache(dataSet.getDataVectors(), parallel);
for(int k = lowK; k <= highK; k++)
{
totDistances[k-lowK] = cluster(dataSet, cache, k, new ArrayList<>(), designations, true, parallel, true, null);
}
return findK(lowK, highK, totDistances, dataSet, designations);
}
private int[] findK(int lowK, int highK, double[] totDistances, DataSet dataSet, int[] designations)
{
//Now we process the distance changes
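        //Heuristic: find the largest single drop in the objective as k increases. If that drop is
        //not unusually large (less than the mean change plus two standard deviations) just use lowK,
        //otherwise pick the first k whose change from the previous k is smaller than that largest drop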
/**
* Keep track of the changes
*/
OnLineStatistics stats = new OnLineStatistics();
double maxChange = Double.MIN_VALUE;
int maxChangeK = lowK;
for(int i = lowK; i <= highK; i++)
{
double totDist = totDistances[i-lowK];
if(i > lowK)
{
double change = Math.abs(totDist-totDistances[i-lowK-1]);
stats.add(change);
if(change > maxChange)
{
maxChange = change;
maxChangeK = i;
}
}
}
double changeMean = stats.getMean();
double changeDev = stats.getStandardDeviation();
        //If we haven't had any huge drops in total distance, assume that there are only lowK clusters
if(maxChange < changeDev*2+changeMean)
maxChangeK = lowK;
else
{
double tmp;
for(int i = 1; i < totDistances.length; i++)
{
if( (tmp = Math.abs(totDistances[i]-totDistances[i-1])) < maxChange )
{
maxChange = tmp;
maxChangeK = i+lowK;
break;
}
}
}
return cluster(dataSet, maxChangeK, designations);
}
@Override
abstract public KMeans clone();
@Override
public boolean supportsWeightedData()
{
return true;
}
}
| 10,920 | 33.891374 | 227 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/kmeans/KMeansPDN.java | package jsat.clustering.kmeans;
import java.util.*;
import jsat.DataSet;
import jsat.linear.Vec;
/**
* This class provides a method of performing {@link KMeans} clustering when the
* value of {@code K} is not known. It works by incrementing the value
* of {@code k} up to some specified maximum, and running a full KMeans for each
* value. <br>
* <br>
* Note, by default this implementation uses a heuristic for the max value of
* {@code K} that is capped at 100 when using the
* {@link #cluster(jsat.DataSet) } type methods. <br>
* <br>
* When the value of {@code K} is specified, the implementation will simply call
* the regular KMeans object it was constructed with.
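 * <br><br>
 * A minimal usage sketch (the data set object {@code data} is an assumption):
 * <pre>{@code
 * KMeansPDN auto = new KMeansPDN(new HamerlyKMeans());
 * //search k in [2, 20] and keep the assignment with the lowest f(K) score
 * int[] assignments = auto.cluster(data, 2, 20, false, null);
 * double[] scores = auto.getfKs();
 * }</pre>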
*
* See: Pham, D. T., Dimov, S. S.,&Nguyen, C. D. (2005). <i>Selection of K in
* K-means clustering</i>. Proceedings of the Institution of Mechanical
* Engineers, Part C: Journal of Mechanical Engineering Science, 219(1),
* 103–119. doi:10.1243/095440605X8298
*
* @author Edward Raff
*/
public class KMeansPDN extends KMeans
{
private static final long serialVersionUID = -2358377567814606959L;
private KMeans kmeans;
private double[] fKs;
/**
* Creates a new clusterer.
*/
public KMeansPDN()
{
this(new HamerlyKMeans());
}
/**
     * Creates a new clusterer that uses the specified object to perform clustering for all {@code k}.
* @param kmeans the k-means object to use for clustering
*/
public KMeansPDN(KMeans kmeans)
{
super(kmeans.dm, kmeans.seedSelection, kmeans.rand);
this.kmeans = kmeans;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public KMeansPDN(KMeansPDN toCopy)
{
super(toCopy);
this.kmeans = toCopy.kmeans.clone();
if(toCopy.fKs != null)
this.fKs = Arrays.copyOf(toCopy.fKs, toCopy.fKs.length);
}
/**
* Returns the array of {@code f(K)} values generated for the last data set.
* The value at index {@code i} is the score for cluster {@code i+1}.
* Smaller values indicate better clusterings.
*
* @return the array of {@code f(K)} values, or {@code null} if no data set
* has been clustered
*/
public double[] getfKs()
{
return fKs;
}
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
return cluster(dataSet, 1, (int) Math.min(Math.max(Math.sqrt(dataSet.size()), 10), 100), parallel, designations);
}
@Override
public int[] cluster(DataSet dataSet, int lowK, int highK, boolean parallel, int[] designations)
{
if(highK == lowK)
return cluster(dataSet, lowK, parallel, designations);
else if(highK < lowK)
throw new IllegalArgumentException("low value of k (" + lowK + ") must be higher than the high value of k(" + highK + ")");
final int N = dataSet.size();
final int D = dataSet.getNumNumericalVars();
fKs = new double[highK-1];//we HAVE to start from k=2
fKs[0] = 1.0;//see eq(2)
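        //f(K) = S_K / (alpha_K * S_{K-1}) when S_{K-1} != 0, otherwise 1; smaller values are better.
        //alpha_2 = 1 - 3/(4D) and alpha_K = alpha_{K-1} + (1 - alpha_{K-1})/6 for K > 2, per eq(3a)/(3b)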
int[] bestCluster = new int[N];
double minFk = lowK == 1 ? 1.0 : Double.POSITIVE_INFINITY;//If our low k is > 1, force the check later to kick in at the first candidate k by making fK appear Inf
if(designations == null || designations.length < N)
designations = new int[N];
double alphaKprev = 0, S_k_prev = 0;
//re used every iteration
List<Vec> curMeans = new ArrayList<>(highK);
means = new ArrayList<>();//the best set of means
//pre-compute cache instead of re-computing every time
List<Double> accelCache = dm.getAccelerationCache(dataSet.getDataVectors(), parallel);
for(int k = 2; k < highK; k++)
{
curMeans.clear();
//kmeans objective function result is the same as S_k
            double S_k = cluster(dataSet, accelCache, k, curMeans, designations, true, parallel, true, null);//TODO could add a flag to make approximate S_k an option. Though it doesn't seem to work great on toy problems, might be fine on more realistic data
double alpha_k;
if(k == 2)
alpha_k = 1 - 3.0/(4*D); //eq(3a)
else
alpha_k = alphaKprev + (1-alphaKprev)/6;//eq(3b)
double fK;//eq(2)
if(S_k_prev == 0)
fKs[k-1] = fK = 1;
else
fKs[k-1] = fK = S_k/(alpha_k*S_k_prev);
alphaKprev = alpha_k;
S_k_prev = S_k;
if(k >= lowK && minFk > fK)
{
System.arraycopy(designations, 0, bestCluster, 0, N);
minFk = fK;
means.clear();
for(Vec mean : curMeans)
means.add(mean.clone());
}
}
//contract is we return designations with the data in it if we can, so copy the values back
System.arraycopy(bestCluster, 0, designations, 0, N);
return designations;
}
@Override
protected double cluster(DataSet dataSet, List<Double> accelCache, int k, List<Vec> means, int[] assignment, boolean exactTotal, boolean threadpool, boolean returnError, Vec dataPointWeights)
{
return kmeans.cluster(dataSet, accelCache, k, means, assignment, exactTotal, threadpool, returnError, null);
}
@Override
public KMeansPDN clone()
{
return new KMeansPDN(this);
}
}
| 5,608 | 34.27673 | 257 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/kmeans/KernelKMeans.java |
package jsat.clustering.kmeans;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.DataSet;
import jsat.clustering.KClustererBase;
import jsat.distributions.kernels.KernelTrick;
import jsat.linear.ConstantVector;
import jsat.linear.Vec;
import jsat.parameters.Parameter;
import jsat.parameters.Parameter.ParameterHolder;
import jsat.parameters.Parameterized;
import jsat.utils.DoubleList;
import jsat.utils.ListUtils;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
/**
* Base class for various Kernel K Means implementations. Because the Kernelized
* version is more computationally expensive, only the clustering methods where
* the number of clusters is specified apriori are supported. <br>
* <br>
* KernelKMeans keeps a reference to the data passed in for clustering so that
* queries can be conveniently answered, such as getting
* {@link #findClosestCluster(jsat.linear.Vec) the closest cluster} or finding
* the {@link #meanToMeanDistance(int, int) distance between means}
*
* @author Edward Raff
*/
public abstract class KernelKMeans extends KClustererBase implements Parameterized
{
private static final long serialVersionUID = -5294680202634779440L;
/**
* The kernel trick to use
*/
@ParameterHolder
protected KernelTrick kernel;
/**
* The list of data points that this was trained on
*/
protected List<Vec> X;
/**
* The weight of each data point
*/
protected Vec W;
/**
     * The acceleration cache for the kernel
*/
protected List<Double> accel;
/**
* The value of k(x,x) for every point in {@link #X}
*/
protected double[] selfK;
/**
* The value of the un-normalized squared norm for each mean
*/
protected double[] meanSqrdNorms;
/**
     * The normalizing constant for each mean. In general this would be
     * 1/ownes[k]<sup>2</sup>
*/
protected double[] normConsts;
/**
     * The weighted number of data points owned by each mean
*/
protected double[] ownes;
/**
* A temporary space for updating ownership designations for each datapoint.
* When done, this will store the final designations for each point
*/
protected int[] newDesignations;
protected int maximumIterations = Integer.MAX_VALUE;
/**
*
* @param kernel the kernel to use
*/
public KernelKMeans(KernelTrick kernel)
{
this.kernel = kernel;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public KernelKMeans(KernelKMeans toCopy)
{
this.kernel = toCopy.kernel.clone();
this.maximumIterations = toCopy.maximumIterations;
if(toCopy.X != null)
{
this.X = new ArrayList<>(toCopy.X.size());
for( Vec v : toCopy.X)
this.X.add(v.clone());
}
if(toCopy.accel != null)
this.accel = new DoubleList(toCopy.accel);
if(toCopy.selfK != null)
this.selfK = Arrays.copyOf(toCopy.selfK, toCopy.selfK.length);
if(toCopy.meanSqrdNorms != null)
this.meanSqrdNorms = Arrays.copyOf(toCopy.meanSqrdNorms, toCopy.meanSqrdNorms.length);
if(toCopy.normConsts != null)
this.normConsts = Arrays.copyOf(toCopy.normConsts, toCopy.normConsts.length);
if(toCopy.ownes != null)
this.ownes = Arrays.copyOf(toCopy.ownes, toCopy.ownes.length);
if(toCopy.newDesignations != null)
this.newDesignations = Arrays.copyOf(toCopy.newDesignations, toCopy.newDesignations.length);
if(toCopy.W != null)
this.W = toCopy.W.clone();
}
/**
* Sets the maximum number of iterations allowed
* @param iterLimit the maximum number of iterations of the KMeans algorithm
*/
public void setMaximumIterations(int iterLimit)
{
if(iterLimit <= 0)
throw new IllegalArgumentException("iterations must be a positive value, not " + iterLimit);
this.maximumIterations = iterLimit;
}
/**
* Returns the maximum number of iterations of the KMeans algorithm that will be performed.
* @return the maximum number of iterations of the KMeans algorithm that will be performed.
*/
public int getMaximumIterations()
{
return maximumIterations;
}
@Override
public int[] cluster(DataSet dataSet, int[] designations)
{
throw new UnsupportedOperationException("Not supported.");
}
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
throw new UnsupportedOperationException("Not supported.");
}
@Override
public int[] cluster(DataSet dataSet, int lowK, int highK, boolean parallel, int[] designations)
{
throw new UnsupportedOperationException("Not supported.");
}
@Override
public int[] cluster(DataSet dataSet, int lowK, int highK, int[] designations)
{
throw new UnsupportedOperationException("Not supported.");
}
/**
* Computes the kernel sum of data point {@code i} against all the points in
* cluster group {@code clusterID}.
* @param i the index of the data point to query for
* @param clusterID the cluster index to get the sum of kernel products
     * @param d the array of cluster ownership assignments
* @return the sum <big>Σ</big>k(x<sub>i</sub>, x<sub>j</sub>), ∀ j, d[<i>j</i>] == <i>clusterID</i>
*/
protected double evalSumK(int i, int clusterID, int[] d)
{
double sum = 0;
for(int j = 0; j < X.size(); j++)
if(d[j] == clusterID)
sum += W.get(j) * kernel.eval(i, j, X, accel);
return sum;
}
/**
* Computes the kernel sum of the given data point against all the points in
* cluster group {@code clusterID}.
* @param x the data point to get the kernel sum of
* @param qi the query information for the given data point generated from the kernel in use. See {@link KernelTrick#getQueryInfo(jsat.linear.Vec) }
* @param clusterID the cluster index to get the sum of kernel products
* @param d the array of cluster assignments
* @return the sum <big>Σ</big>k(x<sub>i</sub>, x<sub>j</sub>), ∀ j, d[<i>j</i>] == <i>clusterID</i>
*/
protected double evalSumK(Vec x, List<Double> qi, int clusterID, int[] d)
{
double sum = 0;
for(int j = 0; j < X.size(); j++)
if(d[j] == clusterID)
sum += W.get(j) * kernel.eval(j, x, qi, X, accel);
return sum;
}
/**
     * Sets up the internal structure for KernelKMeans. Should be called first before any work is done
* @param K the number of clusters to find
* @param designations the initial designations array to fill with values
* @param W the weight for each individual data point
*/
protected void setup(int K, int[] designations, Vec W)
{
accel = kernel.getAccelerationCache(X);
final int N = X.size();
selfK = new double[N];
for(int i = 0; i < selfK.length; i++)
selfK[i] = kernel.eval(i, i, X, accel);
ownes = new double[K];
meanSqrdNorms = new double[K];
newDesignations = new int[N];
if(W == null)
this.W = new ConstantVector(1.0, N);
else
this.W = W;
Random rand = RandomUtil.getRandom();
for (int i = 0; i < N; i++)
{
int to = rand.nextInt(K);
ownes[to] += this.W.get(i);
newDesignations[i] = designations[i] = to;
}
normConsts = new double[K];
updateNormConsts();
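        //un-normalized squared norm of each mean: meanSqrdNorms[k] accumulates
        //sum_{i,j in cluster k} w_i * w_j * k(x_i, x_j); normConsts later scales it by 1/ownes[k]^2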
for (int i = 0; i < N; i++)
{
int i_k = designations[i];
final double w_i = this.W.get(i);
meanSqrdNorms[i_k] += w_i * selfK[i];
for (int j = i + 1; j < N; j++)
if (i_k == designations[j])
meanSqrdNorms[i_k] += 2 * w_i * this.W.get(j) * kernel.eval(i, j, X, accel);
}
}
/**
* Updates the normalizing constants for each mean. Should be called after
* every change in ownership
*/
protected void updateNormConsts()
{
for(int i = 0; i < normConsts.length; i++)
normConsts[i] = 1.0/(ownes[i]*ownes[i]);
}
/**
* Computes the distance between one data point and a specified mean
* @param i the data point to get the distance for
* @param k the mean index to get the distance to
     * @param designations the array of ownership designations for each data point to use
* @return the distance between data point {@link #X x}<sub>i</sub> and mean {@code k}
*/
protected double distance(int i, int k, int[] designations)
{
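        //||phi(x_i) - mu_k||^2 = k(x_i,x_i) - (2/ownes[k]) * sum_{j in C_k} w_j k(x_i,x_j) + ||mu_k||^2,
        //with ||mu_k||^2 stored un-normalized in meanSqrdNorms and scaled here by normConsts.
        //The max(...,0) guards against small negative values caused by floating point error.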
return Math.sqrt(Math.max(selfK[i] - 2.0/ownes[k] * evalSumK(i, k, designations) + meanSqrdNorms[k]*normConsts[k], 0));
}
/**
     * Returns the distance between the given data point and the specified cluster
* @param x the data point to get the distance for
* @param k the cluster id to get the distance to
* @return the distance between the given data point and the specified cluster
*/
public double distance(Vec x, int k)
{
return distance(x, kernel.getQueryInfo(x), k);
}
/**
     * Returns the distance between the given data point and the specified cluster
* @param x the data point to get the distance for
* @param qi the query information for the given data point generated for the kernel in use. See {@link KernelTrick#getQueryInfo(jsat.linear.Vec) }
* @param k the cluster id to get the distance to
* @return the distance between the given data point and the specified cluster
*/
public double distance(Vec x, List<Double> qi, int k)
{
if(k >= meanSqrdNorms.length || k < 0)
throw new IndexOutOfBoundsException("Only " + meanSqrdNorms.length + " clusters. " + k + " is not a valid index");
return Math.sqrt(Math.max(kernel.eval(0, 0, Arrays.asList(x), qi) - 2.0/ownes[k] * evalSumK(x, qi, k, newDesignations) + meanSqrdNorms[k]*normConsts[k], 0));
}
/**
* Finds the cluster ID that is closest to the given data point
* @param x the data point to get the closest cluster for
* @return the index of the closest cluster
*/
public int findClosestCluster(Vec x)
{
return findClosestCluster(x, kernel.getQueryInfo(x));
}
/**
* Finds the cluster ID that is closest to the given data point
* @param x the data point to get the closest cluster for
* @param qi the query information for the given data point generated for the kernel in use. See {@link KernelTrick#getQueryInfo(jsat.linear.Vec) }
* @return the index of the closest cluster
*/
public int findClosestCluster(Vec x, List<Double> qi)
{
double min = Double.MAX_VALUE;
int min_indx = -1;
for(int i = 0; i < meanSqrdNorms.length; i++)
{
double dist = distance(x, qi, i);
if(dist < min)
{
min = dist;
min_indx = i;
}
}
return min_indx;
}
/**
     * Updates the means based on the change of a specific data point
* @param i the index of the data point to try and update the means based on its movement
* @param designations the old assignments for ownership of each data point to one of the means
* @return {@code 1} if the index changed ownership, {@code 0} if the index did not change ownership
*/
protected int updateMeansFromChange(int i, int[] designations)
{
return updateMeansFromChange(i, designations, meanSqrdNorms, ownes);
}
/**
* Accumulates the updates to the means and ownership into the provided
* arrays. This does not update {@link #meanSqrdNorms}, and is meant to
* accumulate the change. To apply the changes pass the same arrays to {@link #applyMeanUpdates(double[], int[]) }
* @param i the index of the data point to try and update the means based on its movement
* @param designations the old assignments for ownership of each data point to one of the means
* @param sqrdNorms the array to place the changes to the squared norms in
* @param ownership the array to place the changes to the ownership counts in
* @return {@code 1} if the index changed ownership, {@code 0} if the index did not change ownership
*/
protected int updateMeansFromChange(final int i, final int[] designations, final double[] sqrdNorms, final double[] ownership)
{
final int old_d = designations[i];
final int new_d = newDesignations[i];
if (old_d == new_d)//this one has not changed!
return 0;
final int N = X.size();
final double w_i = W.get(i);
ownership[old_d] -= w_i;
ownership[new_d] += w_i;
for (int j = 0; j < N; j++)
{
final double w_j = W.get(j);
final int oldD_j = designations[j];
final int newD_j = newDesignations[j];
if (i == j)//diagonal is an easy case
{
sqrdNorms[old_d] -= w_i*selfK[i];
sqrdNorms[new_d] += w_i*selfK[i];
}
else
{
//handle removing contribution from old mean
if (old_d == oldD_j)
{
                    //only do this for items that were a part of the OLD center
if (i > j && oldD_j != newD_j)
{
/*
* j,j is also being removed from this center.
* To avoid removing the value k_ij twice, the
* person with the later index gets to do the update
*/
}
else//safe to remove the k_ij contribution
sqrdNorms[old_d] -= 2 * w_i * w_j * kernel.eval(i, j, X, accel);
}
                //handle adding contribution to new mean
if (new_d == newD_j)
{
                    //only do this for items that are a part of the NEW center
if (i > j && oldD_j != newD_j)
{
/*
* j,j is also being added to this center.
* To avoid adding the value k_ij twice, the
* person with the later index gets to do the update
*/
}
else
sqrdNorms[new_d] += 2 * w_i * w_j * kernel.eval(i, j, X, accel);
}
}
}
return 1;
}
protected void applyMeanUpdates(double[] sqrdNorms, double[] ownerships)
{
for(int i = 0; i < sqrdNorms.length; i++)
{
meanSqrdNorms[i] += sqrdNorms[i];
ownes[i] += ownerships[i];
}
}
/**
* Computes the distance between two of the means in the clustering
* @param k0 the index of the first mean
* @param k1 the index of the second mean
* @return the distance between the two
*/
public double meanToMeanDistance(int k0, int k1)
{
if(k0 >= meanSqrdNorms.length || k0 < 0)
throw new IndexOutOfBoundsException("Only " + meanSqrdNorms.length + " clusters. " + k0 + " is not a valid index");
if(k1 >= meanSqrdNorms.length || k1 < 0)
throw new IndexOutOfBoundsException("Only " + meanSqrdNorms.length + " clusters. " + k1 + " is not a valid index");
return meanToMeanDistance(k0, k1, newDesignations);
}
protected double meanToMeanDistance(int k0, int k1, int[] assignments)
{
double d = meanSqrdNorms[k0]*normConsts[k0]+meanSqrdNorms[k1]*normConsts[k1]-2*dot(k0, k1, assignments);
        return Math.sqrt(Math.max(0, d));//Avoid rare cases where 2*dot might be slightly larger
}
protected double meanToMeanDistance(int k0, int k1, int[] assignments, boolean parallel)
{
double d = meanSqrdNorms[k0]*normConsts[k0]+meanSqrdNorms[k1]*normConsts[k1]-2*dot(k0, k1, assignments, parallel);
        return Math.sqrt(Math.max(0, d));//Avoid rare cases where 2*dot might be slightly larger
}
/**
*
* @param k0 the index of the first cluster
* @param k1 the index of the second cluster
* @param assignments0 the array of assignments to use for index k0
* @param assignments1 the array of assignments to use for index k1
* @param k1SqrdNorm the <i>normalized</i> squared norm for the mean
     * indicated by {@code k1}. (ie: {@link #meanSqrdNorms} multiplied by {@link #normConsts})
* @return
*/
protected double meanToMeanDistance(int k0, int k1, int[] assignments0, int[] assignments1, double k1SqrdNorm)
{
double d = meanSqrdNorms[k0]*normConsts[k0]+k1SqrdNorm-2*dot(k0, k1, assignments0, assignments1);
        return Math.sqrt(Math.max(0, d));//Avoid rare cases where 2*dot might be slightly larger
}
/**
*
* @param k0 the index of the first cluster
* @param k1 the index of the second cluster
* @param assignments0 the array of assignments to use for index k0
* @param assignments1 the array of assignments to use for index k1
* @param k1SqrdNorm the <i>normalized</i> squared norm for the mean
     * indicated by {@code k1}. (ie: {@link #meanSqrdNorms} multiplied by {@link #normConsts})
* @param parallel source of threads for parallel execution
* @return
*/
protected double meanToMeanDistance(int k0, int k1, int[] assignments0, int[] assignments1, double k1SqrdNorm, boolean parallel)
{
double d = meanSqrdNorms[k0]*normConsts[k0]+k1SqrdNorm-2*dot(k0, k1, assignments0, assignments1, parallel);
        return Math.sqrt(Math.max(0, d));//Avoid rare cases where 2*dot might be slightly larger
}
/**
* dot product between two different clusters from one set of cluster assignments
* @param k0 the index of the first cluster
* @param k1 the index of the second cluster
* @param assignment the array of assignments for cluster ownership
* @return the dot product between the two clusters.
*/
private double dot(final int k0, final int k1, final int[] assignment)
{
return dot(k0, k1, assignment, assignment);
}
/**
* dot product between two different clusters from one set of cluster assignments
* @param k0 the index of the first cluster
* @param k1 the index of the second cluster
* @param assignment the array of assignments for cluster ownership
* @param parallel source of threads for parallel execution
* @return the dot product between the two clusters.
*/
private double dot(final int k0, final int k1, final int[] assignment, boolean parallel)
{
return dot(k0, k1, assignment, assignment, parallel);
}
/**
* dot product between two different clusters from different sets of cluster
* assignments. Two different assignment arrays are used to allow
* overlapping assignment of points to the clusters.
*
* @param k0 the first cluster to take the dot product with
* @param k1 the second cluster to take the dot product with.
* @param assignment0 vector containing assignment values, will be used to
* determine which points belong to k0
* @param assignment1 vector containing assignment values, will be used to
* determine which points belong to k1
* @return the dot product between the two clusters.
*/
private double dot(final int k0, final int k1, final int[] assignment0, final int[] assignment1)
{
double dot = 0;
final int N = X.size();
double a = 0, b = 0;
/*
* Below, unless i&j are somehow in the same cluster - nothing bad will happen
*/
for(int i = 0; i < N; i++)
{
final double w_i = W.get(i);
if(assignment0[i] != k0)
continue;
a += w_i;
for(int j = 0; j < N; j++)
{
if(assignment1[j] != k1)
continue;
final double w_j = W.get(j);
dot += w_i * w_j * kernel.eval(i, j, X, accel);
}
}
for(int j = 0; j < N; j++)
if(assignment1[j] == k1)
b += W.get(j);
return dot/(a*b);
}
/**
* dot product between two different clusters from different sets of cluster
* assignments. Two different assignment arrays are used to allow
* overlapping assignment of points to the clusters.
*
* @param k0 the first cluster to take the dot product with
* @param k1 the second cluster to take the dot product with.
* @param assignment0 vector containing assignment values, will be used to
* determine which points belong to k0
* @param assignment1 vector containing assignment values, will be used to
* determine which points belong to k1
     * @param parallel whether or not to use parallel execution
* @return the dot product between the two clusters.
*/
private double dot(final int k0, final int k1, final int[] assignment0, final int[] assignment1, boolean parallel)
{
        final int N = X.size();
        /*
         * Below, unless i&j are somehow in the same cluster - nothing bad will happen
         */
        //the result of the parallel reduction must be captured, otherwise dot would stay 0
        double dot = ParallelUtils.run(parallel, N, (i) ->
        {
            final double w_i = W.get(i);
            if(assignment0[i] != k0)
                return 0.0;
            double localDot = 0;
            for(int j = 0; j < N; j++)
            {
                if(assignment1[j] != k1)
                    continue;
                final double w_j = W.get(j);
                localDot += w_i * w_j * kernel.eval(i, j, X, accel);
            }
            return localDot;
        }, (t, u)->t+u);
        //normalize by the weighted sizes of the two clusters, matching the serial version above
        double a = 0, b = 0;
        for(int i = 0; i < N; i++)
            if(assignment0[i] == k0)
                a += W.get(i);
for(int j = 0; j < N; j++)
if(assignment1[j] == k1)
b += W.get(j);
return dot/(a*b);
}
@Override
abstract public KernelKMeans clone();
@Override
public boolean supportsWeightedData()
{
return true;
}
}
| 23,132 | 36.31129 | 165 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/kmeans/LloydKernelKMeans.java |
package jsat.clustering.kmeans;
import jsat.DataSet;
import jsat.distributions.kernels.KernelTrick;
import jsat.exceptions.FailedToFitException;
import jsat.utils.concurrent.ParallelUtils;
/**
* An implementation of the naive algorithm for performing kernel k-means.
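 * <br><br>
 * A minimal usage sketch (the kernel bandwidth, the data set {@code data}, and
 * the query vector {@code query} are illustrative assumptions):
 * <pre>{@code
 * LloydKernelKMeans kkm = new LloydKernelKMeans(new RBFKernel(0.5));
 * int[] assignments = kkm.cluster(data, 3, true, null);
 * int nearest = kkm.findClosestCluster(query); //assign a new point afterwards
 * }</pre>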
*
* @author Edward Raff
*/
public class LloydKernelKMeans extends KernelKMeans
{
private static final long serialVersionUID = 1280985811243830450L;
/**
* Creates a new Kernel K Means object
* @param kernel the kernel to use
*/
public LloydKernelKMeans(KernelTrick kernel)
{
super(kernel);
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public LloydKernelKMeans(LloydKernelKMeans toCopy)
{
super(toCopy);
}
@Override
public int[] cluster(DataSet dataSet, final int K, boolean parallel, int[] designations)
{
if(K < 2)
throw new FailedToFitException("Clustering requires at least 2 clusters");
final int N = dataSet.size();
if(designations == null)
designations = new int[N];
X = dataSet.getDataVectors();
setup(K, designations, dataSet.getDataWeights());
final int[] assignments = designations;
int changed;
int iter = 0;
do
{
changed = 0;
//find new closest center
ParallelUtils.run(parallel, N, (start, end)->
{
for (int i = start; i < end; i++)
{
double minDist = Double.POSITIVE_INFINITY;
int min_indx = 0;
for (int k = 0; k < K; k++)
{
double dist_k = distance(i, k, assignments);
if (dist_k < minDist)
{
minDist = dist_k;
min_indx = k;
}
}
newDesignations[i] = min_indx;
}
});
//now we have all the new assignments, we can compute the changes
changed = ParallelUtils.run(parallel, N, (start, end) ->
{
double[] sqrdChange = new double[K];
double[] ownerChange = new double[K];
                int localChange = 0;
                for (int i = start; i < end; i++)
                    localChange += updateMeansFromChange(i, assignments, sqrdChange, ownerChange);
                synchronized(assignments)
                {
                    applyMeanUpdates(sqrdChange, ownerChange);
                }
                return localChange;
},
(t, u) -> t+u);
            //update constants
updateNormConsts();
//update designations
System.arraycopy(newDesignations, 0, designations, 0, N);
}
while (changed > 0 && ++iter < maximumIterations);
return designations;
}
@Override
public int[] cluster(DataSet dataSet, int K, int[] designations)
{
if(K < 2)
throw new FailedToFitException("Clustering requires at least 2 clusters");
final int N = dataSet.size();
if(designations == null)
designations = new int[N];
X = dataSet.getDataVectors();
setup(K, designations, dataSet.getDataWeights());
int changed;
int iter = 0;
do
{
changed = 0;
for (int i = 0; i < N; i++)
{
double minDist = Double.POSITIVE_INFINITY;
int min_indx = 0;
for (int k = 0; k < K; k++)
{
double dist_k = distance(i, k, designations);
if (dist_k < minDist)
{
minDist = dist_k;
min_indx = k;
}
}
newDesignations[i] = min_indx;
}
for(int i = 0; i < N; i++)
changed += updateMeansFromChange(i, designations);
            //update constants
updateNormConsts();
//update designations
System.arraycopy(newDesignations, 0, designations, 0, N);
}
while (changed > 0 && ++iter < maximumIterations);
return designations;
}
@Override
public LloydKernelKMeans clone()
{
return new LloydKernelKMeans(this);
}
}
| 4,617 | 26.819277 | 98 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/kmeans/MiniBatchKMeans.java | package jsat.clustering.kmeans;
import java.util.*;
import java.util.concurrent.*;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.DataSet;
import jsat.clustering.KClustererBase;
import jsat.clustering.SeedSelectionMethods;
import jsat.clustering.SeedSelectionMethods.SeedSelection;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.*;
import jsat.utils.*;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
/**
* Implements the mini-batch algorithms for k-means. This is a stochastic algorithm,
* so it does not find the global solution. This implementation is parallel, but
* only the methods that specify the exact number of clusters are supported. <br>
* <br>
* See: Sculley, D. (2010). <i>Web-scale k-means clustering</i>. Proceedings of the
* 19th international conference on World wide web (pp. 1177–1178).
* New York, New York, USA: ACM Press. doi:10.1145/1772690.1772862
*
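 * <br><br>
 * Example usage (a minimal sketch; the {@code dataset} variable and the
 * batch size, iteration count, and k values are illustrative assumptions):
 * <pre>{@code
 * DataSet dataset = ...; //some dataset with numeric features
 * MiniBatchKMeans km = new MiniBatchKMeans(new EuclideanDistance(), 100, 50);
 * int[] assignments = km.cluster(dataset, 10, true, null); //k = 10, run in parallel
 * List<Vec> centers = km.getMeans(); //means are stored by default
 * }</pre>
 *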
* @author Edward Raff
*/
public class MiniBatchKMeans extends KClustererBase
{
private static final long serialVersionUID = 412553399508594014L;
private int batchSize;
private int iterations;
private DistanceMetric dm;
private SeedSelectionMethods.SeedSelection seedSelection;
private boolean storeMeans = true;
private List<Vec> means;
/**
* Creates a new Mini-Batch k-Means object that uses
* {@link SeedSelection#KPP k-means++} for seed selection
* and uses the {@link EuclideanDistance}.
*
* @param batchSize the mini-batch size
* @param iterations the number of mini batches to perform
*/
public MiniBatchKMeans(int batchSize, int iterations)
{
this(new EuclideanDistance(), batchSize, iterations);
}
/**
* Creates a new Mini-Batch k-Means object that uses
* {@link SeedSelection#KPP k-means++} for seed selection.
*
* @param dm the distance metric to use
* @param batchSize the mini-batch size
* @param iterations the number of mini batches to perform
*/
public MiniBatchKMeans(DistanceMetric dm, int batchSize, int iterations)
{
this(dm, batchSize, iterations, SeedSelectionMethods.SeedSelection.KPP);
}
/**
* Creates a new Mini-Batch k-Means object
* @param dm the distance metric to use
* @param batchSize the mini-batch size
* @param iterations the number of mini batches to perform
* @param seedSelection the seed selection algorithm to initiate clustering
*/
public MiniBatchKMeans(DistanceMetric dm, int batchSize, int iterations, SeedSelection seedSelection)
{
setBatchSize(batchSize);
setIterations(iterations);
setDistanceMetric(dm);
setSeedSelection(seedSelection);
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public MiniBatchKMeans(MiniBatchKMeans toCopy)
{
this.batchSize = toCopy.batchSize;
this.iterations = toCopy.iterations;
this.dm = toCopy.dm.clone();
this.seedSelection = toCopy.seedSelection;
this.storeMeans = toCopy.storeMeans;
if(toCopy.means != null)
{
this.means = new ArrayList<>();
for(Vec v : toCopy.means)
this.means.add(v.clone());
}
}
/**
* If set to {@code true} the computed means will be stored after clustering
* is completed, and can then be retrieved using {@link #getMeans() }.
* @param storeMeans {@code true} if the means should be stored for later,
* {@code false} to discard them once clustering is complete.
*/
public void setStoreMeans(boolean storeMeans)
{
this.storeMeans = storeMeans;
}
/**
     * Returns the raw list of means that were used for each cluster.
     * @return the list of means for each cluster
*/
public List<Vec> getMeans()
{
return means;
}
/**
* Sets the distance metric used for determining the nearest cluster center
* @param dm the distance metric to use
*/
public void setDistanceMetric(DistanceMetric dm)
{
this.dm = dm;
}
/**
* Returns the distance metric used for determining the nearest cluster center
* @return the distance metric in use
*/
public DistanceMetric getDistanceMetric()
{
return dm;
}
/**
* Sets the batch size to use at each iteration. Increasing the
* batch size can improve the resulting clustering, but increases
* computational cost at each iteration. <br>
     * If the batch size is set equal to or larger than the data set size,
* it reduces to the {@link NaiveKMeans naive k-means} algorithm.
* @param batchSize the number of points to use at each iteration
*/
public void setBatchSize(int batchSize)
{
if(batchSize < 1)
throw new ArithmeticException("Batch size must be a positive value, not " + batchSize);
this.batchSize = batchSize;
}
/**
* Returns the batch size used at each iteration
* @return the batch size in use
*/
public int getBatchSize()
{
return batchSize;
}
/**
* Sets the number of mini-batch iterations to perform
* @param iterations the number of algorithm iterations to perform
*/
public void setIterations(int iterations)
{
if(iterations < 1)
throw new ArithmeticException("Iterations must be a positive value, not " + iterations);
this.iterations = iterations;
}
/**
* Returns the number of mini-batch iterations used
* @return the number of algorithm iterations that will be used
*/
public int getIterations()
{
return iterations;
}
/**
* Sets the method of selecting the initial data points to
* seed the clustering algorithm.
* @param seedSelection the seed selection algorithm to use
*/
public void setSeedSelection(SeedSelection seedSelection)
{
this.seedSelection = seedSelection;
}
/**
* Returns the method of seed selection to use
* @return the method of seed selection to use
*/
public SeedSelection getSeedSelection()
{
return seedSelection;
}
@Override
public int[] cluster(DataSet dataSet, int[] designations)
{
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public int[] cluster(DataSet dataSet, int clusters, boolean parallel, int[] designations)
{
if(designations == null)
designations = new int[dataSet.size()];
TrainableDistanceMetric.trainIfNeeded(dm, dataSet, parallel);
final List<Vec> source = dataSet.getDataVectors();
final List<Double> distCache;
distCache = dm.getAccelerationCache(source, parallel);
means = SeedSelectionMethods.selectIntialPoints(dataSet, clusters, dm, distCache, RandomUtil.getRandom(), seedSelection, parallel);
final List<List<Double>> meanQIs = new ArrayList<>(means.size());
for (int i = 0; i < means.size(); i++)
if (dm.supportsAcceleration())
meanQIs.add(dm.getQueryInfo(means.get(i)));
else
meanQIs.add(Collections.EMPTY_LIST);
final int[] v = new int[means.size()];
final int usedBatchSize = Math.min(batchSize, dataSet.size());
/**
* Store the indices of the sampled points instead of sampling, that
* way we can use the distance acceleration cache.
*/
final List<Integer> M = new IntList(usedBatchSize);
final List<Integer> allIndx = new IntList(source.size());
ListUtils.addRange(allIndx, 0, source.size(), 1);
final int[] nearestCenter = new int[usedBatchSize];
for(int iter = 0; iter < iterations; iter++)
{
M.clear();
ListUtils.randomSample(allIndx, M, usedBatchSize);
//compute centers
ParallelUtils.run(parallel, usedBatchSize, (start, end) ->
{
double tmp;
for (int i = start; i < end; i++)
{
double minDist = Double.POSITIVE_INFINITY;
int min = -1;
for (int j = 0; j < means.size(); j++)
{
tmp = dm.dist(M.get(i), means.get(j), meanQIs.get(j), source, distCache);
if (tmp < minDist)
{
minDist = tmp;
min = j;
}
}
nearestCenter[i] = min;
}
});
//Update centers
for(int j = 0; j < M.size(); j++)
{
int c_i = nearestCenter[j];
double eta = 1.0/(++v[c_i]);
Vec c = means.get(c_i);
c.mutableMultiply(1-eta);
c.mutableAdd(eta, source.get(M.get(j)));
}
//update mean caches
if(dm.supportsAcceleration())
for(int i = 0; i < means.size(); i++)
meanQIs.set(i, dm.getQueryInfo(means.get(i)));
}
        //Stochastic passes complete, compute the final assignment for every point
final int[] des = designations;
double sumErr = ParallelUtils.run(parallel, dataSet.size(), (start, end) ->
{
double dists = 0;
double tmp;
for (int i = start; i < end; i++)
{
double minDist = Double.POSITIVE_INFINITY;
int min = -1;
for (int j = 0; j < means.size(); j++)
{
tmp = dm.dist(i, means.get(j), meanQIs.get(j), source, distCache);
if (tmp < minDist)
{
minDist = tmp;
min = j;
}
}
des[i] = min;
dists += minDist*minDist;
}
return dists;
}, (t, u) -> t+u);
if(!storeMeans)
means = null;
return des;
}
@Override
public int[] cluster(DataSet dataSet, int lowK, int highK, boolean parallel, int[] designations)
{
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public MiniBatchKMeans clone()
{
return new MiniBatchKMeans(this);
}
}
| 10,873 | 31.076696 | 139 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/kmeans/NaiveKMeans.java | package jsat.clustering.kmeans;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.LongAdder;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.DataSet;
import jsat.clustering.SeedSelectionMethods;
import jsat.clustering.SeedSelectionMethods.SeedSelection;
import static jsat.clustering.SeedSelectionMethods.selectIntialPoints;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.linear.distancemetrics.TrainableDistanceMetric;
import jsat.utils.FakeExecutor;
import jsat.utils.SystemInfo;
import jsat.utils.concurrent.AtomicDoubleArray;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
/**
* An implementation of Lloyd's K-Means clustering algorithm using the
* naive algorithm. This implementation exists mostly for comparison as
* a base line and educational reasons. For efficient exact k-Means,
* use {@link ElkanKMeans}<br>
* <br>
* This implementation is parallel, but does not support any of the
* clustering methods that do not specify the number of clusters.
*
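 * <br><br>
 * Example usage (a minimal sketch; the {@code dataset} variable and k = 5
 * are illustrative assumptions):
 * <pre>{@code
 * DataSet dataset = ...; //some dataset with numeric features
 * NaiveKMeans km = new NaiveKMeans(); //Euclidean distance and k-means++ seeding
 * int[] assignments = km.cluster(dataset, 5, true, null); //k = 5, run in parallel
 * }</pre>
 *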
* @author Edward Raff
*/
public class NaiveKMeans extends KMeans
{
private static final long serialVersionUID = 6164910874898843069L;
/**
* Creates a new naive k-Means cluster using
* {@link SeedSelection#KPP k-means++} for the seed selection and the
* {@link EuclideanDistance}
*/
public NaiveKMeans()
{
this(new EuclideanDistance());
}
/**
* Creates a new naive k-Means cluster using
* {@link SeedSelection#KPP k-means++} for the seed selection.
* @param dm the distance function to use
*/
public NaiveKMeans(DistanceMetric dm)
{
this(dm, SeedSelectionMethods.SeedSelection.KPP);
}
/**
* Creates a new naive k-Means cluster
* @param dm the distance function to use
* @param seedSelection the method of selecting the initial seeds
*/
public NaiveKMeans(DistanceMetric dm, SeedSelection seedSelection)
{
this(dm, seedSelection, RandomUtil.getRandom());
}
/**
* Creates a new naive k-Means cluster
* @param dm the distance function to use
* @param seedSelection the method of selecting the initial seeds
* @param rand the source of randomness to use
*/
public NaiveKMeans(DistanceMetric dm, SeedSelection seedSelection, Random rand)
{
super(dm, seedSelection, rand);
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public NaiveKMeans(NaiveKMeans toCopy)
{
super(toCopy);
}
@Override
protected double cluster(final DataSet dataSet, List<Double> accelCacheInit, final int k, final List<Vec> means, final int[] assignment, final boolean exactTotal, boolean parallel, boolean returnError, Vec dataPointWeights)
{
TrainableDistanceMetric.trainIfNeeded(dm, dataSet, parallel);
/**
* Weights for each data point
*/
final Vec W;
if (dataPointWeights == null)
W = dataSet.getDataWeights();
else
W = dataPointWeights;
final int blockSize = dataSet.size() / SystemInfo.LogicalCores;
final List<Vec> X = dataSet.getDataVectors();
        //done a wonky way b/c we want this as a final object for convenience, otherwise we may be stuck with null accel when we don't need to be
final List<Double> accelCache;
if (accelCacheInit == null)
accelCache = dm.getAccelerationCache(X, parallel);
else
accelCache = accelCacheInit;
if (means.size() != k)
{
means.clear();
means.addAll(selectIntialPoints(dataSet, k, dm, accelCache, rand, seedSelection, parallel));
}
final List<List<Double>> meanQIs = new ArrayList<>(k);
//Use dense mean objects
for(int i = 0; i < means.size(); i++)
{
if(dm.supportsAcceleration())
meanQIs.add(dm.getQueryInfo(means.get(i)));
else
meanQIs.add(Collections.EMPTY_LIST);
if(means.get(i).isSparse())
means.set(i, new DenseVector(means.get(i)));
}
final List<Vec> meanSum = new ArrayList<>(means.size());
final AtomicDoubleArray meanCounts = new AtomicDoubleArray(means.size());
for(int i = 0; i < k; i++)
meanSum.add(new DenseVector(means.get(0).length()));
final LongAdder changes = new LongAdder();
//used to store local changes to the means and accumulated at the end
final ThreadLocal<Vec[]> localMeanDeltas = new ThreadLocal<Vec[]>()
{
@Override
protected Vec[] initialValue()
{
Vec[] deltas = new Vec[k];
for(int i = 0; i < k; i++)
deltas[i] = new DenseVector(means.get(0).length());
return deltas;
}
};
final int N = dataSet.size();
Arrays.fill(assignment, -1);
do
{
changes.reset();
ParallelUtils.run(parallel, N, (start, end) ->
{
Vec[] deltas = localMeanDeltas.get();
double tmp;
for (int i = start; i < end; i++)
{
final Vec x = X.get(i);
double minDist = Double.POSITIVE_INFINITY;
int min = -1;
for (int j = 0; j < means.size(); j++)
{
tmp = dm.dist(i, means.get(j), meanQIs.get(j), X, accelCache);
if (tmp < minDist)
{
minDist = tmp;
min = j;
}
}
if(assignment[i] == min)
continue;
final double w = W.get(i);
//add change
deltas[min].mutableAdd(w, x);
meanCounts.addAndGet(min, w);
//remove from prev owner
if(assignment[i] >= 0)
{
deltas[assignment[i]].mutableSubtract(w, x);
meanCounts.getAndAdd(assignment[i], -w);
}
assignment[i] = min;
changes.increment();
}
//accumulate deltas into globals
for(int i = 0; i < deltas.length; i++)
synchronized(meanSum.get(i))
{
meanSum.get(i).mutableAdd(deltas[i]);
deltas[i].zeroOut();
}
});
if(changes.longValue() == 0)
break;
for(int i = 0; i < k; i++)
{
meanSum.get(i).copyTo(means.get(i));
means.get(i).mutableDivide(meanCounts.get(i));
if(dm.supportsAcceleration())
meanQIs.set(i, dm.getQueryInfo(means.get(i)));
}
}
while(changes.longValue() > 0);
if (returnError)
{
if (saveCentroidDistance)
nearestCentroidDist = new double[X.size()];
else
nearestCentroidDist = null;
double totalDistance = ParallelUtils.run(parallel, N, (start, end) ->
{
double totalDistLocal = 0;
for(int i = start; i < end; i++)
{
double dist = dm.dist(i, means.get(assignment[i]), meanQIs.get(assignment[i]), X, accelCache);
totalDistLocal += Math.pow(dist, 2);
if(saveCentroidDistance)
nearestCentroidDist[i] = dist;
}
return totalDistLocal;
}, (t, u) -> t+u);
return totalDistance;
}
else
            return 0;//error was not requested, so no need to compute it
}
@Override
public NaiveKMeans clone()
{
return new NaiveKMeans(this);
}
}
| 8,555 | 33.224 | 227 | java |
JSAT | JSAT-master/JSAT/src/jsat/clustering/kmeans/XMeans.java |
package jsat.clustering.kmeans;
import java.util.*;
import jsat.DataSet;
import jsat.SimpleDataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.Vec;
import jsat.clustering.SeedSelectionMethods;
import jsat.linear.*;
import static java.lang.Math.*;
/**
* This class provides a method of performing {@link KMeans} clustering when the
* value of {@code K} is not known. It works by recursively splitting means
* up to some specified maximum.
* value. <br>
* <br>
* When the value of {@code K} is specified, the implementation will simply call
* the regular KMeans object it was constructed with. <br>
* <br>
 * Note that specifying a minimum value of {@code K=1} has a tendency to result
 * in no splits by the algorithm, returning the naive result of 1 cluster. It is better
* to use at least {@code K=2} as the default minimum, which is what the
* implementation will start from when no range of {@code K} is given. <br>
* <br>
 * See: Pelleg, D., & Moore, A. (2000). <i>X-means: Extending K-means with
* Efficient Estimation of the Number of Clusters</i>. In ICML (pp. 727–734).
* San Francisco, CA, USA: Morgan Kaufmann Publishers Inc. Retrieved from
* <a href="http://pdf.aminer.org/000/335/443/x_means_extending_k_means_with_efficient_estimation_of_the.pdf">
* here</a>
*
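 * <br><br>
 * Example usage (a minimal sketch; the {@code dataset} variable and the
 * search range of 2 to 20 clusters are illustrative assumptions):
 * <pre>{@code
 * DataSet dataset = ...; //some dataset with numeric features
 * XMeans xm = new XMeans(); //wraps a HamerlyKMeans by default
 * int[] assignments = xm.cluster(dataset, 2, 20, true, null); //search for k in [2, 20]
 * }</pre>
 *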
* @author Edward Raff
*/
public class XMeans extends KMeans
{
private static final long serialVersionUID = -2577160317892141870L;
private boolean stopAfterFail = false;
private boolean iterativeRefine = true;
private int minClusterSize = 25;
private KMeans kmeans;
public XMeans()
{
this(new HamerlyKMeans());
}
public XMeans(KMeans kmeans)
{
super(kmeans.dm, kmeans.seedSelection, kmeans.rand);
this.kmeans = kmeans;
this.kmeans.saveCentroidDistance = true;
this.kmeans.setStoreMeans(true);
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public XMeans(XMeans toCopy)
{
super(toCopy);
this.kmeans = toCopy.kmeans.clone();
this.stopAfterFail = toCopy.stopAfterFail;
this.iterativeRefine = toCopy.iterativeRefine;
this.minClusterSize = toCopy.minClusterSize;
}
/**
* Each new cluster will be tested for improvement according to the BIC
* metric. If this is set to {@code true} then an optimization is done that
     * once a center fails to be improved by splitting, it will never be tested
* again. This is a safe assumption when
* {@link #setIterativeRefine(boolean) } is set to {@code false}, but
* otherwise may not quite be true. <br>
* <br>
     * When {@code stopAfterFail} is {@code true}, X-Means will
* make at most O(k) runs of k-means for the final value of k chosen. When
* {@code false} (the default option), at most O(k<sup>2</sup>) runs of
* k-means will occur.
*
* @param stopAfterFail {@code true} if a centroid shouldn't be re-tested once it
* fails to split.
*/
public void setStopAfterFail(boolean stopAfterFail)
{
this.stopAfterFail = stopAfterFail;
}
/**
*
* @return {@code true} if clusters that fail to split wont be re-tested.
* {@code false} if they will.
*/
public boolean isStopAfterFail()
{
return stopAfterFail;
}
/**
* Sets the minimum size for splitting a cluster.
* @param minClusterSize the minimum number of data points that must be present in a
* cluster to consider splitting it
*/
public void setMinClusterSize(int minClusterSize)
{
if(minClusterSize < 2)
throw new IllegalArgumentException("min cluster size that could be split is 2, not " + minClusterSize);
this.minClusterSize = minClusterSize;
}
/**
*
* @return the minimum number of data points that must be present in a
* cluster to consider splitting it
*/
public int getMinClusterSize()
{
return minClusterSize;
}
/**
* Sets whether or not the set of all cluster centers should be refined at
* every iteration. By default this is {@code true} and part of how the
* X-Means algorithm is described. Setting this to {@code false} can result
* in large speedups at the potential cost of quality.
* @param refineCenters {@code true} to refine the cluster centers at every
* step, {@code false} to skip this step of the algorithm.
*/
public void setIterativeRefine(boolean refineCenters)
{
this.iterativeRefine = refineCenters;
}
/**
*
* @return {@code true} if the cluster centers are refined at every
* step, {@code false} if skipping this step of the algorithm.
*/
public boolean getIterativeRefine()
{
return iterativeRefine;
}
@Override
public int[] cluster(DataSet dataSet, int[] designations)
{
return cluster(dataSet, 2, Math.max(dataSet.size()/20, 10), designations);
}
@Override
public int[] cluster(DataSet dataSet, boolean parallel, int[] designations)
{
return cluster(dataSet, 2, Math.max(dataSet.size()/20, 10), parallel, designations);
}
/**
* "p_j is simply the sum of K- 1 class probabilities, M * K centroid
* coordinates, and one variance estimate."
*
* @param K the number of clusters
* @param D the number of dimensions
* @return the number of free parameters
*/
private static int freeParameters(int K, int D)
{
return (K - 1) + (D * K) + 1;
}
@Override
public int[] cluster(DataSet dataSet, int lowK, int highK, boolean parallel, int[] designations)
{
final int N = dataSet.size();
final int D = dataSet.getNumNumericalVars();//"M" in orig paper
if(designations == null || designations.length < dataSet.size())
designations = new int[N];
List<Vec> data = dataSet.getDataVectors();
final List<Double> accelCache = dm.getAccelerationCache(data, parallel);
/**
* The sum of ||x - \mu_i||^2 for each cluster currently kept
*/
double[] localVar = new double[highK];
int[] localOwned = new int[highK];
//initiate
if(lowK >= 2)
{
means = new ArrayList<Vec>();
kmeans.cluster(dataSet, accelCache, lowK, means, designations, true, parallel, true, null);
for(int i = 0; i < data.size(); i++)
{
localVar[designations[i]] += Math.pow(kmeans.nearestCentroidDist[i], 2);
localOwned[designations[i]]++;
}
}
else//1 mean of all the data
{
if(designations == null || designations.length < N)
designations = new int[N];
else
Arrays.fill(designations, 0);
means = new ArrayList<>(Arrays.asList(MatrixStatistics.meanVector(dataSet)));
localOwned[0] = N;
List<Double> qi = dm.getQueryInfo(means.get(0));
for(int i = 0; i < data.size(); i++)
localVar[0] += Math.pow(dm.dist(i, means.get(0), qi, data, accelCache), 2);
}
int[] subS = new int[designations.length];
int[] subC = new int[designations.length];
        //track if we should stop testing a mean or not
List<Boolean> dontRedo = new ArrayList<>(Collections.nCopies(means.size(), false));
int origMeans;
do
{
origMeans = means.size();
for(int c = 0; c < origMeans; c++)
{
if(dontRedo.get(c))
continue;
/*
* Next, in each parent region we run a local K-means (with
* K = 2) for each pair of children. It is local in that the
* children are fighting each other for the points in the
* parent's region: no others
*/
List<DataPoint> X = getDatapointsFromCluster(c, designations, dataSet, subS);
                final int n = X.size();//NOTE, not the same as N. PAY ATTENTION
                //TODO add the optimization in the paper where we check for movement, and don't test means that haven't moved much
if(X.size() < minClusterSize || means.size() == highK)
continue;//this loop with force it to exit when we hit max K
subC = kmeans.cluster(new SimpleDataSet(X), 2, parallel, subC);
//call explicitly to force that distance to nearest center is saved
List<Vec> subMean = new ArrayList<>(2);
kmeans.cluster(new SimpleDataSet(X), null, 2, subMean, subC, true, parallel, true, null);
double[] nearDist = kmeans.nearestCentroidDist;
Vec c1 = subMean.get(0);
Vec c2 = subMean.get(1);
/*
* "it determines which one to explore by improving the BIC
* locally in each region."
* so we only compute BIC from local information
*/
double newSigma = 0;
int size_c1 = 0;
for(int i = 0; i < X.size(); i++)
{
newSigma += Math.pow(nearDist[i], 2);
if(subC[i] == 0)
size_c1++;
}
newSigma /= D*(n-2);
int size_c2 = n-size_c1;
//have needed values, now compute BIC for LOCAL models
double localNewBic = size_c1*log(size_c1) + size_c2*log(size_c2)
- n*log(n)
- n*D/2.0*log(2*PI*newSigma)
- D/2.0*(n-2)//that gets us the log like, last line to penalize for bic
-freeParameters(2, D)/2.0*log(n);
double localOldBic =
- n*D/2.0*log(2*PI*localVar[c]/(D*(n-1)))
- D/2.0*(n-1)//that gets us the log like, last line to penalize for bic
-freeParameters(1, D)/2.0*log(n);
if(localOldBic > localNewBic)
{
if(stopAfterFail)//if we are going to trust that H0 is true forever, mark it
dontRedo.set(c, true);
continue;//passed the test, do not split
}
//else, accept the split
//first, update assignment array. Cluster '0' stays as is, re-set cluster '1'
for(int i = 0; i < X.size(); i++)
if(subC[i] == 1)
designations[subS[i]] = means.size();
//replace current mean and add new one
means.set(c, c1.clone());//cur index in dontRedo stays false
means.add(c2.clone());//add a 'false' for new center
dontRedo.add(false);
}
//"Between each round of splitting, we run k-means on the entire dataset and all the centers to refine the current solution"
if(iterativeRefine && means.size() > 1)
{
kmeans.cluster(dataSet, accelCache, means.size(), means, designations, true, parallel, true, null);
Arrays.fill(localVar, 0.0);
Arrays.fill(localOwned, 0);
for(int i = 0; i < data.size(); i++)
{
localVar[designations[i]] += Math.pow(kmeans.nearestCentroidDist[i], 2);
localOwned[designations[i]]++;
}
}
}
while (origMeans < means.size());
        if(!iterativeRefine)//if we haven't been refining we need to do so now!
kmeans.cluster(dataSet, accelCache, means.size(), means, designations, false, parallel, false, null);
return designations;
}
@Override
public int getIterationLimit()
{
return kmeans.getIterationLimit();
}
@Override
public void setIterationLimit(int iterLimit)
{
kmeans.setIterationLimit(iterLimit);
}
@Override
public void setSeedSelection(SeedSelectionMethods.SeedSelection seedSelection)
{
        if(kmeans != null)//needed when initializing
kmeans.setSeedSelection(seedSelection);
}
@Override
public SeedSelectionMethods.SeedSelection getSeedSelection()
{
return kmeans.getSeedSelection();
}
@Override
protected double cluster(DataSet dataSet, List<Double> accelCache, int k, List<Vec> means, int[] assignment, boolean exactTotal, boolean threadpool, boolean returnError, Vec dataPointWeights)
{
return kmeans.cluster(dataSet, accelCache, k, means, assignment, exactTotal, threadpool, returnError, null);
}
@Override
public XMeans clone()
{
return new XMeans(this);
}
}
| 13,163 | 36.186441 | 195 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/AutoDeskewTransform.java |
package jsat.datatransform;
import java.util.Arrays;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.IndexValue;
import jsat.linear.Vec;
import jsat.math.IndexFunction;
import jsat.math.OnLineStatistics;
import jsat.utils.DoubleList;
/**
* This transform applies a shifted Box-Cox transform for several fixed values
* of λ, and selects the one that provides the greatest reduction in the
* skewness of the distribution. This is done in an attempt to make the
* individual features appear more normal. The shifted values are done to
* preserve zeros and keep sparse inputs sparse. This is done with two passes
* through the data set, but requires only O(D #λ values) memory.
* <br><br>
* The default values of λ are -1, -1/2, 0, 1/2, 1. When using
* negative λ values all zeros are skipped and left as zeros. λ =
* 1 is an implicit value that is always included regardless of the input, as it
* is equivalent to leaving the data unchanged when preserving zero values.
* The stated default values include the <i>log(x+1)</i> and <i>sqrt(x)</i>
* transforms that are commonly used for deskewing as special cases.
* <br><br>
 * Skewness can be calculated by including zeros, but by default they are ignored as
* "not-present" values.
*
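 * <br><br>
 * Example usage (a minimal sketch; the {@code dataset} variable is an
 * illustrative assumption):
 * <pre>{@code
 * DataSet dataset = ...; //some dataset with numeric features
 * AutoDeskewTransform deskew = new AutoDeskewTransform(); //default lambda values
 * deskew.fit(dataset); //selects the best lambda for each feature
 * dataset.applyTransform(deskew); //apply the learned transform to every point
 * }</pre>
 *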
* @author Edward Raff
*/
public class AutoDeskewTransform implements InPlaceTransform
{
private static final long serialVersionUID = -4894242802345656448L;
private double[] finalLambdas;
private double[] mins;
private final IndexFunction transform = new IndexFunction()
{
private static final long serialVersionUID = -404316813485246422L;
@Override
public double indexFunc(double value, int index)
{
if(index < 0)
return 0.0;
return transform(value, finalLambdas[index], mins[index]);
}
};
private static final DoubleList defaultList = new DoubleList(7);
static
{
defaultList.add(-1.0);
defaultList.add(-0.5);
defaultList.add(0.0);
defaultList.add(0.5);
defaultList.add(1.0);
}
private List<Double> lambdas;
private boolean ignorZeros;
/**
* Creates a new AutoDeskew transform
*
*/
public AutoDeskewTransform()
{
this(true, defaultList);
}
/**
* Creates a new AutoDeskew transform
*
* @param lambdas the list of lambda values to evaluate
*/
public AutoDeskewTransform(final double... lambdas)
{
this(true, DoubleList.view(lambdas, lambdas.length));
}
/**
* Creates a new AutoDeskew transform
*
* @param lambdas the list of lambda values to evaluate
*/
public AutoDeskewTransform(final List<Double> lambdas)
{
this(true, lambdas);
}
/**
* Creates a new AutoDeskew transform
*
* @param ignorZeros {@code true} to ignore zero values when calculating the
* skewness, {@code false} to include them.
* @param lambdas the list of lambda values to evaluate
*/
public AutoDeskewTransform(boolean ignorZeros, final List<Double> lambdas)
{
this.ignorZeros = ignorZeros;
this.lambdas = lambdas;
}
/**
* Creates a new deskewing object from the given data set
*
* @param dataSet the data set to deskew
*/
public AutoDeskewTransform(DataSet dataSet)
{
this(dataSet, defaultList);
}
/**
* Creates a new deskewing object from the given data set
*
* @param dataSet the data set to deskew
* @param lambdas the list of lambda values to evaluate
*/
public AutoDeskewTransform(DataSet dataSet, final List<Double> lambdas)
{
this(dataSet, true, lambdas);
}
/**
* Creates a new deskewing object from the given data set
*
* @param dataSet the data set to deskew
* @param ignorZeros {@code true} to ignore zero values when calculating the
* skewness, {@code false} to include them.
* @param lambdas the list of lambda values to evaluate
*/
public AutoDeskewTransform(DataSet dataSet, boolean ignorZeros, final List<Double> lambdas)
{
this(ignorZeros, lambdas);
fit(dataSet);
}
@Override
public void fit(DataSet dataSet)
{
        //going to try leaving things alone no matter what
if (!lambdas.contains(1.0))
lambdas.add(1.0);
OnLineStatistics[][] stats = new OnLineStatistics[lambdas.size()][dataSet.getNumNumericalVars()];
for (int i = 0; i < stats.length; i++)
for (int j = 0; j < stats[i].length; j++)
stats[i][j] = new OnLineStatistics();
mins = new double[dataSet.getNumNumericalVars()];
Arrays.fill(mins, Double.POSITIVE_INFINITY);
boolean containsSparseVecs = false;
//First pass, get min/max values
for (int i = 0; i < dataSet.size(); i++)
{
Vec x = dataSet.getDataPoint(i).getNumericalValues();
if (x.isSparse())
containsSparseVecs = true;
for (IndexValue iv : x)
{
final int indx = iv.getIndex();
final double val = iv.getValue();
mins[indx] = Math.min(val, mins[indx]);
}
}
if (containsSparseVecs)
for (int i = 0; i < mins.length; i++)//done b/c we only iterated the non-zeros
mins[i] = Math.min(0, mins[i]);
//Second pass, find the best skew transform
for (int i = 0; i < dataSet.size(); i++)
{
Vec x = dataSet.getDataPoint(i).getNumericalValues();
double weight = dataSet.getWeight(i);
int lastIndx = -1;
for (IndexValue iv : x)
{
int indx = iv.getIndex();
double val = iv.getValue();
updateStats(lambdas, stats, indx, val, mins, weight);
if (!ignorZeros)//we have to do this here instead of bulk insert at the end b/c of different weight value combinations
for (int prevIndx = lastIndx + 1; prevIndx < indx; prevIndx++)
updateStats(lambdas, stats, prevIndx, 0.0, mins, weight);
lastIndx = indx;
}
//Catch trailing zero values
if (!ignorZeros)//we have to do this here instead of bulk insert at the end b/c of different weight value combinations
for (int prevIndx = lastIndx + 1; prevIndx < mins.length; prevIndx++)
updateStats(lambdas, stats, prevIndx, 0.0, mins, weight);
}
        //Finish by figuring out which did best
finalLambdas = new double[mins.length];
int lambdaOneIndex = lambdas.indexOf(1.0);
for (int d = 0; d < finalLambdas.length; d++)
{
double minSkew = Double.POSITIVE_INFINITY;
            double bestLambda = 1;//done this way in case a NaN slips in, we will leave data unchanged
for (int k = 0; k < lambdas.size(); k++)
{
double skew = Math.abs(stats[k][d].getSkewness());
if (skew < minSkew)
{
minSkew = skew;
bestLambda = lambdas.get(k);
}
}
double origSkew = Math.abs(stats[lambdaOneIndex][d].getSkewness());
            if (origSkew > minSkew * 1.05)//only change if there is a reasonable improvement
finalLambdas[d] = bestLambda;
else
finalLambdas[d] = 1.0;
}
}
/**
* Copy constructor
*
* @param toCopy the object to copy
*/
protected AutoDeskewTransform(AutoDeskewTransform toCopy)
{
this.finalLambdas = Arrays.copyOf(toCopy.finalLambdas, toCopy.finalLambdas.length);
this.mins = Arrays.copyOf(toCopy.mins, toCopy.mins.length);
}
private static double transform(final double val, final double lambda, final double min)
{
if (val == 0)
return 0;
//special cases
if (lambda == 2)
{
return val * val;
}
if (lambda == 1)
{
return val;
}
else if (lambda == 0.5)
{
return Math.sqrt(val - min);
}
else if (lambda == 0)
{
return Math.log(val + 1 - min);//log(1) = 0
}
else if (lambda == -0.5)
{
return 1 / (Math.sqrt(val - min));
}
else if (lambda == -1)
{
return 1 / val;
}
else if (lambda == -2)
{
return 1 / (val * val);
}
else
{
//commented out case handled at top
//if(lambda < 0 && val == 0)
// return 0;//should be Inf, but we want to preserve sparsity
return Math.pow(val, lambda) / lambda;
}
}
@Override
public DataPoint transform(DataPoint dp)
{
DataPoint newDP = dp.clone();
mutableTransform(newDP);
return newDP;
}
@Override
public void mutableTransform(DataPoint dp)
{
dp.getNumericalValues().applyIndexFunction(transform);
}
@Override
public AutoDeskewTransform clone()
{
return new AutoDeskewTransform(this);
}
/**
* Updates the online stats for each value of lambda
*
* @param lambdas the list of lambda values
* @param stats the array of statistics trackers
* @param indx the feature index to add to
* @param val the value at the given feature index
* @param mins the minimum value array
* @param weight the weight to the given update
*/
private void updateStats(final List<Double> lambdas, OnLineStatistics[][] stats, int indx, double val, double[] mins, double weight)
{
for (int k = 0; k < lambdas.size(); k++)
stats[k][indx].add(transform(val, lambdas.get(k), mins[indx]), weight);
}
@Override
public boolean mutatesNominal()
{
return false;
}
}
| 10,233 | 30.58642 | 136 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/DataModelPipeline.java | package jsat.datatransform;
import java.util.List;
import java.util.concurrent.ExecutorService;
import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.Classifier;
import jsat.classifiers.DataPoint;
import jsat.parameters.Parameter;
import jsat.parameters.Parameter.ParameterHolder;
import jsat.parameters.Parameterized;
import jsat.regression.RegressionDataSet;
import jsat.regression.Regressor;
/**
* A Data Model Pipeline combines several data transforms and a base Classifier
* or Regressor into a unified object for performing classification and
* Regression with. This is useful for certain transforms for which their
* behavior is more tightly coupled with the model being used. In addition this
* allows a way for easily turning the parameters for a transform along with
* those of the predictor. <br>
* When using the Data Model Pipeline, the transforms that are apart of the
* pipeline should not be added to the model evaluators - as this will cause the
* transforms to be applied multiple times.
*
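 * <br><br>
 * Example usage (a minimal sketch; the {@code train} dataset and the choice
 * of base classifier and transform are illustrative assumptions):
 * <pre>{@code
 * ClassificationDataSet train = ...; //some labeled dataset
 * Classifier base = ...; //any base classifier
 * DataModelPipeline pipe = new DataModelPipeline(base, new AutoDeskewTransform());
 * pipe.train(train, true); //the transform is fit on the training data only
 * CategoricalResults pred = pipe.classify(train.getDataPoint(0));
 * }</pre>
 *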
* @author Edward Raff
*/
public class DataModelPipeline implements Classifier, Regressor, Parameterized
{
private static final long serialVersionUID = -2300996837897094414L;
@ParameterHolder(skipSelfNamePrefix = true)
private DataTransformProcess baseDtp;
private Classifier baseClassifier;
private Regressor baseRegressor;
private DataTransformProcess learnedDtp;
private Classifier learnedClassifier;
private Regressor learnedRegressor;
/**
* Creates a new Data Model Pipeline from the given transform process and
* base classifier
* @param dtp the data transforms to apply
* @param baseClassifier the classifier to learn with
*/
public DataModelPipeline(Classifier baseClassifier, DataTransformProcess dtp)
{
this.baseDtp = dtp;
this.baseClassifier = baseClassifier;
if(baseClassifier instanceof Regressor)
this.baseRegressor = (Regressor) baseClassifier;
}
/**
* Creates a new Data Model Pipeline from the given transform factories and
* base classifier
* @param transforms the data transforms to apply
* @param baseClassifier the classifier to learn with
*/
public DataModelPipeline(Classifier baseClassifier, DataTransform... transforms)
{
this(baseClassifier, new DataTransformProcess(transforms));
}
/**
* Creates a new Data Model Pipeline from the given transform process and
* base regressor
* @param dtp the data transforms to apply
* @param baseRegressor the regressor to learn with
*/
public DataModelPipeline(Regressor baseRegressor, DataTransformProcess dtp)
{
this.baseDtp = dtp;
this.baseRegressor = baseRegressor;
if(baseRegressor instanceof Classifier)
this.baseClassifier = (Classifier) baseRegressor;
}
/**
* Creates a new Data Model Pipeline from the given transform factories and
* base classifier
* @param transforms the data transforms to apply
* @param baseRegressor the regressor to learn with
*/
public DataModelPipeline(Regressor baseRegressor, DataTransform... transforms)
{
this(baseRegressor, new DataTransformProcess(transforms));
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public DataModelPipeline(DataModelPipeline toCopy)
{
this.baseDtp = toCopy.baseDtp.clone();
if(toCopy.baseClassifier != null && toCopy.baseClassifier == toCopy.baseRegressor)//only possible if both a classifier and regressor
{
this.baseClassifier = toCopy.baseClassifier.clone();
this.baseRegressor = (Regressor) this.baseClassifier;
}
else if(toCopy.baseClassifier != null)
this.baseClassifier = toCopy.baseClassifier.clone();
else if(toCopy.baseRegressor != null)
this.baseRegressor = toCopy.baseRegressor.clone();
else
throw new RuntimeException("BUG: Report Me!");
if(toCopy.learnedDtp != null)
this.learnedDtp = toCopy.learnedDtp.clone();
if(toCopy.learnedClassifier != null)
this.learnedClassifier = toCopy.learnedClassifier.clone();
if(toCopy.learnedRegressor != null)
this.learnedRegressor = toCopy.learnedRegressor.clone();
}
@Override
public CategoricalResults classify(DataPoint data)
{
return learnedClassifier.classify(learnedDtp.transform(data));
}
@Override
public void train(ClassificationDataSet dataSet, boolean parallel)
{
learnedDtp = baseDtp.clone();
        dataSet = dataSet.shallowClone();//don't want to actually edit the data set they gave us
learnedDtp.learnApplyTransforms(dataSet);
learnedClassifier = baseClassifier.clone();
learnedClassifier.train(dataSet, parallel);
}
@Override
public boolean supportsWeightedData()
{
if(baseClassifier != null)
return baseClassifier.supportsWeightedData();
else if(baseRegressor != null)
return baseRegressor.supportsWeightedData();
else
throw new RuntimeException("BUG: Report Me! This should not have happened");
}
@Override
public double regress(DataPoint data)
{
return learnedRegressor.regress(learnedDtp.transform(data));
}
@Override
public void train(RegressionDataSet dataSet, boolean parallel)
{
learnedDtp = baseDtp.clone();
        dataSet = dataSet.shallowClone();//don't want to actually edit the data set they gave us
learnedDtp.learnApplyTransforms(dataSet);
learnedRegressor = baseRegressor.clone();
learnedRegressor.train(dataSet, parallel);
}
@Override
public DataModelPipeline clone()
{
return new DataModelPipeline(this);
}
@Override
public List<Parameter> getParameters()
{
List<Parameter> params = Parameter.getParamsFromMethods(this);
if(baseClassifier != null && baseClassifier instanceof Parameterized)
params.addAll(((Parameterized)baseClassifier).getParameters());
else if(baseRegressor != null && baseRegressor instanceof Parameterized)
params.addAll(((Parameterized)baseRegressor).getParameters());
return params;
}
@Override
public Parameter getParameter(String paramName)
{
return Parameter.toParameterMap(getParameters()).get(paramName);
}
}
| 6,683 | 34.553191 | 140 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/DataTransform.java |
package jsat.datatransform;
import java.io.Serializable;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.exceptions.FailedToFitException;
/**
* A pre-processing step may be desirable before training. If a pre-processing
* step is used, it is necessary to also apply the same transform on the input
* being sent to the learning algorithm. This interface provides the needed
* mechanism. <br>
* A transform may or may not require training, it could be fully specified at
* construction, or learned from the data set. Learning is done via the
* {@link #fit(jsat.DataSet) fit method}. Many DataTransforms will include a
* constructor that takes a dataset as a parameter. These transforms will fit
* the data when constructed, and exist for convenience.
*
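 * <br><br>
 * Example usage (a minimal sketch; the {@code dataset} variable and the
 * choice of {@link AutoDeskewTransform} are illustrative assumptions):
 * <pre>{@code
 * DataSet dataset = ...; //some dataset
 * DataTransform transform = new AutoDeskewTransform();
 * transform.fit(dataset); //learn the transform from the data
 * DataPoint changed = transform.transform(dataset.getDataPoint(0));
 * }</pre>
 *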
* @author Edward Raff
*/
public interface DataTransform extends Cloneable, Serializable
{
/**
* Returns a new data point that is a transformation of the original data
* point. This new data point is a different object, but may contain the
* same references as the original data point. It is not guaranteed that you
* can mutate the transformed point without having a side effect on the
* original point.
*
* @param dp the data point to apply a transformation to
* @return a transformed data point
*/
public DataPoint transform(DataPoint dp);
/**
* Fits this transform to the given dataset. Some transforms can only be
* learned from classification or regression datasets. If an incompatible
* dataset type is given, a {@link FailedToFitException} exception may be
* thrown.
*
     * @param data the dataset to fit this transform to
* @throws FailedToFitException if the dataset type is not compatible with
* the transform
*/
public void fit(DataSet data);
public DataTransform clone();
}
| 1,896 | 36.94 | 80 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/DataTransformBase.java | package jsat.datatransform;
import java.util.List;
import jsat.parameters.Parameter;
import jsat.parameters.Parameterized;
/**
* This abstract class implements the Parameterized interface to ease the
* development of simple Data Transforms. If a more complicated set of
 * parameters is needed than what is obtained from
 * {@link Parameter#getParamsFromMethods(java.lang.Object) } then there is no
* reason to use this class.
*
* @author Edward Raff
*/
abstract public class DataTransformBase implements DataTransform, Parameterized
{
@Override
abstract public DataTransform clone();
}
| 615 | 25.782609 | 79 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/DataTransformProcess.java | package jsat.datatransform;
import java.util.ArrayList;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.Vec;
import jsat.parameters.Parameter;
import jsat.parameters.Parameter.ParameterHolder;
import jsat.parameters.Parameterized;
/**
* Performing a transform on the whole data set before training a classifier can
* add bias to the results. For proper evaluation, the transforms must be
* learned from the training set and not contain any knowledge from the testing
* set. A DataTransformProcess aids in this by providing a mechanism to contain
* several different transforms to learn and then apply.
* <br><br>
* The Parameters of the Data Transform Process are the parameters from the
* individual transform factories that make up the whole process. The name
* "DataTransformProcess" will not be prefixed to the parameter names.
*
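 * <br><br>
 * Example usage (a minimal sketch; the {@code train} and {@code test}
 * dataset variables and the particular transforms chosen are illustrative
 * assumptions):
 * <pre>{@code
 * DataSet train = ..., test = ...;
 * DataTransformProcess dtp = new DataTransformProcess(
 *         new AutoDeskewTransform(), new DenseSparceTransform(0.5));
 * dtp.learnApplyTransforms(train); //fit on the training data and transform it
 * test.applyTransform(dtp); //apply the already-learned transforms to the test data
 * }</pre>
 *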
* @author Edward Raff
*/
public class DataTransformProcess implements DataTransform, Parameterized
{
private static final long serialVersionUID = -2844495690944305885L;
@ParameterHolder(skipSelfNamePrefix = true)
private List<DataTransform> transformSource;
private List<DataTransform> learnedTransforms;
/**
     * Creates a new transform process that is empty. Transforms must
     * be added using
     * {@link #addTransform(jsat.datatransform.DataTransform) }.
*/
public DataTransformProcess()
{
transformSource = new ArrayList<>();
learnedTransforms = new ArrayList<>();
}
/**
     * Creates a new transform process from the listed transforms, which will be
     * applied in order by index.
     *
     * @param transforms the array of transforms to apply as the data transform process
*/
public DataTransformProcess(DataTransform... transforms)
{
this();
for(DataTransform dt : transforms)
this.addTransform(dt);
}
/**
* Adds a transform to the list of transforms. Transforms are learned and
* applied in the order in which they are added.
     * @param transform the transform to add
*/
public void addTransform(DataTransform transform)
{
transformSource.add(transform);
}
/**
*
* @return the number of transforms currently chained in this transform
* process
*/
public int getNumberOfTransforms()
{
return transformSource.size();
}
/**
* Consolidates transformation objects when possible. Currently only works with {@link RemoveAttributeTransform}
*/
private void consolidateTransforms()
{
for(int i = 0; i < learnedTransforms.size()-1; i++)
{
DataTransform t1 = learnedTransforms.get(i);
DataTransform t2 = learnedTransforms.get(i+1);
if(!(t1 instanceof RemoveAttributeTransform && t2 instanceof RemoveAttributeTransform))
continue;//They are not both RATs
RemoveAttributeTransform r1 = (RemoveAttributeTransform) t1;
RemoveAttributeTransform r2 = (RemoveAttributeTransform) t2;
r2.consolidate(r1);
learnedTransforms.remove(i);
i--;
}
}
@Override
public void fit(DataSet data)
{
learnApplyTransforms(data);
}
/**
* Learns the transforms for the given data set. The data set will not be
* altered. Once finished, <tt>this</tt> DataTransformProcess can be applied
* to the dataSet to get the transformed data set.
*
* @param dataSet the data set to learn a series of transforms from
*/
public void leanTransforms(DataSet dataSet)
{
learnApplyTransforms(dataSet.shallowClone());
}
/**
* Learns the transforms for the given data set. The data set is then
* altered after each transform is learned so the next transform can be
     * learned as well. <br> The results are equivalent to calling
     * {@link #leanTransforms(jsat.DataSet) } on the data set and then
     * calling {@link DataSet#applyTransform(jsat.datatransform.DataTransform) }
     * with this DataTransformProcess.
*
* @param dataSet the data set to learn a series of transforms from and
* alter into the final transformed form
*/
public void learnApplyTransforms(DataSet dataSet)
{
learnedTransforms.clear();
        //used to keep track of whether we can start using in-place transforms
boolean vecSafe = false;
boolean catSafe = false;
int iter = 0;
        //copy original references so we can check safety of in-place mutation later
Vec[] origVecs = new Vec[dataSet.size()];
int[][] origCats = new int[dataSet.size()][];
for (int i = 0; i < origVecs.length; i++)
{
DataPoint dp = dataSet.getDataPoint(i);
origVecs[i] = dp.getNumericalValues();
origCats[i] = dp.getCategoricalValues();
}
for (DataTransform dtf : transformSource)
{
DataTransform transform = dtf.clone();
transform.fit(dataSet);
if(transform instanceof InPlaceTransform)
{
InPlaceTransform ipt = (InPlaceTransform) transform;
//check if it is safe to apply mutations
if(iter > 0 && !vecSafe || (ipt.mutatesNominal() && !catSafe))
{
boolean vecClear = true, catClear = true;
for (int i = 0; i < origVecs.length && (vecClear || catClear); i++)
{
DataPoint dp = dataSet.getDataPoint(i);
vecClear = origVecs[i] != dp.getNumericalValues();
catClear = origCats[i] != dp.getCategoricalValues();
}
vecSafe = vecClear;
catSafe = catClear;
}
//Now we know if we can apply the mutations or not
if(vecSafe && (!ipt.mutatesNominal() || catSafe))
dataSet.applyTransformMutate(ipt, true);
else//go back to normal
dataSet.applyTransform(transform);
}
else
dataSet.applyTransform(transform);
learnedTransforms.add(transform);
iter++;
}
consolidateTransforms();
}
@Override
public DataPoint transform(DataPoint dp)
{
final Vec origNum = dp.getNumericalValues();
final int[] origCat = dp.getCategoricalValues();
for(DataTransform dt : learnedTransforms)
{
if(dt instanceof InPlaceTransform)
{
InPlaceTransform it = (InPlaceTransform) dt;
                //check if we can safely mutableTransform instead of allocate
if(origNum != dp.getNumericalValues() && (!it.mutatesNominal() || origCat != dp.getCategoricalValues()))
{
it.mutableTransform(dp);
continue;
}
}
dp = dt.transform(dp);
}
return dp;
}
@Override
public DataTransformProcess clone()
{
DataTransformProcess clone = new DataTransformProcess();
for(DataTransform dtf : this.transformSource)
clone.transformSource.add(dtf.clone());
for(DataTransform dt : this.learnedTransforms)
clone.learnedTransforms.add(dt.clone());
return clone;
}
}
| 7,687 | 34.266055 | 120 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/DenseSparceTransform.java |
package jsat.datatransform;
import java.util.Iterator;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.*;
/**
 * Dense sparse transform alters the vectors that store the numerical values.
 * Based on a threshold in (0, 1), vectors will be converted from dense to
 * sparse, sparse to dense, or left alone.
*
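 * <br><br>
 * Example usage (a minimal sketch; the {@code dataset} variable and the
 * threshold of 0.25 are illustrative assumptions):
 * <pre>{@code
 * DataSet dataset = ...; //some dataset
 * //vectors with less than 25% non-zero values become sparse, the rest become dense
 * DenseSparceTransform dst = new DenseSparceTransform(0.25);
 * dataset.applyTransform(dst); //no fitting is needed, it can be applied directly
 * }</pre>
 *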
* @author Edward Raff
*/
public class DenseSparceTransform implements DataTransform
{
private static final long serialVersionUID = -1177913691660616290L;
private double factor;
/**
     * Creates a new Dense Sparce Transform. The <tt>factor</tt> gives the maximal
     * percentage of values that may be non-zero for a vector to be sparse. Any
     * vector meeting the requirement will be converted to a sparse vector, and
     * others made dense. If the factor is greater than or equal to 1, then all
     * vectors will be made sparse. If less than or equal to 0, then all will
     * be made dense.
     *
     * @param factor the fraction of the vector's values that may be non-zero to qualify as sparse
*/
public DenseSparceTransform(double factor)
{
this.factor = factor;
}
@Override
public void fit(DataSet data)
{
        //no-op, nothing we need to learn
}
@Override
public DataPoint transform(DataPoint dp)
{
Vec orig = dp.getNumericalValues();
final int nnz = orig.nnz();
if (nnz / (double) orig.length() < factor)///make sparse
{
if(orig.isSparse())//already sparse, just return
return dp;
//else, make sparse
SparseVector sv = new SparseVector(orig.length(), nnz);//TODO create a constructor for this
for(int i = 0; i < orig.length(); i++)
if(orig.get(i) != 0)
sv.set(i, orig.get(i));
return new DataPoint(sv, dp.getCategoricalValues(), dp.getCategoricalData());
}
else//make dense
{
if(!orig.isSparse())//already dense, just return
return dp;
DenseVector dv = new DenseVector(orig.length());
Iterator<IndexValue> iter = orig.getNonZeroIterator();
while (iter.hasNext())
{
IndexValue indexValue = iter.next();
dv.set(indexValue.getIndex(), indexValue.getValue());
}
return new DataPoint(dv, dp.getCategoricalValues(), dp.getCategoricalData());
}
}
@Override
public DenseSparceTransform clone()
{
return new DenseSparceTransform(factor);
}
}
| 2,623 | 31 | 104 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/FastICA.java | package jsat.datatransform;
import java.util.*;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.*;
import static java.lang.Math.*;
import jsat.exceptions.FailedToFitException;
/**
* Provides an implementation of the FastICA algorithm for Independent Component
* Analysis (ICA). ICA is similar to PCA and Whitening, but assumes that the
* data is generated from a mixture of some <i>C</i> base components where
* mixing occurs instantaneously (i.e. produced from some matrix transform of
* the true components). ICA attempts to find the <i>C</i> components from the
* raw observations. <br>
* <br>
* See:
* <ul>
* <li>Hyvärinen, A. (1999). <i>Fast and robust fixed-point algorithms for
* independent component analysis</i>. IEEE Transactions on Neural Networks
* / a Publication of the IEEE Neural Networks Council, 10(3), 626–34.
* doi:10.1109/72.761722
* </li>
* <li>
 * Hyvärinen, A., & Oja, E. (2000). <i>Independent component analysis:
* algorithms and applications</i>. Neural Networks, 13(4-5), 411–430.
* doi:10.1016/S0893-6080(00)00026-5
* </li>
* </ul>
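 * <br><br>
 * Example usage (a minimal sketch; the {@code dataset} variable and the
 * choice of 5 components are illustrative assumptions):
 * <pre>{@code
 * DataSet dataset = ...; //some dataset with numeric features
 * FastICA ica = new FastICA(5); //attempt to recover 5 independent components
 * ica.fit(dataset); //performs its own centering and whitening by default
 * dataset.applyTransform(ica); //project each point onto the recovered components
 * }</pre>
 *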
* @author Edward Raff
*/
public class FastICA implements InvertibleTransform
{
//TODO add default search for C
private static final long serialVersionUID = -8644025740457515563L;
/**
* the number of base components to assume and try to discover
*/
private int C;
/**
* the Negative Entropy function to use
*/
private NegEntropyFunc G;
/**
     * {@code true} to assume the data has already been whitened
*/
private boolean preWhitened;
private ZeroMeanTransform zeroMean;
/**
* Un-mixes the observed data into the raw components we learned
*/
private Matrix unmixing;
/**
* The estimated mixing matrix to go from raw components to the observed data
*/
private Matrix mixing;
/**
* The FastICA algorithm requires a function f(x) to be used iteratively in
* the algorithm, but only makes use of the first and second derivatives of
* the algorithm.
*/
public static interface NegEntropyFunc
{
/**
*
* @param x the input to the function
* @return the first derivative of this function
*/
public double deriv1(double x);
/**
*
* @param x the input to the function
* @param d1 the first derivative of this function (from
* {@link #deriv1(double) })
* @return the second derivative of this function
*/
public double deriv2(double x, double d1);
}
/**
* A set of default negative entropy functions as specified in the original
* FastICA paper
*/
public enum DefaultNegEntropyFunc implements NegEntropyFunc
{
/**
* This is function <i>G<sub>1</sub></i> in the paper. This Negative
* Entropy function is described as a "good general-purpose contrast
* function" in the original paper, and the default method used.
*/
LOG_COSH
{
@Override
public double deriv1(double x)
{
return tanh(x);
}
@Override
public double deriv2(double x, double d1)
{
return 1-d1*d1;
}
},
/**
* This is function <i>G<sub>2</sub></i> in the paper, and according to
* the paper may be better than {@link #LOG_COSH} "when the
* independent components are highly super-Gaussian, or when
* robustness is very important"
*/
EXP
{
@Override
public double deriv1(double x)
{
return x*exp(-x*x/2);
}
@Override
public double deriv2(double x, double d1)
{
//calling exp is more expensive than just dividing to get back e(-x^2/2)
if(x == 0)
return 1;
return (1-x*x)*(d1/x);
}
},
/**
* This is the kurtosis-based approximation function <i>G<sub>3</sub>(x)
* = 1/4*x<sup>4</sup></i>. According to the original paper its use is
* "is justified on statistical grounds only for estimating sub-Gaussian
* independent components when there are no outliers."
*/
KURTOSIS
{
@Override
public double deriv1(double x)
{
return x*x*x;//x^3
}
@Override
public double deriv2(double x, double d1)
{
return x*x*3;//3 x^2
}
};
@Override
abstract public double deriv1(double x);
@Override
abstract public double deriv2(double x, double d1);
};
/**
* Creates a new FastICA transform that will attempt to fit 10 components.
* This is likely not optimal for any particular dataset
*/
public FastICA()
{
this(10);
}
/**
* Creates a new FastICA transform
*
* @param C the number of base components to assume and try to discover
*/
public FastICA(int C)
{
this(C, DefaultNegEntropyFunc.LOG_COSH, false);
}
/**
* Creates a new FastICA transform
* @param data the data set to transform
* @param C the number of base components to assume and try to discover
*/
public FastICA(DataSet data, int C)
{
this(data, C, DefaultNegEntropyFunc.LOG_COSH, false);
}
/**
* Creates a new FastICA transform
*
* @param data the data set to transform
* @param C the number of base components to assume and try to discover
* @param G the Negative Entropy function to use
* @param preWhitened {@code true} to assume the data has already been
* whitened before being given to the transform, {@code false} and the
* FastICA implementation will perform its own whitening.
*/
public FastICA(int C, NegEntropyFunc G, boolean preWhitened)
{
setC(C);
setNegEntropyFunction(G);
setPreWhitened(preWhitened);
}
/**
* Creates a new FastICA transform
* @param data the data set to transform
* @param C the number of base components to assume and try to discover
* @param G the Negative Entropy function to use
* @param preWhitened {@code true} to assume the data has already been
* whitened before being given to the transform, {@code false} and the
* FastICA implementation will perform its own whitening.
*/
public FastICA(DataSet data, int C, NegEntropyFunc G, boolean preWhitened)
{
this(C, G, preWhitened);
fit(data);
}
@Override
public void fit(DataSet data)
{
int N = data.size();
Vec tmp = new DenseVector(N);
List<Vec> ws = new ArrayList<Vec>(C);
Matrix X;
WhitenedPCA whiten = null;
if(!preWhitened)
{
            //we'll allocate a dense matrix and grab a row view for extra efficiency
zeroMean = new ZeroMeanTransform(data);
data = data.shallowClone();
data.applyTransform(zeroMean);
whiten = new WhitenedPCA(data);
data.applyTransform(whiten);
X = data.getDataMatrixView();
}
else
X = data.getDataMatrixView();
int subD = X.cols();//projected space may be smaller if low rank
Vec w_tmp = new DenseVector(subD);//used to check for convergence
int maxIter = 500;//TODO make this configurable
for(int p = 0; p < C; p++)
{
Vec w_p = Vec.random(subD);
w_p.normalize();
int iter = 0;
do
{
//w_tmp is our old value use for convergence checking
w_p.copyTo(w_tmp);
tmp.zeroOut();
X.multiply(w_p, 1.0, tmp);
double gwx_avg = 0;
for(int i = 0; i < tmp.length(); i++)
{
final double x = tmp.get(i);
final double g = G.deriv1(x);
final double gp = G.deriv2(x, g);
if(Double.isNaN(g) || Double.isInfinite(g) ||
                            Double.isNaN(gp) || Double.isInfinite(gp))
throw new FailedToFitException("Encountered NaN or Inf in calculation");
tmp.set(i, g);
gwx_avg += gp;
}
gwx_avg /= N;
                //w+ = E{x g(w^T x)} − E{g'(w^T x)} w
w_p.mutableMultiply(-gwx_avg);
X.transposeMultiply(1.0/N, tmp, w_p);
                //reorthogonalization by w_p = w_p - sum_{j=0}^{p-1} (w_p^T w_j) w_j
double[] coefs = new double[ws.size()];
for(int i= 0; i < coefs.length; i++)
coefs[i] = w_p.dot(ws.get(i));
for(int i= 0; i < coefs.length; i++)
w_p.mutableAdd(-coefs[i], ws.get(i));
//re normalize
w_p.normalize();
/*
                 * Convergence check at end of loop: "Note that convergence means
* that the old and new values of w point in the same direction,
* i.e. their dot-product is (almost) equal to 1. It is not
* necessary that the vector converges to a single point, since
* w and −w define the same direction"
*/
}
while(abs(1-abs(w_p.dot(w_tmp))) > 1e-6 && iter++ < maxIter);
ws.add(w_p);
}
if(!preWhitened)
{
Matrix W = new MatrixOfVecs(ws);
unmixing = W.multiply(whiten.transform).transpose();
}
else
unmixing = new DenseMatrix(new MatrixOfVecs(ws)).transpose();
mixing = new SingularValueDecomposition(unmixing.clone()).getPseudoInverse();
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public FastICA(FastICA toCopy)
{
this.C = toCopy.C;
this.G = toCopy.G;
this.preWhitened = toCopy.preWhitened;
if (toCopy.zeroMean != null)
this.zeroMean = toCopy.zeroMean.clone();
if (toCopy.unmixing != null)
this.unmixing = toCopy.unmixing.clone();
if (toCopy.mixing != null)
this.mixing = toCopy.mixing.clone();
}
/**
* Sets the number of base components to learn
* @param C the number of base components to assume and try to discover
*/
public void setC(int C)
{
if(C < 1)
throw new IllegalArgumentException("Number of components must be positive, not " + C);
this.C = C;
}
/**
*
* @return the number of base components to assume and try to discover
*/
public int getC()
{
return C;
}
/**
* Sets the Negative Entropy function used to infer the base components.
*
* @param G the Negative Entropy function to use
*/
public void setNegEntropyFunction(NegEntropyFunc G)
{
if(G == null)
throw new NullPointerException("Negative Entropy function must be non-null");
this.G = G;
}
/**
*
* @return the Negative Entropy function to use
*/
public NegEntropyFunc getNegEntropyFunction()
{
return G;
}
/**
* Controls where or not the implementation assumes the input data is
* already whitened. Whitening is a requirement for the algorithm to work as
* intended.
*
* @param preWhitened {@code true} to assume the data has already been
* whitened, {@code false} for this object to do its own whitening
*/
public void setPreWhitened(boolean preWhitened)
{
this.preWhitened = preWhitened;
}
/**
*
* @return {@code true} if this object will assume the data has already been
* whitened, {@code false} for this object to do its own whitening
*/
public boolean isPreWhitened()
{
return preWhitened;
}
@Override
public DataPoint transform(DataPoint dp)
{
Vec x;
if (zeroMean != null)
x = zeroMean.transform(dp).getNumericalValues();
else
x = dp.getNumericalValues();
Vec newX = x.multiply(unmixing);
        //we know that zeroMean won't impact cat values or weight
return new DataPoint(newX, dp.getCategoricalValues(), dp.getCategoricalData());
}
@Override
public DataPoint inverse(DataPoint dp)
{
Vec x = dp.getNumericalValues();
x = x.multiply(mixing);
DataPoint toRet = new DataPoint(x, dp.getCategoricalValues(), dp.getCategoricalData());
if(zeroMean != null)
zeroMean.mutableInverse(toRet);
return toRet;
}
@Override
public FastICA clone()
{
return new FastICA(this);
}
}
| 13,509 | 29.496614 | 98 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/FixedDataTransform.java | /*
* This code contributed in the public domain.
*/
package jsat.datatransform;
import jsat.classifiers.DataPoint;
/**
 * This interface is meant to be used for convenience when you wish to apply a
* transformation to a data set using the Java 8 lambda features. It is for
* transformations that do not need to be trained on any data, or where all
* training has been done in advance.
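 * <br><br>
 * A minimal sketch of a lambda-based transform (the choice to scale the
 * numeric values by 2 and leave the categorical values untouched is purely
 * illustrative):
 * <pre><code>
 * FixedDataTransform scaleByTwo = dp -> new DataPoint(
 *         dp.getNumericalValues().multiply(2.0),
 *         dp.getCategoricalValues(),
 *         dp.getCategoricalData());
 * </code></pre>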
*
* @author Edward Raff
*/
public interface FixedDataTransform
{
/**
* Returns a new data point that is a transformation of the original data
* point. This new data point is a different object, but may contain the
* same references as the original data point. It is not guaranteed that you
* can mutate the transformed point without having a side effect on the
* original point.
*
* @param dp the data point to apply a transformation to
* @return a transformed data point
*/
public DataPoint transform(DataPoint dp);
}
| 968 | 30.258065 | 80 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/Imputer.java | /*
* Copyright (C) 2016 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.datatransform;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.IndexValue;
import jsat.linear.Vec;
import jsat.math.OnLineStatistics;
import jsat.utils.DoubleList;
import jsat.utils.IndexTable;
/**
* Imputes missing values in a dataset by finding reasonable default values. For
* categorical features, the mode will always be used for imputing. Numeric
* values can change how the imputing value is selected by using the
* {@link NumericImputionMode} enum.
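 * <br><br>
 * A minimal usage sketch (assuming {@code train} is a DataSet already in
 * scope; the choice of median imputation is illustrative):
 * <pre><code>
 * Imputer imputer = new Imputer(train, Imputer.NumericImputionMode.MEDIAN);
 * train.applyTransform(imputer);
 * </code></pre>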
*
* @author Edward Raff <[email protected]>
*/
public class Imputer implements InPlaceTransform
{
private NumericImputionMode mode;
/**
* The values to impute for missing numeric columns
*/
protected int[] cat_imputs;
/**
* The values to impute for missing numeric columns
*/
protected double[] numeric_imputs;
public static enum NumericImputionMode
{
MEAN,
MEDIAN,
//TODO, add mode
}
public Imputer(DataSet<?> data)
{
this(data, NumericImputionMode.MEAN);
}
public Imputer(DataSet<?> data, NumericImputionMode mode)
{
this.mode = mode;
this.fit(data);
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public Imputer(Imputer toCopy)
{
this.mode = toCopy.mode;
if (toCopy.cat_imputs != null)
this.cat_imputs = Arrays.copyOf(toCopy.cat_imputs, toCopy.cat_imputs.length);
if (toCopy.numeric_imputs != null)
this.numeric_imputs = Arrays.copyOf(toCopy.numeric_imputs, toCopy.numeric_imputs.length);
}
@Override
public void fit(DataSet d)
{
numeric_imputs = new double[d.getNumNumericalVars()];
cat_imputs = new int[d.getNumCategoricalVars()];
List<List<Double>> columnCounts = null;
List<List<Double>> columnWeights = null;
double[] colSoW = null;
switch(mode)
{
case MEAN:
//lets just do this now since we are calling the function
OnLineStatistics[] stats = d.getOnlineColumnStats(true);
for(int i = 0; i < stats.length; i++)
numeric_imputs[i] = stats[i].getMean();
break;
case MEDIAN:
columnCounts = new ArrayList<List<Double>>(d.getNumNumericalVars());
columnWeights = new ArrayList<List<Double>>(d.getNumNumericalVars());
colSoW = new double[d.getNumNumericalVars()];
for(int i = 0; i < d.getNumNumericalVars(); i++)
{
columnCounts.add(new DoubleList(d.size()));
columnWeights.add(new DoubleList(d.size()));
}
break;
}
//space to count how many times each cat is seen
double[][] cat_counts = new double[d.getNumCategoricalVars()][];
for(int i = 0; i < cat_counts.length; i++)
cat_counts[i] = new double[d.getCategories()[i].getNumOfCategories()];
for(int sample = 0; sample < d.size(); sample++)
{
DataPoint dp = d.getDataPoint(sample);
final double weights = d.getWeight(sample);
int[] cats = dp.getCategoricalValues();
for(int i = 0; i < cats.length; i++)
if(cats[i] >= 0)//missing is < 0
cat_counts[i][cats[i]] += weights;
Vec numeric = dp.getNumericalValues();
if (mode == NumericImputionMode.MEDIAN)
{
for (IndexValue iv : numeric)
if (!Double.isNaN(iv.getValue()))
{
columnCounts.get(iv.getIndex()).add(iv.getValue());
columnWeights.get(iv.getIndex()).add(weights);
colSoW[iv.getIndex()] += weights;
}
}
}
if(mode == NumericImputionMode.MEDIAN)
{
IndexTable it = new IndexTable(d.getNumNumericalVars());
for (int col = 0; col < d.getNumNumericalVars(); col++)
{
List<Double> colVal = columnCounts.get(col);
List<Double> colWeight = columnWeights.get(col);
it.reset();
it.sort(colVal);
//we are going to loop through until we reach past the half weight mark, getting us the weighted median
double goal = colSoW[col]/2;
double lastSeen = 0;
double curWeight = 0;
                //loop breaks once we pass the median, so last seen is the median
for(int i = 0; i < it.length() && curWeight < goal; i++)
{
int indx = it.index(i);
lastSeen = colVal.get(indx);
curWeight += colWeight.get(indx);
}
numeric_imputs[col] = lastSeen;
}
}
//last, determine mode for cats
for(int col = 0; col < cat_counts.length; col++)
{
int col_mode = 0;
for(int j = 1; j < cat_counts[col].length; j++)
if(cat_counts[col][j] > cat_counts[col][col_mode])
col_mode = j;
cat_imputs[col] = col_mode;
}
}
@Override
public void mutableTransform(DataPoint dp)
{
Vec vec = dp.getNumericalValues();
for(IndexValue iv : vec)
if(Double.isNaN(iv.getValue()))
vec.set(iv.getIndex(), numeric_imputs[iv.getIndex()]);
int[] cats = dp.getCategoricalValues();
for(int i = 0; i < cats.length; i++)
if(cats[i] < 0)
cats[i] = cat_imputs[i];
}
@Override
public boolean mutatesNominal()
{
return true;
}
@Override
public DataPoint transform(DataPoint dp)
{
DataPoint toRet = dp.clone();
        //TODO, sparse vec case can be handled better by building the new vector directly and setting its values separately
mutableTransform(toRet);
return toRet;
}
@Override
public Imputer clone()
{
return new Imputer(this);
}
}
| 7,126 | 32.303738 | 119 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/InPlaceInvertibleTransform.java | package jsat.datatransform;
import jsat.classifiers.DataPoint;
/**
* This interface behaves exactly as {@link InPlaceTransform} specifies, with
* the addition of an in-place "reverse" method that can be used to alter any
* given transformed data point back into an <i>approximation</i> of the
* original vector, without having to new vector object, but altering the one
* given.
*
* @author Edward Raff
*/
public interface InPlaceInvertibleTransform extends InPlaceTransform, InvertibleTransform
{
/**
* Mutates the given data point. This causes side effects, altering the data
* point to have the same value as the output of
* {@link #inverse(jsat.classifiers.DataPoint) }
*
* @param dp the data point to alter with an inverse transformation
*/
public void mutableInverse(DataPoint dp);
@Override
public InPlaceInvertibleTransform clone();
}
| 907 | 29.266667 | 89 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/InPlaceTransform.java | package jsat.datatransform;
import jsat.classifiers.DataPoint;
/**
 * An In Place Transform is one that has the same number of categorical and
 * numeric features as the input. This means it can transform the input data
 * point in place instead of allocating a new one, which can reduce overhead on
 * memory allocations. This can be useful when performing many data transforms
 * in cross validation or when processing new examples in an environment that
 * is applying an already learned model.
 * <br><br> This interface is assumed to be applied to numeric features. In
 * case this is not true, a {@link #mutatesNominal() } method is provided for
 * the implementation to indicate otherwise.
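 * <br><br>
 * A minimal usage sketch (assuming {@code dataSet} is a numeric DataSet
 * already in scope; using {@link LinearTransform} is an arbitrary choice of
 * in-place transform):
 * <pre><code>
 * InPlaceTransform scaler = new LinearTransform(dataSet);
 * scaler.mutableTransform(dataSet.getDataPoint(0));//alters the point directly
 * </code></pre>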
*
* @author Edward Raff
*/
public interface InPlaceTransform extends DataTransform
{
/**
* Mutates the given data point. This causes side effects, altering the data
* point to have the same value as the output of
* {@link #transform(jsat.classifiers.DataPoint) }.
*
* @param dp the data point to alter
*/
public void mutableTransform(DataPoint dp);
/**
* By default returns {@code false}. Only returns true if this transform
     * will mutate the nominal feature values of a data point.
*
* @return {@code true} if nominal feature values are mutated, {@code false}
* otherwise.
*/
public boolean mutatesNominal();
}
| 1,410 | 35.179487 | 89 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/InsertMissingValuesTransform.java | /*
* Copyright (C) 2016 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.datatransform;
import java.util.Random;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.Vec;
import jsat.utils.random.RandomUtil;
import jsat.utils.random.XORWOW;
/**
* This transform mostly exists for testing code. It alters a dataset by setting
* features to missing with a fixed probability.
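 * <br><br>
 * A minimal usage sketch (the 5% missing rate is an arbitrary illustrative
 * value):
 * <pre><code>
 * dataSet.applyTransform(new InsertMissingValuesTransform(0.05));
 * </code></pre>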
*
* @author edwardraff
*/
public class InsertMissingValuesTransform implements InPlaceTransform
{
private double prob;
private Random rand;
/**
*
* @param prob the probability of setting each feature to missing
*/
public InsertMissingValuesTransform(double prob)
{
this(prob, RandomUtil.getRandom());
}
/**
*
* @param prob the probability of setting each feature to missing
* @param rand the source of randomness
*/
public InsertMissingValuesTransform(double prob, Random rand)
{
this.prob = Math.min(1, Math.max(0, prob));
this.rand = rand;
}
@Override
public void fit(DataSet data)
{
//no-op, nothing to do
}
@Override
public void mutableTransform(DataPoint dp)
{
Vec v = dp.getNumericalValues();
for(int i = 0; i < v.length(); i++)
if(rand.nextDouble() < prob)
v.set(i, Double.NaN);
int[] cats = dp.getCategoricalValues();
for(int i = 0; i < cats.length; i++)
if(rand.nextDouble() < prob)
cats[i] = -1;
}
@Override
public boolean mutatesNominal()
{
return true;
}
@Override
public DataPoint transform(DataPoint dp)
{
DataPoint ndp = dp.clone();
mutableTransform(ndp);
return ndp;
}
@Override
public InsertMissingValuesTransform clone()
{
return new InsertMissingValuesTransform(prob, rand);
}
}
| 2,564 | 25.443299 | 80 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/InverseOfTransform.java | package jsat.datatransform;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
/**
* Creates a new Transform object that simply uses the inverse of an
* {@link InvertibleTransform} as a regular transform. This allows one to apply
* inverses after the fact in a simple matter like:
* <pre><code>
* DataSet x = //some data set;
* InvertibleTransform transform = //some transform;
* x.applyTransform(transform);//apply the original transform
* //reverse the transform, getting back to where we started
* x.applyTransform(new InverseOfTransform(transform));
* </code></pre>
* @author Edward Raff
*/
public class InverseOfTransform implements DataTransform
{
private static final long serialVersionUID = 2565737661260748018L;
private InvertibleTransform transform;
/**
     * Creates a new transform that applies the
     * {@link InvertibleTransform#inverse(jsat.classifiers.DataPoint)
     * inverse} of the given transform
* @param transform the transform to use the inverse function of
*/
public InverseOfTransform(InvertibleTransform transform)
{
this.transform = transform;
}
@Override
public void fit(DataSet data)
{
//no-op, nothing to do
}
/**
* Copy constructor
* @param toClone the object to copy
*/
public InverseOfTransform(InverseOfTransform toClone)
{
this(toClone.transform.clone());
}
@Override
public DataPoint transform(DataPoint dp)
{
return transform.inverse(dp);
}
@Override
public InverseOfTransform clone()
{
return new InverseOfTransform(this);
}
}
| 1,663 | 25 | 80 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/InvertibleTransform.java | package jsat.datatransform;
import jsat.classifiers.DataPoint;
/**
 * An InvertibleTransform is one in which any given transformed vector can be
 * inverted to recover an <i>approximation</i> of the original vector when using
 * a transform that implements this interface. It may not be possible to
 * perfectly reproduce the original data point: i.e., this process may not be
 * loss-less.
*
* @author Edward Raff
*/
public interface InvertibleTransform extends DataTransform
{
/**
* Applies the inverse or "reverse" transform to approximately undo the
* effect of {@link #transform(jsat.classifiers.DataPoint) } to recover an
* approximation of the original data point.
*
* @param dp the transformed data point
* @return the original data point, or a reasonable approximation
*/
public DataPoint inverse(DataPoint dp);
@Override
public InvertibleTransform clone();
}
| 927 | 29.933333 | 78 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/JLTransform.java | package jsat.datatransform;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.distributions.Distribution;
import jsat.distributions.LogUniform;
import jsat.linear.DenseMatrix;
import jsat.linear.DenseVector;
import jsat.linear.IndexValue;
import jsat.linear.Matrix;
import jsat.linear.RandomMatrix;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.utils.IntList;
import jsat.utils.random.RandomUtil;
/**
* The Johnson-Lindenstrauss (JL) Transform is a type of random projection down
* to a lower dimensional space. The goal is, with a high probability, to keep
* the {@link EuclideanDistance Euclidean distances} between points
* approximately the same in the original and projected space. <br>
 * The JL lemma states that, with high probability, the squared distance
 * between two points <i>u</i> and <i>v</i> in the lower dimensional
 * space will lie within (1 ± ε) d(<i>u</i>, <i>v</i>)<sup>2</sup>, where d is
* the Euclidean distance. It works best for very high dimension problems, 1000
* or more.
* <br>
* For more information see: <br>
* Achlioptas, D. (2003). <i>Database-friendly random projections:
* Johnson-Lindenstrauss with binary coins</i>. Journal of Computer and System
* Sciences, 66(4), 671–687. doi:10.1016/S0022-0000(03)00025-4
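 * <br><br>
 * A minimal usage sketch (the target dimension of 50 and the SPARSE_SQRT mode
 * are illustrative defaults, not recommendations for any particular dataset):
 * <pre><code>
 * JLTransform jl = new JLTransform(50, JLTransform.TransformMode.SPARSE_SQRT);
 * jl.fit(dataSet);
 * dataSet.applyTransform(jl);
 * </code></pre>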
*
* @author Edward Raff
*/
public class JLTransform extends DataTransformBase
{
private static final long serialVersionUID = -8621368067861343913L;
/**
* Determines which distribution to construct the transform matrix from
*/
public enum TransformMode
{
/**
* The transform matrix values come from the gaussian distribution and
* is dense <br><br>
* This transform is expensive to use when not using an in memory matrix
*/
GAUSS,
/**
* The transform matrix values are binary and faster to generate.
*/
BINARY,
/**
         * The transform matrix values are sparse, using the original
         * "Database Friendly" transform approach.
*/
SPARSE,
/**
* The transform matrix is sparser, making it faster to apply. For most
* all datasets should provide results of equal quality to
* {@link #SPARSE} option while being faster.
*/
SPARSE_SQRT,
/**
* The transform matrix is highly sparse, making it exceptionally fast
* for larger datasets. However, accuracy may be reduced for some
* problems.
*/
SPARSE_LOG
}
/**
* This is used to make the Sparse JL option run faster by avoiding FLOPS.
* <br>
     * There will be one IntList for every feature in the feature set. Each
     * IntList value j indicates which of the transformed indices feature i
     * will contribute its value to, and the sign of j indicates whether that
     * contribution should be additive or subtractive.
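     * <br>
     * For example (illustrative values): an entry of 3 means "add x_i to
     * output index 3", while an entry of -4 means "subtract x_i from output
     * index 3", since negative entries are offset by one (-j-1) to avoid the
     * ambiguity of -0.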
*/
private List<IntList> sparse_jl_map;
private double sparse_jl_cnst;
private TransformMode mode;
private Matrix R;
/**
* Copy constructor
* @param transform the transform to copy
*/
protected JLTransform(JLTransform transform)
{
this.mode = transform.mode;
this.R = transform.R.clone();
this.k = transform.k;
if(transform.sparse_jl_map != null)
{
this.sparse_jl_map = new ArrayList<>(transform.sparse_jl_map.size());
for(IntList a : transform.sparse_jl_map)
this.sparse_jl_map.add(new IntList(a));
}
this.sparse_jl_cnst = transform.sparse_jl_cnst;
}
/**
* Creates a new JL Transform that uses a target dimension of 50 features.
* This may not be optimal for any particular dataset.
*
*/
public JLTransform()
{
this(50);
}
/**
* Creates a new JL Transform
* @param k the target dimension size
*/
public JLTransform(final int k)
{
this(k, TransformMode.SPARSE_SQRT);
}
/**
* Creates a new JL Transform
* @param k the target dimension size
* @param mode how to construct the transform
*/
public JLTransform(final int k, final TransformMode mode)
{
this(k, mode, true);
}
/**
* Target dimension size
*/
private int k;
private boolean inMemory;
/**
* Creates a new JL Transform
* @param k the target dimension size
* @param mode how to construct the transform
* @param inMemory if {@code false}, the matrix will be stored in O(1)
* memory at the cost of execution time.
*/
public JLTransform(final int k, final TransformMode mode, boolean inMemory)
{
this.mode = mode;
this.k = k;
this.inMemory = inMemory;
}
@Override
public void fit(DataSet data)
{
final int d = data.getNumNumericalVars();
Random rand = RandomUtil.getRandom();
Matrix oldR = R = new RandomMatrixJL(k, d, rand.nextLong(), mode);
if(mode == TransformMode.GAUSS)
{
if(inMemory)
{
R = new DenseMatrix(k, d);
R.mutableAdd(oldR);
}
}
else//Sparse case! Lets do this smarter
{
int s;
switch(mode)
{
case SPARSE_SQRT:
s = (int) Math.round(Math.sqrt(d+1));
break;
case SPARSE_LOG:
s = (int) Math.round(d/Math.log(d+1));
break;
default://default case, use original SPARSE JL algo
s = 3;
}
sparse_jl_cnst = Math.sqrt(s);
//Lets set up some random mats.
sparse_jl_map = new ArrayList<>(d);
IntList all_embed_dims = IntList.range(0, k);
int nnz = k/s;
for(int j = 0; j < d; j++)
{
Collections.shuffle(all_embed_dims, rand);
IntList x_j_map = new IntList(nnz);
                //First half of the selected output dims become the positives
                for(int i = 0; i < nnz/2; i++)
                    x_j_map.add(all_embed_dims.get(i));
                //Second half become the negatives
                for(int i = nnz/2; i < nnz; i++)
                    x_j_map.add(-(all_embed_dims.get(i)+1));//+1 b/c -0 would be a problem, since it does not exist
//Sort this after so that the later use of this iteration order is better behaved for CPU cache & prefetching
Collections.sort(x_j_map, (Integer o1, Integer o2) -> Integer.compare(Math.abs(o1), Math.abs(o2)));
sparse_jl_map.add(x_j_map);
}
}
}
/**
* The JL transform uses a random matrix to project the data, and the mode
* controls which method is used to construct this matrix.
*
* @param mode how to construct the transform
*/
public void setMode(TransformMode mode)
{
this.mode = mode;
}
/**
*
* @return how to construct the transform
*/
public TransformMode getMode()
{
return mode;
}
/**
* Sets whether or not the transform matrix is stored explicitly in memory
* or not. Explicit storage is often faster, but can be prohibitive for
* large datasets
* @param inMemory {@code true} to explicitly store the transform matrix,
* {@code false} to re-create it on the fly as needed
*/
public void setInMemory(boolean inMemory)
{
this.inMemory = inMemory;
}
/**
*
* @return {@code true} if this object will explicitly store the transform
* matrix, {@code false} to re-create it on the fly as needed
*/
public boolean isInMemory()
{
return inMemory;
}
/**
* Sets the target dimension size to use for the output
* @param k the dimension after apply the transform
*/
public void setProjectedDimension(int k)
{
this.k = k;
}
/**
*
* @return the dimension after apply the transform
*/
public int getProjectedDimension()
{
return k;
}
public static Distribution guessProjectedDimension(DataSet d)
{
        //heuristic, could be improved by some theory
double max = 100;
double min = 10;
if(d.getNumNumericalVars() > 10000)
{
min = 100;
max = 1000;
}
return new LogUniform(min, max);
}
@Override
public DataPoint transform(DataPoint dp)
{
Vec newVec;
switch(mode)
{
case SPARSE:
case SPARSE_SQRT:
case SPARSE_LOG:
//Sparse JL case, do adds and final mul
newVec = new DenseVector(k);
for(IndexValue iv : dp.getNumericalValues())
{
double x_i = iv.getValue();
int i = iv.getIndex();
for(int j : sparse_jl_map.get(i))
{
if(j >= 0)
newVec.increment(j, x_i);
else
newVec.increment(-j-1, -x_i);
}
                }
                //apply the scaling constant once, after all contributions are accumulated
                newVec.mutableMultiply(sparse_jl_cnst);
break;
default://default case, do the explicity mat-mul
newVec = dp.getNumericalValues();
newVec = R.multiply(newVec);
}
DataPoint newDP = new DataPoint(newVec, dp.getCategoricalValues(),
dp.getCategoricalData());
return newDP;
}
@Override
public JLTransform clone()
{
return new JLTransform(this);
}
private static class RandomMatrixJL extends RandomMatrix
{
private static final long serialVersionUID = 2009377824896155918L;
public double cnst;
private TransformMode mode;
public RandomMatrixJL(RandomMatrixJL toCopy)
{
super(toCopy);
this.cnst = toCopy.cnst;
this.mode = toCopy.mode;
}
public RandomMatrixJL(int rows, int cols, long XORSeed, TransformMode mode)
{
super(rows, cols, XORSeed);
this.mode = mode;
int k = rows;
if (mode == TransformMode.GAUSS || mode == TransformMode.BINARY)
cnst = 1.0 / Math.sqrt(k);
else if (mode == TransformMode.SPARSE)
cnst = Math.sqrt(3) / Math.sqrt(k);
}
@Override
protected double getVal(Random rand)
{
if (mode == TransformMode.GAUSS)
{
return rand.nextGaussian()*cnst;
}
else if (mode == TransformMode.BINARY)
{
return (rand.nextBoolean() ? -cnst : cnst);
}
else if (mode == TransformMode.SPARSE)
{
int val = rand.nextInt(6);
//1 with prob 1/6, -1 with prob 1/6
if(val == 0)
return -cnst;
else if(val == 1)
return cnst;
else //0 with prob 2/3
return 0;
}
else
throw new RuntimeException("BUG: Please report");
}
@Override
public RandomMatrixJL clone()
{
return new RandomMatrixJL(this);
}
}
}
| 11,924 | 29.113636 | 125 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/LinearTransform.java |
package jsat.datatransform;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
import jsat.math.OnLineStatistics;
/**
* This class transforms all numerical values into a specified range by a linear
* scaling of all the data point values.
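 * <br><br>
 * A minimal usage sketch (rescaling {@code dataSet} into the default [0, 1]
 * range; the dataset name is a placeholder):
 * <pre><code>
 * LinearTransform scale = new LinearTransform(dataSet);
 * dataSet.applyTransform(scale);
 * </code></pre>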
*
* @author Edward Raff
*/
public class LinearTransform implements InPlaceInvertibleTransform
{
private static final long serialVersionUID = 5580283565080452022L;
/**
* The max value
*/
private double A;
/**
* The min value
*/
private double B;
/**
* The minimum observed value for each attribute
*/
private Vec mins;
/**
* Represents
*
* A - B
* -----------
* max - min
*/
private Vec mutliplyConstants;
/**
* Creates a new Linear Transformation that will scale
* values to the [0, 1] range.
*
*/
public LinearTransform()
{
this(1, 0);
}
/**
* Creates a new Linear Transformation for the input data set so that all
* values are in the [0, 1] range.
*
* @param dataSet the data set to learn the transform from
*/
public LinearTransform(DataSet dataSet)
{
this(dataSet, 1, 0);
}
/**
* Creates a new Linear Transformation.
*
* @param dataSet the data set to learn the transform from
* @param A the maximum value for the transformed data set
* @param B the minimum value for the transformed data set
*/
public LinearTransform(double A, double B)
{
setRange(A, B);
}
/**
* Creates a new Linear Transformation for the input data set.
*
* @param dataSet the data set to learn the transform from
* @param A the maximum value for the transformed data set
* @param B the minimum value for the transformed data set
*/
public LinearTransform(DataSet dataSet, double A, double B)
{
this(A, B);
fit(dataSet);
}
/**
* Sets the min and max value to scale the data to. If given in the wrong order, this method will swap them
* @param A the maximum value for the transformed data set
* @param B the minimum value for the transformed data set
*/
public void setRange(double A, double B)
{
if(A == B)
throw new RuntimeException("Values must be different");
else if(B > A)
{
double tmp = A;
A = B;
B = tmp;
}
this.A = A;
this.B = B;
}
@Override
public void fit(DataSet dataSet)
{
mins = new DenseVector(dataSet.getNumNumericalVars());
Vec maxs = new DenseVector(mins.length());
mutliplyConstants = new DenseVector(mins.length());
OnLineStatistics[] stats = dataSet.getOnlineColumnStats(false);
for(int i = 0; i < mins.length(); i++)
{
double min = stats[i].getMin();
double max = stats[i].getMax();
if (max - min < 1e-6)//No change
{
mins.set(i, 0);
maxs.set(i, 1);
mutliplyConstants.set(i, 1.0);
}
else
{
mins.set(i, min);
maxs.set(i, max);
mutliplyConstants.set(i, A - B);
}
}
/**
* Now we set up the vectors to perform transformations
*
* if x := the variable to be transformed to the range [A, B]
* Then the transformation we want is
*
* (A - B)
* B + --------- * (-min+x)
* max - min
*
* This middle constant will be placed in "maxs"
*
*/
maxs.mutableSubtract(mins);
mutliplyConstants.mutablePairwiseDivide(maxs);
}
/**
* Copy constructor
* @param other the transform to copy
*/
private LinearTransform(LinearTransform other)
{
this.A = other.A;
this.B = other.B;
if(other.mins != null)
this.mins = other.mins.clone();
if(other.mutliplyConstants != null)
this.mutliplyConstants = other.mutliplyConstants.clone();
}
@Override
public DataPoint transform(DataPoint dp)
{
DataPoint toRet = dp.clone();
mutableTransform(toRet);
return toRet;
}
@Override
public LinearTransform clone()
{
return new LinearTransform(this);
}
@Override
public void mutableInverse(DataPoint dp)
{
Vec v = dp.getNumericalValues();
v.mutableSubtract(B);
v.mutablePairwiseDivide(mutliplyConstants);
v.mutableAdd(mins);
}
@Override
public void mutableTransform(DataPoint dp)
{
Vec v = dp.getNumericalValues();
v.mutableSubtract(mins);
v.mutablePairwiseMultiply(mutliplyConstants);
v.mutableAdd(B);
}
@Override
public boolean mutatesNominal()
{
return false;
}
@Override
public DataPoint inverse(DataPoint dp)
{
DataPoint toRet = dp.clone();
mutableInverse(toRet);
return toRet;
}
}
| 5,314 | 23.606481 | 111 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/NominalToNumeric.java |
package jsat.datatransform;
import jsat.DataSet;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.DataPoint;
import jsat.linear.*;
/**
 * This transform converts nominal feature values to numeric ones by adding a
* new numeric feature for each possible categorical value for each nominal
* feature. The numeric features will be all zeros, with only a single numeric
* feature having a value of "1.0" for each nominal variable.
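 * <br><br>
 * For example (illustrative values): a nominal feature with the categories
 * {red, green, blue} becomes three numeric features, and a "green" value is
 * encoded as 0.0, 1.0, 0.0. A minimal usage sketch, assuming {@code dataSet}
 * is already in scope:
 * <pre><code>
 * dataSet.applyTransform(new NominalToNumeric(dataSet));
 * </code></pre>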
*
* @author Edward Raff
*/
public class NominalToNumeric implements DataTransform
{
private static final long serialVersionUID = -7765605678836464143L;
private int origNumericalCount;
private CategoricalData[] categoricalData;
private int addedNumers;
/**
* Creates a new transform to convert categorical to numeric features
*/
public NominalToNumeric()
{
}
/**
* Creates a new transform to convert categorical to numeric features for the given dataset
* @param dataSet the dataset to fit the transform to
*/
public NominalToNumeric(DataSet dataSet)
{
fit(dataSet);
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public NominalToNumeric(NominalToNumeric toCopy)
{
this.origNumericalCount = toCopy.origNumericalCount;
this.categoricalData = toCopy.categoricalData;
this.addedNumers = toCopy.addedNumers;
}
@Override
public void fit(DataSet data)
{
this.origNumericalCount = data.getNumNumericalVars();
this.categoricalData = data.getCategories();
addedNumers = 0;
for(CategoricalData cd : categoricalData)
addedNumers += cd.getNumOfCategories();
}
@Override
public DataPoint transform(DataPoint dp)
{
Vec v;
        //TODO we should detect if there are going to be so many sparse entries added by the categorical data that we should just choose a sparse vector anyway
if(dp.getNumericalValues().isSparse())
v = new SparseVector(origNumericalCount+addedNumers);
else
v = new DenseVector(origNumericalCount+addedNumers);
Vec oldV = dp.getNumericalValues();
int i = 0;
for(i = 0; i < origNumericalCount; i++)
v.set(i, oldV.get(i));
for(int j =0; j < categoricalData.length; j++)
{
v.set(i+dp.getCategoricalValue(j), 1.0);
i += categoricalData[j].getNumOfCategories();
}
return new DataPoint(v, new int[0], new CategoricalData[0]);
}
@Override
public NominalToNumeric clone()
{
return new NominalToNumeric(this);
}
}
| 2,730 | 27.747368 | 158 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/NumericalToHistogram.java |
package jsat.datatransform;
import java.util.Arrays;
import jsat.DataSet;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.DataPoint;
import jsat.distributions.Distribution;
import jsat.distributions.LogUniform;
import jsat.distributions.discrete.UniformDiscrete;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
/**
* This transform converts numerical features into categorical ones via a simple
 * histogram. Equal-width bins will be created for each numeric feature. Each
* numeric feature will be converted to the same number of bins. <br>
* This transform will handle missing values by simply ignoring them, and
* leaving the value missing in the transformed categorical variable.
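 * <br><br>
 * A minimal usage sketch (the choice of 10 bins is illustrative):
 * <pre><code>
 * NumericalToHistogram hist = new NumericalToHistogram(dataSet, 10);
 * dataSet.applyTransform(hist);
 * </code></pre>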
*
*
* @author Edward Raff
*/
public class NumericalToHistogram implements DataTransform
{
private static final long serialVersionUID = -2318706869393636074L;
private int n;
//First index is the vector index, 2nd index is the min value then the increment value
double[][] conversionArray;
CategoricalData[] newDataArray;
/**
* Creates a new transform which will use at most 25 bins when converting
* numeric features. This may not be optimal for any given dataset
*
*/
public NumericalToHistogram()
{
this(25);
}
/**
* Creates a new transform which will use O(sqrt(n)) bins for each numeric
* feature, where <i>n</i> is the number of data points in the dataset.
*
* @param dataSet the data set to create the transform from
*/
public NumericalToHistogram(DataSet dataSet)
{
this(dataSet, (int) Math.ceil(Math.sqrt(dataSet.size())));
}
/**
* Creates a new transform which will use at most the specified number of bins
*
* @param n the number of bins to create
*/
public NumericalToHistogram(int n)
{
setNumberOfBins(n);
}
/**
* Creates a new transform which will use the specified number of bins for
* each numeric feature.
* @param dataSet the data set to create the transform from
* @param n the number of bins to create
*/
public NumericalToHistogram(DataSet dataSet, int n)
{
this(n);
fit(dataSet);
}
@Override
public void fit(DataSet dataSet)
{
conversionArray = new double[dataSet.getNumNumericalVars()][2];
double[] mins = new double[conversionArray.length];
double[] maxs = new double[conversionArray.length];
for(int i = 0; i < mins.length; i++)
{
mins[i] = Double.MAX_VALUE;
            maxs[i] = -Double.MAX_VALUE;//Double.MIN_VALUE is the smallest positive value, not the most negative
}
for(int i = 0; i < dataSet.size(); i++)
{
Vec v = dataSet.getDataPoint(i).getNumericalValues();
for(int j = 0; j < mins.length; j++)
{
final double val = v.get(j);
if(Double.isNaN(val))
continue;
mins[j] = Math.min(mins[j], val);
maxs[j] = Math.max(maxs[j], val);
}
}
for(int i = 0; i < conversionArray.length; i++)
{
conversionArray[i][0] = mins[i];
conversionArray[i][1] = (maxs[i]-mins[i])/n;
}
newDataArray = new CategoricalData[dataSet.getNumNumericalVars() + dataSet.getNumCategoricalVars()];
for(int i = 0; i < dataSet.getNumNumericalVars(); i++)
newDataArray[i] = new CategoricalData(n);
System.arraycopy(dataSet.getCategories(), 0, newDataArray, dataSet.getNumNumericalVars(), dataSet.getNumCategoricalVars());
}
/**
* Sets the maximum number of histogram bins to use when creating the categorical version of numeric features.
* @param n the number of bins to create
*/
public void setNumberOfBins(int n)
{
if(n <= 0)
throw new RuntimeException("Must partition into a positive number of groups");
this.n = n;
}
/**
*
* @return the maximum number of bins to create
*/
public int getNumberOfBins()
{
return n;
}
/**
* Attempts to guess the number of bins to use
     * @param data the dataset to be transformed
* @return a distribution of the guess
*/
public static Distribution guessNumberOfBins(DataSet data)
{
if(data.size() < 20)
return new UniformDiscrete(2, data.size()-1);
else if(data.size() >= 1000000)
return new LogUniform(50, 1000);
int sqrt = (int) Math.sqrt(data.size());
return new UniformDiscrete(Math.max(sqrt/3, 2), Math.min(sqrt*3, data.size()-1));
}
/**
* Copy constructor
* @param other the transform to copy
*/
private NumericalToHistogram(NumericalToHistogram other)
{
this.n = other.n;
if(other.conversionArray != null)
{
this.conversionArray = new double[other.conversionArray.length][];
for(int i = 0; i < other.conversionArray.length; i++)
this.conversionArray[i] = Arrays.copyOf(other.conversionArray[i], other.conversionArray[i].length);
}
if(other.newDataArray != null)
{
this.newDataArray = new CategoricalData[other.newDataArray.length];
for(int i = 0; i < other.newDataArray.length; i++)
this.newDataArray[i] = other.newDataArray[i].clone();
}
}
@Override
public DataPoint transform(DataPoint dp)
{
int[] newCatVals = new int[newDataArray.length];
Vec v = dp.getNumericalValues();
for(int i = 0; i < conversionArray.length; i++)
{
double val = v.get(i) - conversionArray[i][0];
if(Double.isNaN(val))
{
newCatVals[i] = -1;//missing
continue;
}
int catVal = (int) Math.floor(val / conversionArray[i][1]);
if(catVal < 0)
catVal = 0;
else if(catVal >= n)
catVal = n-1;
newCatVals[i] = catVal;
}
System.arraycopy(dp.getCategoricalValues(), 0, newCatVals, conversionArray.length, dp.numCategoricalValues());
return new DataPoint(new DenseVector(0), newCatVals, newDataArray);
}
@Override
public NumericalToHistogram clone()
{
return new NumericalToHistogram(this);
}
}
| 6,534 | 30.723301 | 131 | java |